id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5010610 | from typing import Any, Awaitable, Callable
from xpresso.openapi.models import SecurityScheme
class SecurityBase:
    """Base declaration for xpresso security-scheme dependencies.

    Subclasses are expected to provide the three attributes below; this
    class itself only declares their types.
    """

    model: SecurityScheme  # OpenAPI description of the security scheme
    scheme_name: str  # name under which the scheme is registered
    # Awaitable extractor; parameter shape is scheme-specific, hence ``...``.
    __call__: Callable[..., Awaitable[Any]]
| StarcoderdataPython |
3263398 | <filename>visualize.py
import matplotlib.pyplot as plt
import pickle

# Load the pickled training history (a dict holding per-epoch metric
# lists; this script reads the 'loss' and 'val_loss' series).
# Bug fix: the original ``pickle.load(open(...))`` never closed the
# file handle; a context manager closes it deterministically.
with open("history.p", "rb") as history_file:
    history = pickle.load(history_file)
print(history)

# Plot the training and validation loss for each epoch
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
#plt.show()
plt.savefig('training_history.png')
| StarcoderdataPython |
8036317 | """Tests for zookeeper_remove_locks.py"""
import unittest
import os
import logging
from mock import patch, Mock
from lib.ingest.zookeeper_remove_locks import ZookeeperLocks
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class ZookeeperLocksFunctionsTest(unittest.TestCase):
    """Unit tests for the ZookeeperLocks lock-removal helper."""

    def setUp(self):
        # Ensure logging inside the module under test has a handler.
        logging.basicConfig()

    def tearDown(self):
        self.conn_mgr = None
        self.ddl_types = None

    def _make_locks(self):
        # Every test targets the same default database/table pair.
        return ZookeeperLocks("host1,host2,host3", "default", "sample_07")

    @patch('lib.ingest.zookeeper_remove_locks.KazooClient')
    def test_zookeeper_all_children(self, m_zkclient):
        main_nodes = [
            u'fake_health_sandbox', u'fake_open_scratch', u'fake_database_i',
            u'ingest', u'ibis', u'fake_im', u'default',
        ]
        m_zkclient().get_children = Mock(return_value=main_nodes)
        self.zkl_locks = self._make_locks()
        self.assertEqual(self.zkl_locks.show_all_children(), main_nodes)

    @patch('lib.ingest.zookeeper_remove_locks.KazooClient')
    def test_zookeeper_hive_namespace(self, m_zkclient):
        main_nodes = [
            u'brokers', u'zookeeper', u'yarn-leader-election', u'hadoop-ha',
            u'hive_zookeeper_namespace_hive1', u'isr_change_notification',
            u'admin', u'controller_epoch', u'hive_zookeeper_namespace_hue1',
            u'solr', u'rmstore', u'zkdtsm', u'consumers', u'config', u'hbase',
        ]
        m_zkclient().get_children = Mock(return_value=main_nodes)
        self.zkl_locks = self._make_locks()
        self.assertEqual(self.zkl_locks.get_hive_namespace(),
                         "hive_zookeeper_namespace_hive1")

    @patch('lib.ingest.zookeeper_remove_locks.KazooClient')
    def test_zookeeper_my_rec(self, m_zkclient):
        self.zkl_locks = self._make_locks()
        self.zkl_locks.get_hive_namespace = \
            Mock(return_value="hive_zookeeper_namespace_hive1")
        m_zkclient().delete = Mock(return_value="Deleted")
        m_zkclient().get_children = Mock(
            return_value=["LOCK-EXCLUSIVE-0000000000"])
        deep_lock = ("hive_zookeeper_namespace_hive1/default/sample_07/"
                     "source_database_name=ibis/source_table_name=ibistest")
        expected = [("Deleted hive_zookeeper_namespace_hive1/"
                     "default/sample_07/"
                     "source_database_name=ibis/"
                     "source_table_name=ibistest/"
                     "LOCK-EXCLUSIVE-0000000000")]
        self.assertEqual(self.zkl_locks.my_rec(deep_lock), expected)

    @patch('lib.ingest.zookeeper_remove_locks.KazooClient')
    def test_zookeeper_my_rec_no_new_locks(self, m_zkclient):
        self.zkl_locks = self._make_locks()
        self.zkl_locks.get_hive_namespace = \
            Mock(return_value="hive_zookeeper_namespace_hive1")
        m_zkclient().delete = Mock(return_value="Deleted")
        m_zkclient().get_children = Mock(return_value=[])
        deep_lock = ("hive_zookeeper_namespace_hive1/default/sample_07/"
                     "source_database_name=ibis/source_table_name=ibistest")
        self.assertEqual(self.zkl_locks.my_rec(deep_lock), "All done")
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3522152 | import random
# Read the four students' names from the console.
a1 = str(input('Primeiro aluno: '))
a2 = str(input('Segundo aluno: '))
a3 = str(input('Terceiro aluno: '))
a4 = str(input('Quarto aluno: '))
lista = [a1, a2, a3, a4]
# Bug fix: random.sample(lista, 4) returned ALL four names in shuffled
# order, but the message below announces a single chosen student
# ("O aluno escolhido"); pick exactly one at random instead.
aa = random.choice(lista)
print(f'O aluno escolhido é {aa}') | StarcoderdataPython |
6657991 | <reponame>haradashinya/cloud_logger<filename>cloud_logger/__init__.py
import datetime
import logging
import os
import time
import uuid
import json
import boto3
from io import StringIO
class CloudLoggerObject:
    """Bundle of CloudWatch Logs client, log-group name and format string.

    Credentials and region are read from the ``CLOUD_LOGGER_*``
    environment variables (all three must be set, or ``.strip()`` on
    ``None`` raises).
    """

    def __init__(self, format, name, **kwargs):
        self.name = name
        self.format = format
        self.kwargs = kwargs
        key_id = os.environ.get(
            'CLOUD_LOGGER_ACCESS_KEY_ID').strip()
        secret_key = os.environ.get(
            'CLOUD_LOGGER_SECRET_ACCESS_KEY').strip()
        region = os.environ.get('CLOUD_LOGGER_REGION').strip()
        self.client = boto3.client('logs',
                                   aws_access_key_id=key_id,
                                   aws_secret_access_key=secret_key,
                                   region_name=region)
class CloudLogger:
    """Thin helper around a CloudLoggerObject for CloudWatch Logs calls."""

    def __init__(self, logger_obj):
        # logger_obj carries the boto3 client, log-group name and format.
        self.logger_obj = logger_obj

    def build_put_params(self, log_streams, log_stream_name, message):
        """Build the kwargs dict for ``client.put_log_events``.

        Args:
            log_streams: ``describe_log_streams`` response for the stream.
            log_stream_name: target stream inside the log group.
            message: log message text.

        Returns:
            dict usable as ``put_log_events(**params)``; includes
            ``sequenceToken`` when the stream already has one.
        """
        params = dict(
            logGroupName=self.logger_obj.name,
            logStreamName=log_stream_name,
            logEvents=[{
                # Second-resolution timestamp expressed in milliseconds.
                'timestamp': int(time.time()) * 1000,
                'message': message
            }],
        )
        # Bug fix: the original only had ``return params`` inside the
        # except branch, so the success path (token found) returned None.
        try:
            params['sequenceToken'] = \
                log_streams['logStreams'][0]['uploadSequenceToken']
        except (KeyError, IndexError):
            # Fresh stream: no sequence token yet.
            pass
        return params

    def get_log_streams(self, log_stream_name):
        """Describe the single log stream matching the given name prefix."""
        return self.logger_obj.client.describe_log_streams(
            logGroupName=self.logger_obj.name,
            logStreamNamePrefix=log_stream_name,
            limit=1)
class CloudHandler(logging.StreamHandler):
    """StreamHandler that mirrors every record to AWS CloudWatch Logs.

    Each record is written to the wrapped stream as usual and then
    pushed to a CloudWatch log stream named after the record's level.
    """

    level = logging.DEBUG

    def __init__(self, logger_obj):
        # Fix: the original called both super().__init__() and
        # logging.StreamHandler.__init__(self); one init is enough.
        super().__init__()
        self.logger_obj = logger_obj
        self.setFormatter(logging.Formatter(logger_obj.format))
        client = self.logger_obj.client
        # Create the log group, tolerating "already exists".
        try:
            client.create_log_group(logGroupName=self.logger_obj.name)
        except Exception as e:
            if e.__dict__['response']['Error']['Code'] != 'ResourceAlreadyExistsException':
                raise Exception(f'Error happend when create a group: {e}')
        # Create one log stream per level name, tolerating "already exists".
        groups = ['DEBUG', 'WARNING', 'ERROR', 'CRITICAL', 'FATAL']
        for group in groups:
            try:
                client.create_log_stream(logGroupName=self.logger_obj.name,
                                         logStreamName=group)
            except Exception as e:
                if e.__dict__['response']['Error']['Code'] != 'ResourceAlreadyExistsException':
                    raise Exception(f'Error happened when create a log stream: {e}')

    def build_put_params(self, log_streams, log_stream_name, message):
        """Build the kwargs dict for ``put_log_events``.

        Includes ``sequenceToken`` when the described stream has one.
        """
        params = dict(
            logGroupName=self.logger_obj.name,
            logStreamName=log_stream_name,
            logEvents=[{
                # Second-resolution timestamp expressed in milliseconds.
                'timestamp': int(time.time()) * 1000,
                'message': message
            }],
        )
        # Fix: narrowed the bare ``except:`` to the lookups that can fail.
        try:
            params['sequenceToken'] = \
                log_streams['logStreams'][0]['uploadSequenceToken']
        except (KeyError, IndexError):
            # First event on a fresh stream: no sequence token yet.
            pass
        return params

    def get_log_streams(self, log_stream_name):
        """Describe the single log stream matching the given name prefix."""
        return self.logger_obj.client.describe_log_streams(
            logGroupName=self.logger_obj.name,
            logStreamNamePrefix=log_stream_name,
            limit=1)

    def put_log_event(self, log_stream_name, msg):
        """Push one message to CloudWatch with the current sequence token."""
        self.logger_obj.client.put_log_events(**self.build_put_params(
            self.get_log_streams(log_stream_name), log_stream_name, msg))

    def emit(self, record):
        """Write the record to the stream and mirror it to CloudWatch."""
        try:
            msg = self.format(record)
            stream = self.stream
            stream.write(msg)
            stream.write(self.terminator)
            self.flush()
            # One CloudWatch stream per level name (DEBUG, ERROR, ...).
            log_stream_name = record.levelname
            # Fix: dropped the original's extra get_log_streams() call
            # here — its result was unused; put_log_event fetches its own.
            try:
                self.put_log_event(log_stream_name, msg)
            except Exception as e2:
                # CloudWatch failures must not break local logging.
                print(f'PutLogEventError: {e2}')
        except Exception:
            self.handleError(record)
| StarcoderdataPython |
3326922 | # test fbchat api
import sys
from unittest.mock import patch
from pathlib import Path
import pytest
from postr import fbchat_api
from postr import config
# Make the package importable when tests are run from this directory.
sys.path.insert(0, '../postr')
# These tests need credentials from postr_config.ini; without the file
# every test would fail, so skip the whole module instead.
my_file = Path('../postr_config.ini')
if not my_file.is_file():
    pytest.skip('If there is no config_file, then all these tests will fail', allow_module_level=True)
class Object():
    """Lightweight stand-in for fbchat user/thread/message objects.

    Tests assign only the attributes the code under test reads.
    """

    name: str = ''  # display name (users and threads)
    uid: str = ''  # fbchat identifier
    first_name: str = ''  # used by user-info fixtures
    last_name: str = ''  # used by user-info fixtures
    text: str = ''  # message body for message fixtures
# Module-level fixture state: read the test-account credentials and log
# in once at import time; every test below reuses this client.
password: str = str(config.get_api_key('TESTFB', 'password'))
email: str = str(config.get_api_key('TESTFB', 'email'))
client = fbchat_api.FacebookChatApi(email, password)
def test_nothing(fbchat_test: int) -> None:
    """Sanity-check the fbchat_test fixture value."""
    expected = 17
    assert fbchat_test == expected
def test_get_user_id() -> None:
    """get_user_id should map a user's display name to their uid."""
    with patch('fbchat.Client.fetchAllUsers') as mock_fetch:
        users = []
        for name, uid in (('sally', '111222333'), ('martha', '1111111')):
            user = Object()
            user.name = name
            user.uid = uid
            users.append(user)
        mock_fetch.return_value = users
        assert client.get_user_id('sally') == '111222333'
        assert client.get_user_id('martha') == '1111111'
def test_get_user_name() -> None:
    """get_user_name should join first and last name for a uid."""
    with patch('fbchat.Client.fetchUserInfo') as mock_fetch:
        user = Object()
        user.first_name = 'Sally'
        user.last_name = 'Glass'
        uid = '111222333'
        mock_fetch.return_value = {uid: user}
        assert client.get_user_name(uid) == 'Sally Glass'
def test_get_thread_name() -> None:
    """get_thread_name should return the thread's display name."""
    with patch('fbchat.Client.fetchThreadInfo') as mock_fetch:
        thread = Object()
        thread.name = 'Game Design Chat'
        tid = '246832'
        mock_fetch.return_value = {tid: thread}
        assert client.get_thread_name(tid) == 'Game Design Chat'
def test_get_messages_from_thread() -> None:
    """Messages come back as plain text, in reversed fetch order."""
    with patch('fbchat.Client.fetchThreadMessages') as mock_fetch:
        texts = [
            'This is a message to you',
            'This is a another message to you',
            'Why are you ignoring me?',
            'pls respond',
        ]
        fetched = []
        for text in texts:
            message = Object()
            message.text = text
            fetched.append(message)
        mock_fetch.return_value = fetched
        actual = client.get_messages_from_thread('112233', 4)
        assert len(actual) == 4
        assert actual[3] == 'This is a message to you'
        assert actual[0] == 'pls respond'
def test_get_all_threads() -> None:
    """get_all_threads should map thread names to their uids."""
    fixtures = (('Billy', '11111'), ('Daniel', '22222'),
                ('Sally', '33333'), ('Fred', '44444'))
    with patch('fbchat.Client.fetchThreadList') as mock_fetch:
        threads = []
        for name, uid in fixtures:
            thread = Object()
            thread.name = name
            thread.uid = uid
            threads.append(thread)
        mock_fetch.return_value = threads
        actual_client = client.get_client()
        thread_dict = fbchat_api.FacebookChatApi.get_all_threads(actual_client)
        for name, uid in fixtures:
            assert thread_dict[name] == uid
def test_get_all_users_in_chat_with() -> None:
    """get_all_users_in_chat_with should map user names to their uids."""
    fixtures = (('Billy', '11111'), ('Daniel', '22222'),
                ('Sally', '33333'), ('Fred', '44444'))
    with patch('fbchat.Client.fetchAllUsers') as mock_fetch:
        users = []
        for name, uid in fixtures:
            user = Object()
            user.name = name
            user.uid = uid
            users.append(user)
        mock_fetch.return_value = users
        actual_client = client.get_client()
        user_dict = fbchat_api.FacebookChatApi.get_all_users_in_chat_with(actual_client)
        for name, uid in fixtures:
            assert user_dict[name] == uid
def test_get_thread_id() -> None:
    """get_thread_id should return the uid of the first search hit."""
    with patch('fbchat.Client.searchForThreads') as mock_search:
        hit = Object()
        hit.name = 'Billy'
        hit.uid = '11111'
        mock_search.return_value = [hit]
        assert client.get_thread_id('Billy') == hit.uid
def test_send_local_image_with_message() -> None:
    """Sending a local image should delegate to fbchat's sendLocalImage."""
    with patch('fbchat.Client.sendLocalImage') as send_mock:
        send_mock.return_value = None
        client.send_local_image_with_message('thread_id', 'file_path', 'message')
        send_mock.assert_called()
def test_send_remote_image_with_message() -> None:
    """Sending a remote image should delegate to fbchat's sendRemoteImage."""
    with patch('fbchat.Client.sendRemoteImage') as send_mock:
        send_mock.return_value = None
        client.send_remote_image_with_message('thread_id', 'file_path', 'message')
        send_mock.assert_called()
def test_send_text_message() -> None:
    """Sending text should delegate to fbchat's send."""
    with patch('fbchat.Client.send') as send_mock:
        send_mock.return_value = None
        client.send_text_message('thread_id', 'message')
        send_mock.assert_called()
def test_send_local_file_with_message() -> None:
    """Sending a local file should delegate to fbchat's sendLocalFiles."""
    with patch('fbchat.Client.sendLocalFiles') as send_mock:
        send_mock.return_value = None
        client.send_local_file_with_message('thread_id', 'file_path', 'message')
        send_mock.assert_called()
# TODO these methods require mcoking two objects, which I have not figured out yet
# def test_get_threads_with_unread_messages() -> None:
# def test_get_all_users_in_this_chat() -> None:
# def test_start_thread_with_users() -> None:
# def test_start_thread_with_user() -> None:
# def test_delete_thread() -> None:
| StarcoderdataPython |
1754601 | <filename>test/test_converter_v3.py<gh_stars>0
import io
from lxml import objectify
import pathlib
import pytest
import tempfile
from hpxml_version_translator.converter import (
convert_hpxml3_to_4,
convert_hpxml_to_version,
)
from hpxml_version_translator import exceptions as exc
hpxml_dir = pathlib.Path(__file__).resolve().parent / "hpxml_v3_files"
def convert_hpxml_and_parse(input_filename, version="4.0"):
    """Translate *input_filename* to *version* and return the parsed root."""
    with tempfile.NamedTemporaryFile("w+b") as converted:
        convert_hpxml_to_version(version, input_filename, converted)
        # Rewind so lxml reads the freshly written document.
        converted.seek(0)
        return objectify.parse(converted).getroot()
def test_version_change_to_4():
    """The translated document should advertise schema version 4.0."""
    result = convert_hpxml_and_parse(hpxml_dir / "version_change.xml")
    assert result.attrib["schemaVersion"] == "4.0"
def test_enclosure_foundation():
    """Insulation geometry must move onto the Insulation/Layer elements."""
    root = convert_hpxml_and_parse(hpxml_dir / "enclosure_foundation.xml")
    for building_index in (0, 1):
        enclosure = root.Building[building_index].BuildingDetails.Enclosure
        walls = enclosure.FoundationWalls.FoundationWall
        first_wall, second_wall = walls[0], walls[1]
        # Deprecated v3 wall-level distances must be gone on both walls.
        for wall in (first_wall, second_wall):
            assert not hasattr(wall, "DistanceToTopOfInsulation")
            assert not hasattr(wall, "DistanceToBottomOfInsulation")
        # ...and reappear on each insulation layer of the second wall.
        for layer in (second_wall.Insulation.Layer[0],
                      second_wall.Insulation.Layer[1]):
            assert layer.DistanceToTopOfInsulation == 1.0
            assert layer.DistanceToBottomOfInsulation == 5.0
        slab = enclosure.Slabs.Slab[0]
        # NOTE(review): these three checks target the wall element, not
        # the slab, mirroring the original test — confirm that is intended.
        assert not hasattr(first_wall, "PerimeterInsulationDepth")
        assert not hasattr(first_wall, "UnderSlabInsulationWidth")
        assert not hasattr(first_wall, "UnderSlabInsulationSpansSlab")
        assert slab.PerimeterInsulation.Layer[0].InsulationDepth == 2.0
        assert slab.UnderSlabInsulation.Layer[0].InsulationWidth == 1.0
        assert not slab.UnderSlabInsulation.Layer[0].InsulationSpansEntireSlab
def test_battery():
    """Battery capacities must come through as ampere-hour pairs."""
    root = convert_hpxml_and_parse(hpxml_dir / "battery.xml")
    expected = ((1000, 800), (2000, 1600))
    for index, (nominal, usable) in enumerate(expected):
        battery = root.Building[0].BuildingDetails.Systems.Batteries.Battery[index]
        assert battery.NominalCapacity.Units == "Ah"
        assert battery.NominalCapacity.Value == nominal
        assert battery.UsableCapacity.Units == "Ah"
        assert battery.UsableCapacity.Value == usable
def test_mismatch_version():
    """Asking the v3-to-v4 converter for a 2.x target must raise."""
    output_buffer = io.BytesIO()
    expected_msg = r"convert_hpxml3_to_4 must have valid target version of 4\.x"
    with pytest.raises(exc.HpxmlTranslationError, match=expected_msg):
        convert_hpxml3_to_4(hpxml_dir / "version_change.xml", output_buffer, "2.0")
| StarcoderdataPython |
3515253 | from rest_framework import serializers
from .models import DynatraceConfiguration
class DynatraceConfigurationSerializer(serializers.ModelSerializer):
    """DRF serializer exposing DynatraceConfiguration records."""

    class Meta:
        model = DynatraceConfiguration
        # NOTE(review): api_key is serialized here — confirm exposing the
        # secret through this endpoint is intended.
        fields = ("id", "base_url", "api_key", "entity_selector")
| StarcoderdataPython |
8162055 | import torch
from torch import nn
import torchvision.models as models
import os
from config import DATA_PATH
import pickle
import bcolz
# Sorted names of all lowercase, callable entries in torchvision.models,
# i.e. the selectable architecture strings (e.g. 'resnet18').
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
class Identity(nn.Module):
    """No-op module that returns its input tensor unchanged."""

    def forward(self, x):
        """Return *x* untouched."""
        return x
class AdaptiveHead(nn.Module):
    r"""A collection of `torch.nn.Linear` heads with one active at a time,
    to be used as an adaptive head for a classification model.

    Args:
        in_features: size of each input sample
        out_features: list of output sizes, one per head (a bare int is
            treated as a single head)
        bias: If set to ``False``, the heads will not learn an additive
            bias. Default: ``True``

    Shape:
        - Input: :math:`(N, *, H_{in})` where :math:`H_{in} = \text{in\_features}`
        - Output: :math:`(N, *, H_{out})` where :math:`H_{out}` is the size of
          the head selected with :meth:`set_active`.

    Examples::

        >>> model = AdaptiveHead(20, [5, 20])
        >>> input = torch.randn(128, 20)
        >>> model(input).size()
        torch.Size([128, 5])
        >>> model.set_active(1)
        >>> model(input).size()
        torch.Size([128, 20])
    """

    def __init__(self, in_features, out_features, bias=True):
        super(AdaptiveHead, self).__init__()
        self.in_features = in_features
        # Normalize so a single int means "one head".
        if not isinstance(out_features, list):
            self._out_features = [out_features]
        else:
            self._out_features = out_features
        # Register each head as attribute fc0, fc1, ... so nn.Module
        # tracks their parameters (.to(), .parameters(), state_dict).
        for index, head_size in enumerate(self._out_features):
            setattr(self,
                    "fc{}".format(index),
                    nn.Linear(self.in_features, head_size, bias=bias))
        self._active = 0
        self._has_bias = bias

    def set_active(self, active):
        """Select which head subsequent ``forward`` calls use."""
        assert active < len(self._out_features)
        self._active = active

    @property
    def out_features(self):
        """Output size of the currently active head."""
        return self._out_features[self._active]

    @property
    def fc(self):
        """All head layers, in head-index order.

        Bug fix: the original iterated over ``self._out_features`` (the
        head *sizes*) when building attribute names, so accessing ``fc``
        raised AttributeError unless a head's size happened to equal a
        valid index.
        """
        return [getattr(self, "fc{}".format(index))
                for index in range(len(self._out_features))]

    def forward(self, inputs):
        """Apply the currently active head to *inputs*."""
        active_head = getattr(self, "fc{}".format(self._active))
        return active_head(inputs)

    def extra_repr(self):
        sizes = "/".join(str(size) for size in self._out_features)
        # Bug fix: the original printed ``self._has_bias is not None``,
        # which is always True; report the actual flag instead.
        return "in_features={}, out_features={}, bias={}".format(
            self.in_features, sizes, self._has_bias
        )
class AdaptiveHeadClassifier(nn.Module):
    r"""Classifier built from a torchvision backbone plus an AdaptiveHead.

    The backbone's final ``fc`` layer is replaced with an
    :class:`AdaptiveHead`, giving one selectable output head per entry in
    *out_features*.

    Args:
        out_features: list of output sizes, one per head.
        architecture: name of a torchvision model, e.g. ``'resnet18'``.
            Default ``'resnet18'``.
        pretrained: initialize the backbone with pre-trained weights.
            Default ``True``.
        frozen backbone: if *freeze* is ``True`` the backbone acts as a
            fixed feature extractor; only the head(s) learn.
            Default ``False``.

    Examples::

        >>> model = AdaptiveHeadClassifier([100, 5, 10])
        >>> output = model(torch.randn(128, 3, 28, 28))
        >>> output.size()
        torch.Size([128, 100])
        >>> model.set_active(2)
        >>> model(torch.randn(128, 3, 28, 28)).size()
        torch.Size([128, 10])
    """

    def __init__(self, out_features, architecture='resnet18', pretrained=True,
                 freeze=False):
        super(AdaptiveHeadClassifier, self).__init__()
        self._pretrained = pretrained
        self._architecture = architecture
        self._freeze = freeze
        backbone = models.__dict__[architecture](pretrained=pretrained)
        if freeze:
            # Fixed feature extractor: stop gradients in the backbone.
            for parameter in backbone.parameters():
                parameter.requires_grad = False
        # The chosen torchvision model must expose a final Linear at .fc;
        # it is swapped out for the adaptive multi-head.
        assert hasattr(backbone, "fc")
        assert hasattr(backbone.fc, "in_features")
        self.model_head = AdaptiveHead(backbone.fc.in_features, out_features)
        backbone.fc = self.model_head
        self.model = backbone

    def set_active(self, active):
        """Select which head produces the output."""
        self.model_head.set_active(active)

    @property
    def out_features(self):
        """Output size of the currently active head."""
        return self.model_head.out_features

    def forward(self, inputs):
        return self.model(inputs)
class ClassEmbedding(nn.Module):
    r"""Linear classifier over frozen class embeddings plus attributes.

    Looks up a fixed (non-trainable) embedding for each class index --
    one table while ``self.training`` is True, a separate one in eval
    mode -- concatenates it with an attribute vector, and projects the
    result to ``out_features`` scores with a single linear layer.
    (The original docstring here was copy-pasted from
    AdaptiveHeadClassifier and described the wrong class.)

    Args:
        out_features: size of the output (score) layer.
        train_embedding: numpy array ``(num_train_classes, embed_dim)``
            used while training.
        eval_embedding: numpy array used in eval mode.
        embed_dim: nominal embedding size. NOTE(review): this value is
            ignored -- the width is taken from
            ``train_embedding.shape[1]``; confirm callers pass a
            consistent value.
        attribute_dim: size of the attribute vector concatenated to the
            embedding.
    """

    def __init__(self, out_features, train_embedding, eval_embedding,
                 embed_dim, attribute_dim):
        super(ClassEmbedding, self).__init__()
        self.out_features = out_features
        # The effective embedding width comes from the table itself.
        self.embed_dim = train_embedding.shape[1]
        self.attribute_dim = attribute_dim
        self.train_embedding = self._create_embedding_layer(train_embedding)
        self.eval_embedding = self._create_embedding_layer(eval_embedding)
        self.fc = nn.Linear(self.embed_dim + self.attribute_dim,
                            self.out_features)

    def embedding(self, y):
        """Look up *y* in the table matching the current train/eval mode."""
        table = self.train_embedding if self.training else self.eval_embedding
        return table(y)

    def forward(self, y, attributes):
        # TODO unit length
        # TODO ensure that y correctly maps to class label semantics
        features = torch.cat([self.embedding(y), attributes], dim=1)
        return self.fc(features)

    def _create_embedding_layer(self, embedding):
        """Wrap a numpy matrix in a frozen ``nn.Embedding``."""
        rows, cols = embedding.shape
        layer = nn.Embedding(rows, cols)
        layer.load_state_dict({'weight': torch.from_numpy(embedding)})
        layer.weight.requires_grad = False
        return layer
| StarcoderdataPython |
399612 | <filename>demos/org.python3.11.0/static/org.python3.11.0.py<gh_stars>1-10
# suspicious or too slow
# test.test_fileio.PyAutoFileTests.testReadintoByteArray
# test.test_mimetypes.MimetypesCliTestCase.test_invalid_option
# test.test_getpath.MockGetPathTests.test_registry_win32 ?????? picke error on module obj

# Test-selection lists for this python3.11 port: each constant is a
# newline/whitespace-separated blob of CPython test ids.  How they are
# consumed is not visible here — presumably split() by a runner; TODO confirm.

# Too slow to run routinely.
SLOW="""
test_pickle
"""
# Suspicious behaviour (see note above); excluded from normal runs.
PROBLEMATIC = """
test.test_mimetypes.MimetypesCliTestCase.test_invalid_option
test.test_fileio.PyAutoFileTests.testReadintoByteArray
test.test_fileio.PyOtherFileTests.testTruncate
"""
# Whole test modules that currently fail.
FAILS= """
test_argparse test_code_module test_distutils test_ensurepip test_genericpath
test_inspect test_mailbox test_mailcap test_posixpath test_pydoc test_shutil
"""
# Individual test cases that may fail (name suggests triage candidates).
MAYBE= """
builtins.float
test.test_zipapp.ZipAppCmdlineTest.test_info_command
test.test_stat.TestFilemodeCStat.test_directory
test.test_stat.TestFilemodeCStat.test_mode
test.test_stat.TestFilemodePyStat.test_directory
test.test_stat.TestFilemodePyStat.test_mode
test.test_posix.PosixTester.test_dup
test.test_posix.TestPosixDirFd.test_readlink_dir_fd
test.test_posix.TestPosixDirFd.test_chmod_dir_fd
test.test_pathlib.PathTest.test_chmod_follow_symlinks_true
test.test_pathlib.PosixPathTest.test_chmod_follow_symlinks_true
test.test_pathlib.PathTest.test_chmod
test.test_pathlib.PathTest.test_is_mount
test.test_pathlib.PathTest.test_samefile
test.test_pathlib.PathTest.test_stat
test.test_pathlib.PosixPathTest.test_chmod
test.test_pathlib.PosixPathTest.test_is_mount
test.test_pathlib.PosixPathTest.test_samefile
test.test_pathlib.PosixPathTest.test_stat
test.test_os.TestScandir.test_attributes
test.test_os.UtimeTests.test_utime
test.test_os.UtimeTests.test_utime_by_indexed
test.test_os.UtimeTests.test_utime_by_times
test.test_os.UtimeTests.test_utime_dir_fd
test.test_os.UtimeTests.test_utime_directory
test.test_os.UtimeTests.test_utime_fd
test.test_ntpath.NtCommonTest.test_samefile
test.test_ntpath.NtCommonTest.test_samestat
test.test_netrc.NetrcTestCase.test_security
test.test_multibytecodec.Test_IncrementalEncoder.test_subinterp
test.test_io.CIOTest.test_buffered_file_io
test.test_io.CIOTest.test_raw_file_io
test.test_io.PyIOTest.test_buffered_file_io
test.test_io.PyIOTest.test_raw_file_io
test.test_io.CBufferedWriterTest.test_truncate
test.test_io.PyBufferedWriterTest.test_truncate
test.test_io.CBufferedRandomTest.test_truncate
test.test_io.PyBufferedRandomTest.test_truncate
test.test_math.MathTests.testLog2Exact
test.test_math.MathTests.testRemainder
test.test_math.MathTests.test_mtestfile
test.test_math.MathTests.test_nextafter
test.test_math.MathTests.test_testfile
test.test_getpath.MockGetPathTests.test_registry_win32
test.test_dbm_dumb.DumbDBMTestCase.test_readonly_files
test.test_dbm_dumb.DumbDBMTestCase.test_dumbdbm_creation_mode
test.test_cgi.CgiTests.test_log
test.test_numeric_tower.ComparisonTest.test_mixed_comparisons
test.test_tempfile.TestMkdtemp.test_non_directory
test.test_tempfile.TestMkstempInner.test_non_directory
test.test_strtod.StrtodTests.test_bigcomp
test.test_strtod.StrtodTests.test_boundaries
test.test_strtod.StrtodTests.test_parsing
test.test_strtod.StrtodTests.test_particular
test.test_strtod.StrtodTests.test_underflow_boundary
test.test_random.MersenneTwister_TestBasicOps.test_choices_subnormal
test.test_random.SystemRandom_TestBasicOps.test_choices_subnormal
test.test_tarfile.GNUReadTest.test_sparse_file_00
test.test_tarfile.GNUReadTest.test_sparse_file_01
test.test_tarfile.GNUReadTest.test_sparse_file_10
test.test_tarfile.GNUReadTest.test_sparse_file_old
test.test_tarfile.MiscReadTest.test_extract_directory
test.test_tarfile.MiscReadTest.test_extract_pathlike_name
test.test_tarfile.MiscReadTest.test_extractall
test.test_tarfile.MiscReadTest.test_extractall_pathlike_name
test.test_mimetypes.MimetypesCliTestCase.test_help_option
test.test_importlib.extension.test_loader.Frozen_LoaderTests.test_is_package
test.test_importlib.extension.test_loader.Frozen_LoaderTests.test_load_module_API
test.test_importlib.extension.test_loader.Frozen_LoaderTests.test_module
test.test_importlib.extension.test_loader.Frozen_LoaderTests.test_module_reuse
test.test_importlib.extension.test_loader.Source_LoaderTests.test_is_package
test.test_importlib.extension.test_loader.Source_LoaderTests.test_load_module_API
test.test_importlib.extension.test_loader.Source_LoaderTests.test_module
test.test_importlib.extension.test_loader.Source_LoaderTests.test_module_reuse
test.test_importlib.extension.test_finder.Frozen_FinderTests.test_module
unittest.test.test_discovery.TestDiscovery.test_command_line_handling_do_discovery_too_many_arguments
unittest.test.test_program.Test_TestProgram.test_Exit
unittest.test.test_program.Test_TestProgram.test_ExitAsDefault
test.test_strftime.StrftimeTest.test_strftime
test.test_decimal.CWhitebox.test_from_tuple
test.test_sys.SysModuleTest.test_exit
test.test_exceptions.ExceptionTests.testRaising
test.test_sysconfig.TestSysConfig.test_get_config_h_filename
test.test_uu.UUFileTest.test_decode_mode
test.test_float.HexFloatTestCase.test_from_hex
test.test_capi.SubinterpreterTest.test_module_state_shared_in_global
test.test_cmath.CMathTests.test_specific_values
test.test_zipfile.TestsWithMultipleOpens.test_many_opens
test.test_zipfile.EncodedMetadataTests.test_cli_with_metadata_encoding
test.test_zipfile.OtherTests.test_comments
test.test_zipfile.StoredTestsWithSourceFile.test_add_file_before_1980
"""
# Runs out of memory.
OOM="""
test.test_decimal.CWhitebox.test_maxcontext_exact_arith
"""
#test.test_bytes.BytesTest.test_from_format
# Tests that crash the embedded interpreter outright (mostly ctypes callback
# machinery, unsupported on this platform); always excluded via skip_list().
FATAL="""
ctypes.test.test_as_parameter.AsParamPropertyWrapperTestCase.test_byval
ctypes.test.test_as_parameter.AsParamPropertyWrapperTestCase.test_callbacks
ctypes.test.test_as_parameter.AsParamPropertyWrapperTestCase.test_callbacks_2
ctypes.test.test_as_parameter.AsParamPropertyWrapperTestCase.test_longlong_callbacks
ctypes.test.test_as_parameter.AsParamPropertyWrapperTestCase.test_shorts
ctypes.test.test_as_parameter.AsParamWrapperTestCase.test_callbacks
ctypes.test.test_as_parameter.AsParamWrapperTestCase.test_callbacks_2
ctypes.test.test_as_parameter.AsParamWrapperTestCase.test_longlong_callbacks
ctypes.test.test_as_parameter.AsParamWrapperTestCase.test_shorts
ctypes.test.test_as_parameter.BasicWrapTestCase.test_callbacks
ctypes.test.test_as_parameter.BasicWrapTestCase.test_callbacks_2
ctypes.test.test_as_parameter.BasicWrapTestCase.test_longlong_callbacks
ctypes.test.test_as_parameter.BasicWrapTestCase.test_shorts
ctypes.test.test_callbacks.Callbacks.test_byte
ctypes.test.test_callbacks.Callbacks.test_char
ctypes.test.test_callbacks.Callbacks.test_double
ctypes.test.test_callbacks.Callbacks.test_float
ctypes.test.test_callbacks.Callbacks.test_int
ctypes.test.test_callbacks.Callbacks.test_issue12483
ctypes.test.test_callbacks.Callbacks.test_issue_7959
ctypes.test.test_callbacks.Callbacks.test_long
ctypes.test.test_callbacks.Callbacks.test_longdouble
ctypes.test.test_callbacks.Callbacks.test_longlong
ctypes.test.test_callbacks.Callbacks.test_pyobject
ctypes.test.test_callbacks.Callbacks.test_short
ctypes.test.test_callbacks.Callbacks.test_ubyte
ctypes.test.test_callbacks.Callbacks.test_uint
ctypes.test.test_callbacks.Callbacks.test_ulong
ctypes.test.test_callbacks.Callbacks.test_ulonglong
ctypes.test.test_callbacks.Callbacks.test_unsupported_restype_1
ctypes.test.test_callbacks.Callbacks.test_unsupported_restype_2
ctypes.test.test_callbacks.Callbacks.test_ushort
ctypes.test.test_callbacks.SampleCallbacksTestCase.test_callback_large_struct
ctypes.test.test_callbacks.SampleCallbacksTestCase.test_callback_register_double
ctypes.test.test_callbacks.SampleCallbacksTestCase.test_callback_register_int
ctypes.test.test_callbacks.SampleCallbacksTestCase.test_callback_too_many_args
ctypes.test.test_callbacks.SampleCallbacksTestCase.test_convert_result_error
ctypes.test.test_callbacks.SampleCallbacksTestCase.test_integrate
ctypes.test.test_callbacks.SampleCallbacksTestCase.test_issue_8959_a
ctypes.test.test_frombuffer.Test.test_fortran_contiguous
ctypes.test.test_funcptr.CFuncPtrTestCase.test_basic
ctypes.test.test_funcptr.CFuncPtrTestCase.test_first
ctypes.test.test_funcptr.CFuncPtrTestCase.test_structures
ctypes.test.test_functions.FunctionTestCase.test_callbacks
ctypes.test.test_functions.FunctionTestCase.test_callbacks_2
ctypes.test.test_functions.FunctionTestCase.test_longlong_callbacks
ctypes.test.test_functions.FunctionTestCase.test_sf1651235
ctypes.test.test_functions.FunctionTestCase.test_shorts
ctypes.test.test_libc.LibTest.test_qsort
ctypes.test.test_pickling.PickleTest_0.test_unpickable
ctypes.test.test_pickling.PickleTest_1.test_unpickable
ctypes.test.test_pickling.PickleTest_2.test_unpickable
ctypes.test.test_pickling.PickleTest_3.test_unpickable
ctypes.test.test_pickling.PickleTest_4.test_unpickable
ctypes.test.test_pickling.PickleTest_5.test_unpickable
ctypes.test.test_pointers.PointersTestCase.test_callbacks_with_pointers
ctypes.test.test_prototypes.ArrayTest.test
ctypes.test.test_python_api.PythonAPITestCase.test_PyOS_snprintf
ctypes.test.test_random_things.CallbackTracbackTestCase.test_FloatDivisionError
ctypes.test.test_random_things.CallbackTracbackTestCase.test_IntegerDivisionError
ctypes.test.test_random_things.CallbackTracbackTestCase.test_TypeErrorDivisionError
ctypes.test.test_random_things.CallbackTracbackTestCase.test_ValueError
ctypes.test.test_refcounts.AnotherLeak.test_callback
ctypes.test.test_refcounts.RefcountTestCase.test_1
ctypes.test.test_refcounts.RefcountTestCase.test_refcount
ctypes.test.test_simplesubclasses.Test.test_ignore_retval
ctypes.test.test_simplesubclasses.Test.test_int_callback
"""
# The full list of stdlib regrtest test modules considered for the suite;
# test() filters this list against SKIPLIST/FAILS before running.
TESTS = """
test_grammar test_opcodes test_dict
test___all__ test___future__ test__locale test__opcode
test__osx_support test__xxsubinterpreters test_abc
test_abstract_numbers test_aifc test_argparse test_array
test_asdl_parser test_ast test_asyncgen test_asynchat test_asyncio
test_asyncore test_atexit test_audioop test_audit test_augassign
test_base64 test_baseexception test_bdb test_bigaddrspace
test_bigmem test_binascii test_binop test_bisect test_bool
test_buffer test_bufio test_builtin test_bytes test_bz2
test_c_locale_coercion test_calendar test_call test_capi test_cgi
test_cgitb test_charmapcodec test_check_c_globals test_class
test_clinic test_cmath test_cmd test_cmd_line test_cmd_line_script
test_code test_code_module test_codeccallbacks
test_codecencodings_cn test_codecencodings_hk
test_codecencodings_iso2022 test_codecencodings_jp
test_codecencodings_kr test_codecencodings_tw test_codecmaps_cn
test_codecmaps_hk test_codecmaps_jp test_codecmaps_kr
test_codecmaps_tw test_codecs test_codeop test_collections
test_colorsys test_compare test_compile test_compileall
test_complex test_concurrent_futures test_configparser
test_contains test_context test_contextlib test_contextlib_async
test_copy test_copyreg test_coroutines test_cprofile test_crashers
test_crypt test_csv test_ctypes test_curses test_dataclasses
test_datetime test_dbm test_dbm_dumb test_dbm_gnu test_dbm_ndbm
test_decimal test_decorators test_defaultdict test_deque
test_descr test_descrtut test_devpoll test_dict test_dict_version
test_dictcomps test_dictviews test_difflib test_dis test_distutils
test_doctest test_doctest2 test_docxmlrpc test_dtrace test_dynamic
test_dynamicclassattribute test_eintr test_email test_embed
test_ensurepip test_enum test_enumerate test_eof test_epoll
test_errno test_except_star test_exception_group
test_exception_hierarchy test_exception_variations test_exceptions
test_extcall test_faulthandler test_fcntl test_file
test_file_eintr test_filecmp test_fileinput test_fileio
test_fileutils test_finalization test_float test_flufl
test_fnmatch test_fork1 test_format test_fractions test_frame
test_frozen test_fstring test_ftplib test_funcattrs test_functools
test_future test_future3 test_future4 test_future5 test_gc
test_gdb test_generator_stop test_generators test_genericalias
test_genericclass test_genericpath test_genexps test_getargs2
test_getopt test_getpass test_getpath test_gettext test_glob
test_global test_graphlib test_grp test_gzip test_hash
test_hashlib test_heapq test_hmac test_html test_htmlparser
test_http_cookiejar test_http_cookies test_httplib
test_httpservers test_idle test_imaplib test_imghdr test_imp
test_import test_importlib test_index test_inspect test_int
test_int_literal test_interpreters test_io test_ioctl
test_ipaddress test_isinstance test_iter test_iterlen
test_itertools test_json test_keyword test_keywordonlyarg
test_kqueue test_largefile test_launcher test_lib2to3
test_linecache test_list test_listcomps test_lltrace test_locale
test_logging test_long test_longexp test_lzma test_mailbox
test_mailcap test_marshal test_math test_memoryio test_memoryview
test_metaclass test_mimetypes test_minidom test_mmap test_module
test_modulefinder test_msilib test_multibytecodec
test_multiprocessing_fork test_multiprocessing_forkserver
test_multiprocessing_main_handling test_multiprocessing_spawn
test_named_expressions test_netrc test_nis test_nntplib
test_ntpath test_numeric_tower test_opcache test_openpty
test_operator test_optparse test_ordered_dict test_os
test_ossaudiodev test_osx_env test_pathlib test_patma test_pdb
test_peepholer test_peg_generator test_pep646_syntax test_pickle
test_picklebuffer test_pickletools test_pipes test_pkg
test_pkgutil test_platform test_plistlib test_poll test_popen
test_poplib test_positional_only_arg test_posix test_posixpath
test_pow test_pprint test_print test_profile test_property
test_pstats test_pty test_pulldom test_pwd test_py_compile
test_pyclbr test_pydoc test_pyexpat test_queue test_quopri
test_raise test_random test_range test_re test_readline
test_regrtest test_repl test_reprlib test_resource test_richcmp
test_rlcompleter test_robotparser test_runpy test_sax test_sched
test_scope test_script_helper test_secrets test_select
test_selectors test_set test_setcomps test_shelve test_shlex
test_shutil test_signal test_site test_slice test_smtpd
test_smtplib test_smtpnet test_sndhdr test_socket
test_socketserver test_sort test_source_encoding test_spwd
test_ssl test_stable_abi_ctypes test_startfile test_stat
test_statistics test_strftime test_string test_string_literals
test_stringprep test_strptime test_strtod test_struct
test_structmembers test_structseq test_subclassinit
test_subprocess test_sunau test_sundry test_super test_support
test_symtable test_syntax test_sys test_sys_setprofile
test_sys_settrace test_sysconfig test_syslog test_tabnanny
test_tarfile test_tcl test_telnetlib test_tempfile test_textwrap
test_thread test_threadedtempfile test_threading
test_threading_local test_threadsignals test_time test_timeit
test_timeout test_tix test_tk test_tokenize test_tools test_trace
test_traceback test_tracemalloc test_ttk_guionly test_ttk_textonly
test_tuple test_turtle test_type_annotations test_type_cache
test_type_comments test_typechecks test_types test_typing test_ucn
test_unary test_unicode test_unicode_file
test_unicode_file_functions test_unicode_identifiers
test_unicodedata test_unittest test_univnewlines test_unpack
test_unpack_ex test_unparse test_urllib test_urllib2
test_urllib2_localnet test_urllib2net test_urllib_response
test_urllibnet test_urlparse test_userdict test_userlist
test_userstring test_utf8_mode test_utf8source test_uu test_uuid
test_venv test_wait3 test_wait4 test_warnings test_wave
test_weakref test_weakset test_webbrowser test_winconsoleio
test_winreg test_winsound test_with test_wsgiref test_xdrlib
test_xml_dom_minicompat test_xml_etree test_xml_etree_c
test_xmlrpc test_xmlrpc_net test_xxlimited test_xxtestfuzz
test_yield_from test_zipapp test_zipfile test_zipfile64
test_zipimport test_zipimport_support test_zlib test_zoneinfo
"""
#============================================================================
import sys, os
import asyncio
# Print the platform banner file (if present) and the interpreter version.
six = '/data/data/org.python/assets/cpython.six'
if os.path.isfile(six):
    # FIX: use a context manager so the file handle is closed deterministically
    # (the old bare open() relied on refcounting to close it).
    with open(six) as banner:
        print(banner.read())
print('CPython',sys.version,'\n', file=sys.stderr)

# Drop any pre-imported platform module, or test_platform will fail.
sys.modules.pop('platform', None)
def skip_list(*blocks):
    """Register regrtest exclusions for every test name in *blocks*.

    Each block is a newline/space separated text of test names; every name is
    appended to ``sys.argv`` as an ``-i <name>`` pair.  Importlib caches are
    invalidated first so freshly mounted test modules are picked up.
    """
    __import__('importlib').invalidate_caches()
    combined = "".join(blocks)
    for name in filter(None, combined.replace('\n', ' ').split(' ')):
        sys.argv += ["-i", name]
def testv(argv):
    """Run the single named test set *argv* verbosely through regrtest.

    Only the OOM/FATAL blacklists (module-level constants) are excluded.
    """
    sys.argv += ["-v"]
    skip_list(OOM, FATAL)
    from test.libregrtest.main import Regrtest
    runner = Regrtest()
    runner.parse_args({})
    runner._main([argv], {})
def test(*argv):
    """Run the whole curated test suite (TESTS minus the skip/blacklists).

    Builds ALL (every known test), SKIPLIST (fatal/slow/failing tests) and
    tlist (ALL minus SKIPLIST), then drives them through run_tests().

    NOTE(review): ``sys.argv.extend(*argv)`` splats the single positional
    argument, so callers must pass a *list* of extra args (``test([...])``);
    a plain string would be appended character by character -- confirm the
    intended call shape.
    """
    global RT, tlist, SKIPLIST, ALL, STDLIB
    global SLOW, PROBLEMATIC, MAYBE, OOM, FATAL, FAILS
    if len(argv):
        sys.argv.extend(*argv)
    # Register every slow/problematic/fatal test as a "-i <name>" exclusion.
    skip_list(SLOW, PROBLEMATIC, MAYBE, OOM, FATAL)
    from test.libregrtest.main import Regrtest
    RT = Regrtest()
    RT.parse_args({})
    SKIPLIST = []
    # those are fatal
    SKIPLIST += ["test_code"]
    # those are extremely slow and fail
    SKIPLIST += ["test_zipfile"]
    # known to fail
    for t in FAILS.replace('\n',' ').split(' '):
        if t and not t in SKIPLIST:
            SKIPLIST.append(t)
    tlist = []
    #start_list = True
    ALL = []
    for t in TESTS.replace('\n',' ').split(' '):
        if t:
            ALL.append(t)
#            if not start_list:
#                if t==SKIPLIST[-1]:
#                    start_list = True
#                continue
            if t not in SKIPLIST:
                tlist.append(t)
    # Hand the filtered list to the async driver.
    asyncio.run( run_tests( tlist ) )
async def run_tests(tlist):
    """Run each test module in *tlist* through the shared regrtest instance.

    Accumulates failures into the global BAD list (deduplicated), yielding to
    the event loop after each module, then prints a summary.
    """
    global RT, BAD, ALL, SKIPLIST
    BAD = []
    print("Starting now ...")
    for t in tlist:
        RT._main([t], {})
        # Collect this module's failures, then reset regrtest's bad-list.
        for bad in RT.bad:
            if not bad in BAD:
                BAD.append(bad)
        RT.bad.clear()
        #print("BAD",len(BAD),":", *BAD)
        # Yield control so the host event loop stays responsive.
        await asyncio.sleep(0)
    print("========== run_tests done ============")
    print("Tests total:", len(ALL) )
    print("Skipped:", len(SKIPLIST), *SKIPLIST )
    print("Failed total:", len(BAD) , *BAD)
    print()
    # sys._emscripten_info only exists on WASM/emscripten builds of CPython.
    print( sys._emscripten_info )
    print()
def sys_exit(*ec):
    """No-op replacement for sys.exit (ignores any exit code)."""
    pass

# Monkeypatch sys.exit so regrtest's exit calls cannot kill the embedded
# interpreter hosting this script.
sys.exit = sys_exit
print()
# sys._emscripten_info only exists on WASM/emscripten builds of CPython.
print( sys._emscripten_info )
print()
# Decide what to run based on the last CLI argument:
#   an app id ("org.python3...") -> just print usage,
#   a test name                  -> verbose single test via testv(),
#   "all"                        -> the full suite via test().
if sys.argv[-1].startswith('org.python3'):
    defer = ( print,"""
test() => run whole testsuite
testv("test_xxxx") => verbose run the test_xxxxx test set
""")
elif sys.argv[-1]!='all':
    print("starting one verbose test : ",sys.argv[-1])
    defer = ( testv, sys.argv[-1] )
else:
    print(" - starting full testsuite in a few seconds -")
    defer = ( test, )
print("""
Please Wait while gathering testsuite ...
""")
# NOTE(review): `aio` and `window` are not defined in this file -- presumably
# injected by the embedding runtime (pygbag/Android host); confirm.  The apk
# is mounted as /usr/lib, then the deferred callable above is scheduled.
(aio
    .after( window.mount_at("org.python3.11.0.apk", "/usr/lib", "/assets") )
    .then( *defer )
)
#
| StarcoderdataPython |
5197801 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
'''
功能:基于http的可目录浏览网站文件的递归下载
'''
# (Note above, translated: recursively download files from a web server that
# exposes an HTTP directory listing.)
#baseurl="http://10.166.7.151/docs/powerui2"
# Root of the directory listing to mirror (internal host).
baseurl="http://10.166.7.151/docs/jquery-easyui-1.4.2"
def getpginfo(url,path):
    """Fetch an Apache-style directory index page and walk it recursively.

    Python 2 script.  ``[DIR]`` entries recurse into subdirectories; file
    entries are downloaded via downfile() using the module-level baseurl.
    """
    # Browser-like headers so the server returns a normal listing page.
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate, compress',
               'Accept-Language': 'en-us;q=0.5,en;q=0.3',
               'Cache-Control': 'max-age=0',
               'Connection': 'keep-alive',
               'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
    response = requests.get(url=url,headers=headers,timeout=5)  # plain GET request
    # parse() yields (type-icon-alt, href, accumulated-path) tuples.
    ret= parse(response.content,path)
    #print ret
    for a,b,c in ret:
        if a=='[DIR]':
            print b
            # Recurse into the subdirectory, extending the relative path.
            getpginfo(url+"/"+b,path+b)
            #baseurl=url+"/"+b
        else:
            print baseurl+c+b
            downfile(baseurl+c+b,c)
def parse(html, path):
    """Parse an Apache-style directory index page into entry tuples.

    Returns a list of ``(icon_alt, href, path)`` tuples, one per table row:
    ``icon_alt`` is the type marker (e.g. ``[DIR]``), ``href`` the link target
    (empty for the "Parent Directory" row), and ``path`` is passed through.
    """
    soup = BeautifulSoup(html, "html.parser")
    # Drop the table header cells, then re-parse the prettified markup so the
    # remaining rows are uniform.
    for header_cell in soup.find_all('th'):
        header_cell.extract()
    soup = BeautifulSoup(soup.prettify(), "html.parser")

    entries = []
    for row in soup.find_all('tr'):
        # Skip the whitespace-only rows left behind by the header removal.
        if row.get_text() == "\n":
            continue
        entry_type = ''
        href = ''
        for cell in row.find_all("td"):
            icon = cell.find("img")
            link = cell.find("a")
            if icon and icon != -1:
                entry_type = icon.get("alt")
            if link and link != -1 and link.get_text() != 'Parent Directory':
                href = link.get("href")
        entries.append((entry_type, href, path))
    return entries
def downfile(url,path):
    """Stream-download *url* into the current directory (Python 2 script).

    The local filename is the URL basename (with any "?" stripped) prefixed
    by *path* with slashes flattened to dashes.
    """
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate, compress',
               'Accept-Language': 'en-us;q=0.5,en;q=0.3',
               'Cache-Control': 'max-age=0',
               'Connection': 'keep-alive',
               'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
    print "start download...",url
    # stream=True avoids loading the whole file into memory at once.
    r=requests.get(url,headers=headers,stream=True)
    filename=url.split('/')[-1].strip().replace("?","")
    filename=path.replace("/","-")+filename;
    with open(filename,'wb')as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                f.flush()
    print filename,"download ok!"
# Entry point: mirror the whole tree rooted at baseurl.
if __name__ == '__main__' :
    getpginfo(baseurl,"/")
| StarcoderdataPython |
8042388 | # Copyright 2016-2022 The <NAME> Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/kiosk-redis-consumer/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MesmerConsumer class for consuming Mesmer whole cell segmentation jobs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
import numpy as np
from deepcell.applications import ScaleDetection, Mesmer
from redis_consumer.consumers import TensorFlowServingConsumer
from redis_consumer import settings
class MesmerConsumer(TensorFlowServingConsumer):
    """Consumes Mesmer whole-cell segmentation jobs and uploads the results."""

    def detect_scale(self, image):
        """Send the image to the SCALE_DETECT_MODEL to detect the relative
        scale difference from the image to the model's training data.

        Args:
            image (numpy.array): The image data.

        Returns:
            scale (float): The detected scale, used to rescale data.
        """
        # FIX: check the feature flag *before* building the gRPC app --
        # previously the model stub was constructed even when scale detection
        # was disabled, which is wasted work on every job.
        if not settings.SCALE_DETECT_ENABLED:
            self.logger.debug('Scale detection disabled.')
            return 1

        start = timeit.default_timer()
        app = self.get_grpc_app(settings.SCALE_DETECT_MODEL, ScaleDetection)

        # TODO: What to do with multi-channel data?
        # detected_scale = app.predict(image[..., 0])
        detected_scale = 1

        self.logger.debug('Scale %s detected in %s seconds',
                          detected_scale, timeit.default_timer() - start)
        return detected_scale

    def _consume(self, redis_hash):
        """Process a single Redis job hash end-to-end.

        Downloads the input image, runs Mesmer prediction, saves and uploads
        the results, and keeps the job's Redis status updated throughout.

        Args:
            redis_hash (str): key of the Redis hash describing the job.

        Returns:
            str: the job's final status string.
        """
        start = timeit.default_timer()
        hvals = self.redis.hgetall(redis_hash)

        # Skip jobs another consumer already finished.
        if hvals.get('status') in self.finished_statuses:
            self.logger.warning('Found completed hash `%s` with status %s.',
                                redis_hash, hvals.get('status'))
            return hvals.get('status')

        self.logger.debug('Found hash to process `%s` with status `%s`.',
                          redis_hash, hvals.get('status'))

        self.update_key(redis_hash, {
            'status': 'started',
            'identity_started': self.name,
        })

        # Get model_name and version
        model_name, model_version = settings.MESMER_MODEL.split(':')

        _ = timeit.default_timer()

        # Load input image
        fname = hvals.get('input_file_name')
        image = self.download_image(fname)

        # squeeze extra dimension that is added by get_image
        image = np.squeeze(image)
        # add in the batch dim
        image = np.expand_dims(image, axis=0)

        # Pre-process data before sending to the model
        self.update_key(redis_hash, {
            'status': 'pre-processing',
            'download_time': timeit.default_timer() - _,
        })

        # TODO: implement detect_scale here for multiplex model
        scale = hvals.get('scale', '')
        scale = self.get_image_scale(scale, image, redis_hash)

        # detect dimension order and add to redis
        dim_order = self.detect_dimension_order(image, model_name, model_version)
        self.update_key(redis_hash, {
            'dim_order': ','.join(dim_order)
        })

        # Validate input image; channels come in as a comma-separated string.
        if hvals.get('channels'):
            channels = [int(c) for c in hvals.get('channels').split(',')]
        else:
            channels = None

        image = self.validate_model_input(image, model_name, model_version,
                                          channels=channels)

        # Send data to the model
        self.update_key(redis_hash, {'status': 'predicting'})

        app = self.get_grpc_app(settings.MESMER_MODEL, Mesmer)

        compartment = hvals.get('compartment', settings.MESMER_COMPARTMENT)

        # with new batching update in deepcell.applications,
        # app.predict() cannot handle a batch_size of None.
        batch_size = app.model.get_batch_size()
        results = app.predict(image, batch_size=batch_size,
                              compartment=compartment,
                              image_mpp=scale * app.model_mpp)

        # Save the post-processed results to a file
        _ = timeit.default_timer()
        self.update_key(redis_hash, {'status': 'saving-results'})

        save_name = hvals.get('original_name', fname)
        dest, output_url = self.save_output(results, save_name)

        # Update redis with the final results
        end = timeit.default_timer()
        self.update_key(redis_hash, {
            'status': self.final_status,
            'output_url': output_url,
            'upload_time': end - _,
            'output_file_name': dest,
            'total_jobs': 1,
            'total_time': end - start,
            'finished_at': self.get_current_timestamp()
        })
        return self.final_status
| StarcoderdataPython |
5095543 | import numpy as np
import torch
from torch import Tensor
def rand_bbox(size, lam):
    """Sample a random box whose area is a ``1 - lam`` fraction of the image.

    Args:
        size: tensor shape tuple; dims 2 and 3 are the spatial extents.
              (NOTE(review): named W/H here, though NCHW tensors are usually
              H/W at dims 2/3 -- usage is self-consistent, so only the naming
              is suspect.)
        lam:  Beta-sampled mixing ratio in [0, 1].

    Returns:
        (bbx1, bby1, bbx2, bby2): box corners clipped to the image bounds.
    """
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1. - lam)
    # FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; it was
    # always just an alias of the builtin ``int``.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)

    # uniform random box centre
    cx = np.random.randint(W)
    cy = np.random.randint(H)

    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)

    return bbx1, bby1, bbx2, bby2
class CutMix(torch.nn.Module):
    """CutMix based on "CutMix: Regularization Strategy to Train Strong
    Classifiers with Localizable Features".

    Adapted from https://github.com/clovaai/CutMix-PyTorch/blob/2d8eb68faff7fe4962776ad51d175c3b01a25734/train.py#L279

    With probability ``cutmix_prob``, a random rectangle of each sample (and
    of its label map) is replaced by the same rectangle from a randomly
    permuted sample of the batch.  Both tensors are modified in place.
    """
    def __init__(
        self,
        beta: int=1,
        cutmix_prob: float=0.3,
        device: str='cpu',
    ) -> None:
        super().__init__()
        # Beta(beta, beta) parameter for sampling the mixing ratio lam.
        self.beta = beta
        # Probability of applying CutMix to a given batch.
        self.cutmix_prob = cutmix_prob
        # Device used for the random batch permutation.
        self.device = device

    def forward(self, inputs: Tensor, labels: Tensor) -> tuple:
        """Apply CutMix (maybe) and return the (inputs, labels) pair.

        inputs: (N, C, dim2, dim3) image batch, mutated in place.
        labels: label maps, mutated in place.
            NOTE(review): labels are indexed without a channel dim, so this
            assumes (N, H, W) segmentation masks -- confirm against callers.

        Returns:
            tuple: the (possibly mixed) inputs and labels.
        """
        r = np.random.rand(1)
        if self.beta > 0 and r < self.cutmix_prob:
            # generate mixed sample; lam controls the cut area via rand_bbox.
            lam = np.random.beta(self.beta, self.beta)
            rand_index = torch.randperm(inputs.size()[0]).to(self.device)
            bbx1, bby1, bbx2, bby2 = rand_bbox(inputs.size(), lam)
            inputs[:, :, bbx1:bbx2, bby1:bby2] = inputs[rand_index, :, bbx1:bbx2, bby1:bby2]
            labels[:, bbx1:bbx2, bby1:bby2] = labels[rand_index, bbx1:bbx2, bby1:bby2]
        return inputs, labels
6667384 | <gh_stars>100-1000
#!/usr/bin/env python3
"""
Simple script to convert .org url lines to .md urls
+ [[http://www.h4labs.com][h4labs]]
to
+ [h4labs](http://www.h4labs.com)
If the line doesn't match, simply print the line.
bin/org_url2md.py haskell.md
"""
import os
import sys
import re
prog = re.compile(r"\+ \[\[(.*?)\]\[(.*?)\]\]")
def convert2md(line):
    """Print the markdown form of an org-mode link line, or echo the line.

    ``+ [[url][desc]]`` becomes ``+ [desc](url)``; any line that does not
    match the module-level ``prog`` pattern is printed stripped, unchanged.
    """
    stripped = line.strip()
    match = prog.match(stripped)
    if match and len(match.groups()) == 2:
        url, desc = match.group(1), match.group(2)
        print("+ [{}]({})".format(desc, url))
        return
    print(stripped)
def org2md(fname):
    """Convert every line of the org file *fname*, printing the results.

    Streams the file line by line; iterating the file object directly is the
    idiomatic replacement for the previous manual readline() loop and
    behaves identically (readline() returns '' only at EOF).
    """
    with open(fname) as fp:
        for line in fp:
            convert2md(line)
# CLI entry: first positional argument is the org file to convert.
filepath = sys.argv[1]
if not os.path.isfile(filepath):
    print("File path {} does not exist. Exiting...".format(filepath))
    sys.exit()
org2md(filepath)
| StarcoderdataPython |
353626 | from connection import client
class My_mongo():
    """
    Small MongoDB manager written to learn how to interact with MongoDB
    through pymongo.  Supports creating/listing collections, inserting one or
    many documents, and finding/deleting/listing documents.
    """

    def __init__(self, db_name, col_name):
        # Shared pymongo client from the connection module.
        self.client = client
        self.my_db = client[db_name]
        self.collection = self.my_db[col_name]

    def create_collection(self, name):
        """Switch to collection *name*, reporting whether it already existed.

        MongoDB creates collections lazily, so both cases just bind it.
        """
        col_list = self.my_db.list_collection_names()
        if name in col_list:
            print("The collection exists.")
        else:
            print("The collection created.")
        self.collection = self.my_db[name]

    def show_collections(self):
        """Print and return the database's collection names."""
        col_list = self.my_db.list_collection_names()
        for name in col_list:
            print(name)
        return col_list

    def insert_element(self, element):
        """Insert a single document and report its generated id."""
        inserted = self.collection.insert_one(element)
        # FIX: message typo "dabatase" -> "database".
        print(f"id: {inserted.inserted_id} inserted to database")

    def insert_elements(self, elements):
        """Insert many documents and report the generated ids."""
        inserted = self.collection.insert_many(elements)
        print(f"{len(elements)} element added to database.\nInserted ids: {inserted.inserted_ids}")

    def show_elements(self):
        """Print and return all documents of the current collection.

        FIX: the cursor used to be iterated and then returned already
        exhausted; it is now materialized into a list so the returned value
        is still usable by callers.
        """
        elements = list(self.collection.find())
        for element in elements:
            print(element)
        return elements

    def find_element(self, my_filter):
        """Return the first document matching *my_filter*, or None."""
        element = self.collection.find_one(my_filter)
        if element:
            return element
        else:
            print("Element not found")

    def find_elements(self, my_filter, number=10):
        """Print and return up to *number* documents matching *my_filter*.

        FIX: materialized to a list -- the previous version returned an
        exhausted cursor, and a pymongo Cursor is always truthy, so the
        "Element not found" branch could never trigger.
        """
        elements = list(self.collection.find(my_filter).limit(number))
        if elements:
            for element in elements:
                print(element)
        else:
            print("Element not found")
        return elements

    def delete_element(self, element):
        """Find *element*, show it, and delete it after interactive confirmation."""
        element = self.collection.find_one(element)
        if element:
            print("Found element:", element)
            user_input = input("Want to delete this element? (Y/N)\n>>")
            if user_input.lower() == "y":
                self.collection.delete_one(element)
                print("Element Deleted..")
        else:
            print("Element not deleted")
# Manual smoke-test driver; constructing My_mongo requires a live MongoDB
# connection (the `client` imported from the connection module).
if __name__ == "__main__":
    db = My_mongo("test_db", "products")
    # db.create_collection("products")
    # db.show_collections()
    # product = {"Name": "Dell 4860", "Price": 2000, "Date": "03/02/2016", "Status": "intel i7, 8gb, 1tb hdd"}
    # db.insert_element(product)
    # products = [{"Name": "Samsung s7", "Price": 3000, "Date": "03/02/2016", "Status": "Used"},
    #             {"Name": "Iphone 6", "Price": 5000, "Date": "03/02/2018", "Status": "Screen Shattered"},
    #             {"Name": "Nokia 3", "Price": 1000, "Date": "03/02/2018", "Status": "None"}]
    # db.insert_elements(products)
    # db.show_elements()
    my_filter = {"Price": 5000}
    # element = db.find_element(my_filter)
    # print(element)
    # elements = db.find_elements(my_filter, number=2)
    # db.delete_element(my_filter)
| StarcoderdataPython |
1639580 | <filename>alldigitalradio/hardware/qmtech_wukong.py<gh_stars>0
from alldigitalradio.io.xilinx_gtp import XilinxGTPSerdes
from nmigen.build import Resource, Pins, Attrs
def load():
    """Return the QMTech Wukong platform class (with GTP transceiver pins
    registered) and the matching Xilinx GTP serdes implementation.

    NOTE(review): ``resources`` is mutated on the *class*, so calling load()
    more than once appends duplicate Resource entries -- confirm single-call
    usage.
    """
    from boards.qmtech_wukong import Platform as wukong
    # Differential refclk and GTP RX/TX lane pins for the transceiver.
    wukong.resources += [
        Resource("clk_n", 0, Pins("AB13", dir="i")),
        Resource("clk_p", 0, Pins("AA13", dir="i")),
        Resource("tx_n", 0, Pins("AD10")),
        Resource("tx_p", 0, Pins("AC10")),
        Resource("rx_n", 0, Pins("AD12")),
        Resource("rx_p", 0, Pins("AC12"))
    ]
    return (wukong, XilinxGTPSerdes)
| StarcoderdataPython |
5199016 | <reponame>Pallagani-Praveen/CodeSignal
# Question Link : https://app.codesignal.com/challenge/LNukgzSzfY88RzNjF
# Level : Easy
# Solution is right below:-
def boxBlur(image):
    """Return *image* blurred with a 3x3 box filter (integer mean).

    Each output cell is the floor of the mean of the 3x3 neighbourhood
    centred on the corresponding interior input cell, so the result has two
    fewer rows and columns than the input.
    """
    rows, cols = len(image), len(image[0])
    return [
        [
            sum(image[r + dr][c + dc]
                for dr in (-1, 0, 1)
                for dc in (-1, 0, 1)) // 9
            for c in range(1, cols - 1)
        ]
        for r in range(1, rows - 1)
    ]
print('Result : ',boxBlur([[1,1,1], [1,7,1], [1,1,1]]))
| StarcoderdataPython |
6626153 | import argparse
import random
from datetime import timedelta
from operator import getitem
from os import listdir, makedirs, remove
from os.path import join, exists, getsize
import h5py
import librosa
import numpy as np
import pandas as pd
import soundfile as sf
from python_speech_features import mfcc
from scipy.io import wavfile
from tqdm import tqdm
from corpus.corpus import DeepSpeechCorpus
from util.audio_util import distort_audio
from util.corpus_util import get_corpus
from util.log_util import create_args_str
# CLI definition for the corpus-export tool; parsed eagerly at import time.
parser = argparse.ArgumentParser(description="""Export speech segments of corpus to CSV files and synthesize data""")
parser.add_argument('-id', type=str, required=True,
                    help='target-ID for processed files')
parser.add_argument('-s', '--source_dir', type=str, required=True,
                    help='id of corpus or path to corpus to export')
parser.add_argument('-t', '--target_dir', type=str, required=True,
                    help='target directory to save results')
parser.add_argument('-l', '--language', type=str, required=True,
                    help='language to use')
parser.add_argument('-f', '--force', action='store_true',
                    help='(optional) force override existing files. Default: False')
parser.add_argument('-x', '--synthesize', action='store_true',
                    help='whether to create synthesized data')
parser.add_argument('-num', '--include_numeric', action='store_true', default=False,
                    help='(optional) whether to include transcripts with numeric chars (default: False)')
# FIX: the -min help text said "maximum" (copy-pasted from -max).
parser.add_argument('-min', '--min_duration', nargs='?', type=int, default=0,
                    help='(optional) minimum number of speech segments minutes to process (default: 0, no minimum)')
parser.add_argument('-max', '--max_duration', nargs='?', type=int, default=0,
                    help='(optional) maximum number of speech segments minutes to process (default: all)')
parser.add_argument('-p', '--precompute_features', action='store_true',
                    help='(optional) precompute MFCC features in HDF5 format. Default: False')
args = parser.parse_args()
def main(args):
    """Drive the full export: split corpus segments, write CSVs, and
    optionally precompute features.
    """
    print(create_args_str(args))
    # Resolve output dir and effective flags (may prompt for confirmation).
    target_dir, corpus_id, force, synthesize, min_dur, max_dur, precompute_features = setup(args)

    corpus = get_corpus(args.source_dir, args.language)
    corpus.summary()

    print(f'processing {corpus.name} corpus and saving split segments in {target_dir}')
    csv_train, csv_dev, csv_test = extract_segments(target_dir, corpus_id, corpus, synthesize, min_dur, max_dur, force)
    print(f'done! All files are in {target_dir}')

    # Re-load the exported CSVs as a DeepSpeech corpus to summarize the result.
    corpus = DeepSpeechCorpus(args.language, csv_train, csv_dev, csv_test)
    corpus.summary()

    if precompute_features:
        print(f'pre-computing features')
        compute_features(csv_train, csv_dev, csv_test, target_dir, force)
def setup(args):
target_dir = join(args.target_dir, args.id)
if not exists(target_dir):
print(f'target directory {target_dir} does not exist. Creating...')
makedirs(target_dir)
force = args.force
if not force and listdir(target_dir):
inp = input(f"""
WARNING: target directory {target_dir} already exists. Override?
(this will overwrite all existing files in {target_dir} with the same names!!!) (Y/n)
""")
force = inp.lower() in ['', 'y']
return target_dir, args.id, force, args.synthesize, args.min_duration, args.max_duration, args.precompute_features
def extract_segments(target_dir, corpus_id, corpus, synthesize=False, min_dur=0, max_dur=0, force=False):
    """Export the corpus's train/dev/test speech segments and metadata CSVs.

    Only the training subset is ever synthesized.  Returns the three CSV
    paths (train, dev, test).

    NOTE(review): reads the module-level ``args`` for include_numeric instead
    of taking a parameter -- confirm that is intended.
    """
    train_set = corpus.train_set(numeric=args.include_numeric)
    dev_set = corpus.dev_set(numeric=args.include_numeric)
    test_set = corpus.test_set(numeric=args.include_numeric)

    print(f'training length is: {timedelta(seconds=sum(seg.duration for seg in train_set))}')
    print(f'dev length is: {timedelta(seconds=sum(seg.duration for seg in dev_set))}')
    print(f'test length is: {timedelta(seconds=sum(seg.duration for seg in test_set))}')

    print(f'processing training segments')
    csv_train = process_subset('train', train_set, synthesize, corpus_id, target_dir, min_dur, max_dur, force)

    print(f'processing validation segments (data is only synthesized for training set)')
    csv_dev = process_subset('dev', dev_set, False, corpus_id, target_dir, min_dur, max_dur, force)

    # FIX: this progress message previously said 'validation' (copy-paste)
    # while processing the *test* split.
    print(f'processing test segments (data is only synthesized for training set)')
    csv_test = process_subset('test', test_set, False, corpus_id, target_dir, min_dur, max_dur, force)

    return csv_train, csv_dev, csv_test
def process_subset(subset_id, subset, synthesize, corpus_id, target_dir, min_dur, max_dur, force):
    """Write one subset's speech segments to disk and its metadata to a CSV.

    Returns the path of the CSV file that was written.
    """
    csv_path = join(target_dir, f'{corpus_id}-{subset_id}.csv')
    metadata = split_speech_segments(subset, corpus_id, subset_id, target_dir,
                                     synthesize, min_dur, max_dur, force)
    print(f'saving metadata in {csv_path}')
    metadata.to_csv(csv_path, index=False)
    return csv_path
def split_speech_segments(subset, corpus_id, subset_id, target_dir, synthesize, min_dur, max_dur, force):
    """Write each speech segment of `subset` to a 16-bit PCM WAV in `target_dir` and
    return a DataFrame (wav_filename, wav_filesize, wav_length, transcript) sorted
    by segment length.

    When `max_dur` (minutes) is set, the shortest segments are kept until the budget
    is reached. When `synthesize` is set, eight distorted variants per segment
    (shift/echo/pitch up+down/tempo fast+slow/volume loud+quiet) are also written,
    and a post-loop fill adds randomly distorted copies.

    NOTE(review): the fill loop below targets a hard-coded 1000 minutes; it looks
    like it should use `min_dur` -- confirm before relying on `min_dur`.
    """
    total = len(subset)
    if max_dur:
        print(f'trying to cap numer of speech segments to a total length of {max_dur} minutes. '
              f'Speech segements will be sorted by length before capping.')
        tot_duration = sum(s.duration for s in subset) / 60
        if tot_duration < max_dur:
            print(f'WARNING: maximum length of corpus was set to {max_dur} minutes, but total length of all '
                  f'speech segments is only {tot_duration} minutes! '
                  f'-> using all entries from corpus ({total} speech segments)')
        else:
            # find the first index where the cumulative duration exceeds the budget
            for i, s in enumerate(sorted(subset, key=lambda s: s.duration)):
                if sum(s.duration for s in subset[:i]) > max_dur * 60:
                    break
            print(f'total length of corpus will be capped at {max_dur} minutes ({i} speech segments)')
            total = i
            subset = subset[:i]
    segments = []  # raw (id, audio, rate, transcript) tuples, reused by the fill loop below
    files = []     # rows of the returned DataFrame
    sum_duration = 0
    progress = tqdm(subset, total=total, unit=' speech segments')
    for i, segment in enumerate(progress):
        segment_id = f'{corpus_id}-{subset_id}-{i:0=4d}'
        wav_path = f'{segment_id}.wav'
        wav_path_absolute = join(target_dir, wav_path)
        # only (re)write the WAV when missing, empty, or forced
        if not exists(wav_path_absolute) or not getsize(wav_path_absolute) or force:
            sf.write(wav_path_absolute, segment.audio, segment.rate, subtype='PCM_16')
        segments.append((segment_id, segment.audio, segment.rate, segment.transcript))
        files.append((wav_path, getsize(wav_path_absolute), segment.duration, segment.transcript))
        sum_duration += segment.duration
        if synthesize:
            audio, rate = librosa.load(wav_path_absolute, sr=16000, mono=True)
            wav_shift = f'{segment_id}-shift.wav'
            wav_echo = f'{segment_id}-echo.wav'
            wav_high = f'{segment_id}-high.wav'
            wav_low = f'{segment_id}-low.wav'
            wav_fast = f'{segment_id}-fast.wav'
            wav_slow = f'{segment_id}-slow.wav'
            wav_loud = f'{segment_id}-loud.wav'
            wav_quiet = f'{segment_id}-quiet.wav'
            # one variant per distortion type, each with a randomly drawn strength
            shift = random.uniform(0.5, 1.5)
            wav_shift_path = join(target_dir, wav_shift)
            wav_shift_len = synthesize_and_write(audio, rate, wav_shift_path, shift=shift, force=force)
            files.append((wav_shift, getsize(wav_shift_path), wav_shift_len, segment.transcript))
            echo = random.randint(30, 100)
            wav_echo_path = join(target_dir, wav_echo)
            wav_echo_len = synthesize_and_write(audio, rate, wav_echo_path, echo=echo, force=force)
            files.append((wav_echo, getsize(wav_echo_path), wav_echo_len, segment.transcript))
            higher = random.uniform(1.5, 5)
            wav_high_path = join(target_dir, wav_high)
            wav_high_len = synthesize_and_write(audio, rate, wav_high_path, pitch=higher, force=force)
            files.append((wav_high, getsize(wav_high_path), wav_high_len, segment.transcript))
            lower = random.uniform(-5, -1.5)
            wav_low_path = join(target_dir, wav_low)
            wav_low_len = synthesize_and_write(audio, rate, wav_low_path, pitch=lower, force=force)
            files.append((wav_low, getsize(wav_low_path), wav_low_len, segment.transcript))
            faster = random.uniform(1.2, 1.6)
            wav_fast_path = join(target_dir, wav_fast)
            wav_fast_len = synthesize_and_write(audio, rate, wav_fast_path, tempo=faster, force=force)
            files.append((wav_fast, getsize(wav_fast_path), wav_fast_len, segment.transcript))
            slower = random.uniform(0.6, 0.8)
            wav_slow_path = join(target_dir, wav_slow)
            wav_slow_len = synthesize_and_write(audio, rate, wav_slow_path, tempo=slower, force=force)
            files.append((wav_slow, getsize(wav_slow_path), wav_slow_len, segment.transcript))
            louder = random.randint(5, 15)
            wav_loud_path = join(target_dir, wav_loud)
            wav_loud_len = synthesize_and_write(audio, rate, wav_loud_path, volume=louder, force=force)
            files.append((wav_loud, getsize(wav_loud_path), wav_loud_len, segment.transcript))
            quieter = random.randint(-15, 5)
            wav_quiet_path = join(target_dir, wav_quiet)
            wav_quiet_len = synthesize_and_write(audio, rate, wav_quiet_path, volume=quieter, force=force)
            files.append((wav_quiet, getsize(wav_quiet_path), wav_quiet_len, segment.transcript))
        description = wav_path
        if max_dur:
            description += f' {timedelta(seconds=sum_duration)}'
        progress.set_description(description)
        if max_dur and sum_duration > max_dur * 60:
            break
    # recompute from `files` so the synthesized variants are included
    sum_duration = sum(getitem(t, 2) for t in files)
    # precedence: synthesize or (min_dur and sum<min_dur*60) or (max_dur and sum<max_dur*60)
    if synthesize or min_dur and sum_duration < min_dur * 60 or max_dur and sum_duration < max_dur * 60:
        print(f'total length: {timedelta(seconds=sum_duration)}')
        print(f'filling up with distorted data until {timedelta(minutes=1000)} is reached')
        i = 0
        while sum_duration < 1000 * 60:
            i += 1
            for segment_id, audio, rate, transcript in tqdm(segments, unit=' segments'):
                # one randomly distorted copy per original segment, all distortions combined
                shift = random.uniform(0.5, 1.5)
                pitch = random.uniform(-5, 5)
                tempo = random.uniform(0.6, 1.6)
                volume = random.randint(-15, 15)
                echo = random.randint(30, 100)
                wav_distort = f'{segment_id}-distorted-{i}.wav'
                wav_distort_path = join(target_dir, wav_distort)
                wav_distort_len = synthesize_and_write(audio, rate, wav_distort_path, shift=shift, pitch=pitch,
                                                       tempo=tempo, volume=volume, echo=echo, force=force)
                files.append((wav_distort, getsize(wav_distort_path), wav_distort_len, transcript))
                sum_duration += wav_distort_len
                if sum_duration > 1000 * 60:
                    break
        print(f'total length: {timedelta(seconds=sum_duration)}')
    return pd.DataFrame(data=files, columns=['wav_filename', 'wav_filesize', 'wav_length', 'transcript']).sort_values(
        'wav_length')
def synthesize_and_write(audio, rate, wav_path, shift=0, pitch=0, tempo=1, volume=0, echo=0, force=False):
    """Apply the given distortions to ``audio`` and write the result as 16-bit PCM.

    The file is only (re)written when missing, empty, or ``force`` is set.
    Returns the length of the distorted signal in seconds.
    """
    distorted = distort_audio(audio, rate,
                              shift_s=shift,
                              pitch_factor=pitch,
                              tempo_factor=tempo,
                              volume=volume,
                              echo=echo)
    if force or not exists(wav_path) or not getsize(wav_path):
        sf.write(wav_path, distorted, rate, subtype='PCM_16')
    return len(distorted) / rate
def compute_features(csv_train, csv_valid, csv_test, target_dir, force):
    """Precompute MFCC features for the three subsets into ``features_mfcc.h5``.

    The HDF5 file is only (re)created when missing or when ``force`` is set.
    """
    df_train = pd.read_csv(csv_train)
    df_dev = pd.read_csv(csv_valid)
    df_test = pd.read_csv(csv_test)

    h5_file_path = join(target_dir, 'features_mfcc.h5')
    if exists(h5_file_path) and force:
        remove(h5_file_path)

    if not exists(h5_file_path):
        # mode='a' keeps the historical h5py default; h5py >= 3 defaults to
        # read-only and would fail on a fresh file
        with h5py.File(h5_file_path, 'a') as h5_file:
            create_subset(h5_file, 'train', df_train)
            # fix: the dev frame was stored under 'test' and the test frame
            # under 'valid' (swapped); write each frame to its own group
            create_subset(h5_file, 'valid', df_dev)
            create_subset(h5_file, 'test', df_test)
def create_subset(h5_file, name, df):
    """Append one subset's MFCC features, transcripts and durations to ``h5_file``.

    Creates three resizable datasets under the group ``name``:
    ``features`` (flattened float32 MFCC frames, 26 cepstra per timestep),
    ``labels`` (transcripts) and ``durations`` (segment lengths in seconds).
    """
    # hoist the dataset handles -- the original re-fetched them on every iteration
    features = h5_file.create_dataset(f'{name}/features', shape=(0,), maxshape=(None,), dtype=h5py.special_dtype(vlen=np.float32))
    labels = h5_file.create_dataset(f'{name}/labels', shape=(0,), maxshape=(None,), dtype=h5py.special_dtype(vlen=str))
    durations = h5_file.create_dataset(f'{name}/durations', shape=(0,), maxshape=(None,))
    progress = tqdm(zip(df['wav_filename'], df['wav_length'], df['transcript']), total=len(df.index))
    for wav_file_path, wav_length, transcript in progress:
        progress.set_description(f'{name}: {wav_file_path}')
        rate, audio = wavfile.read(wav_file_path)
        inp = mfcc(audio, samplerate=rate, numcep=26)  # (num_timesteps x num_features)
        features.resize(features.shape[0] + 1, axis=0)
        features[features.shape[0] - 1] = inp.flatten().astype(np.float32)
        labels.resize(labels.shape[0] + 1, axis=0)
        labels[labels.shape[0] - 1] = transcript
        durations.resize(durations.shape[0] + 1, axis=0)
        # fix: previously stored the WAV file size here; the dataset is named
        # 'durations' and the frame's 'wav_length' column holds the true length
        durations[durations.shape[0] - 1] = wav_length
if __name__ == '__main__':
    # NOTE(review): `main` and `args` are not visible in this chunk -- `args` is
    # presumably an argparse namespace built at module level; confirm above.
    main(args)
| StarcoderdataPython |
11213103 | # webcam.py
import cv2
import time
import ipdb
from object_detection.object_detection import ObjDet
from pose_estimation.pose_estimation import Pose
from activity_recognition.activity_recognition import ActRec
from distance import get_shoulder_dist_from_pe, get_obj_obj_dist
class TimeMeter():
    """Measure elapsed time and frame rate.

    Two usage patterns are supported:
    * "time" mode: call start() before a piece of work and count() after it;
      `time` then holds the last lap and `avg` a running mean.
    * "fps" mode: call count() once per frame without restarting; whenever more
      than `display_time` seconds have elapsed, `fps` is refreshed and the
      meter restarts itself.
    """

    def __init__(self, display_time=2, topic=None, verbose=False):
        self.topic = topic
        self.verbose = verbose
        self.display_time = display_time
        self.reset()

    def reset(self):
        """Zero every accumulator and restart the reference timestamp."""
        self.start_time = time.time()
        self.avg = 0
        self.sum = 0
        self.temp_count = 0
        self.acc_count = 0
        self.time = 0
        self.fps = 0

    def start(self):
        """Restart the reference timestamp only (accumulators are kept)."""
        self.start_time = time.time()

    def count(self):
        """Record one measurement (one lap in "time" mode, one frame in "fps" mode)."""
        self.time = time.time() - self.start_time
        self.sum += self.time
        self.temp_count += 1
        self.acc_count += 1
        self.avg = self.sum / self.acc_count
        if self.verbose and self.topic is not None:
            print(f"{self.topic} | cur time: {self.time:.2f} | avg time: {self.avg}")
        # once the display window has elapsed, publish the fps and restart
        if self.time > self.display_time:
            self.fps = round(self.temp_count / self.time, ndigits=2)
            if self.verbose:
                print(f"FPS: {self.fps}")
            self.temp_count = 0
            self.start()
def flush_var_in_frame(frame, flush_var):
    """Draw the entries of `flush_var` onto `frame` and return the frame.

    Scalar entries are stacked top-left (wrapping to a new column at the bottom
    edge); list entries are expected to be `[value, x1, y1, cx, cy]` and are
    drawn at their own (clamped) position plus a marker circle at (cx, cy).
    """
    # fix: numpy/OpenCV images are shaped (rows, cols, channels), i.e.
    # (height, width, channels) -- the original unpacking swapped w and h,
    # so the wrap test and the coordinate clamps used the wrong axes.
    (h, w, c) = frame.shape
    # Initialize positions
    x, y = [10, 10]
    # Define font size
    font_size = 25
    # Define x slide
    x_slide = 120
    # Loop over variables
    for key in flush_var:
        # Check if the key is a list or a value
        if not isinstance(flush_var[key], list):
            # Calculate postion
            y += font_size
            # Flush info on frame
            frame = cv2.putText(frame, f"{key}: {flush_var[key]}", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 1)
            # If got in the end of vertical axis, slide to the next column
            if (y + font_size >= h):
                x += x_slide
        else:
            # Text with key point pre defined
            value, x1, y1, cx, cy = flush_var[key]
            # Clamp the text anchor into the image bounds
            y1 = h if y1 > h else y1
            x1 = w if x1 > w else x1
            y1 = 0 if y1 < 0 else y1
            x1 = 0 if x1 < 0 else x1
            # Flush info on frame
            frame = cv2.putText(frame, f"{key}: {value}", (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 255), 1)
            frame = cv2.circle(frame, (cx, cy), 1, (0, 255, 255))
    return frame
def main_cam(opt):
    ''' Run main script with webcam.

    Captures frames from the default webcam (device 0) and, depending on the
    flags in `opt`, runs object detection (`opt.od`), pose estimation
    (`opt.pe`) and activity recognition (`opt.ar`) every
    `opt.webcam_calc_x_frames` frames, overlaying results and FPS on the video.
    Press 'q' in the preview window to quit.
    '''
    # Create a dict to save flush variables
    flush_var = {}
    # Instanciate the video capture object cap
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise IOError("We cannot open webcam.")
    # Initialize fps calculator
    fps_meter = TimeMeter(verbose=opt.verbose)
    if opt.od:
        # Instanciate the Object Detector Model
        OD_obj = ObjDet(opt)
        # Initialize detection timing
        OD_meter = TimeMeter(topic='Object Detection', verbose=opt.verbose)
    if opt.pe:
        # Instanciate the Pose Estimation Model
        PE_obj = Pose(opt)
        # Initialize detection timing
        PE_meter = TimeMeter(topic='Pose Estimation', verbose=opt.verbose)
    if opt.ar:
        # Instanciate the Activity Recognition Model
        AR_obj = ActRec(opt)
        # Initialize detection timing
        AR_meter = TimeMeter(topic='Activity Recognition', verbose=opt.verbose)
    # Main loop to flush webcam image
    while True:
        # Read from webcam
        _, frame = cap.read()
        # Save frame in another variable to keep original frame
        img = frame.copy()
        # AR Evaluate
        if opt.ar:
            # Save img in batch, because activity recognition needs 16 consecutives frames to execute
            AR_obj.save_in_clip(img)
        # Evaluate Models every X frames
        if fps_meter.acc_count % opt.webcam_calc_x_frames == 0:
            # AR Evaluate
            if opt.ar:
                # Start time count for AR
                AR_meter.start()
                # Apply Detection
                act_scr, act_cls = AR_obj.do_detect()
                # Save class for flush in image
                if act_cls:
                    flush_var['action']=act_cls
                # Do counting for AR
                AR_meter.count()
            # OD Evaluate
            if opt.od:
                # Start time count for OD
                OD_meter.start()
                # Resize image
                img = cv2.resize(img, (opt.od_in_w, opt.od_in_h), interpolation=cv2.INTER_AREA)
                # Apply Detection
                OD_obj.do_detect(img)
                # Print boxes and labels in the image
                img = OD_obj.plot_boxes_cv2(img, OD_obj.boxes[0])
                # Do counting for OD
                OD_meter.count()
            # PE Evaluate
            if opt.pe:
                # Start time count for PE
                PE_meter.start()
                # Apply Detection
                img = PE_obj.do_detect(img)
                # Do counting for PE
                PE_meter.count()
                # Calculate shoulder distance for every detected person
                distances = get_shoulder_dist_from_pe(candidate=PE_obj.body_candidate, subset=PE_obj.body_subset)
                for i in range(len(distances)):
                    flush_var[f"p{i+1}"] = distances[i]
            #import ipdb; ipdb.set_trace()
            # Person-to-object distance (only meaningful when both models ran;
            # `distances` is only defined when opt.pe is set)
            if opt.pe and opt.od:
                i=0
                for objectd in OD_obj.distances:
                    if len(distances)>0 and objectd.label == 'cup':
                        flush_var[f"p_obj{i+1}"] = get_obj_obj_dist(obj1_pos=[distances[0][3], distances[0][4]],
                                                                    obj2_pos=[objectd.centerx, objectd.centery],
                                                                    img_pos=[int(img.shape[1]/2), int(img.shape[0]/2)],
                                                                    obj1_dist=distances[0][0],
                                                                    obj2_dist=objectd.dist,
                                                                    factor=objectd.factor)
                    i+=1
        # Calculate FPS
        fps_meter.count()
        flush_var['fps']=fps_meter.fps
        # Flush variables in frame
        img = flush_var_in_frame(img, flush_var)
        # Clean Flush Dict (keep only the last recognized action between detections)
        flush_var = {"action": flush_var['action']} if 'action' in flush_var.keys() else {}
        # Keep flushing OD result
        if opt.od:
            # Print boxes and labels in the image
            img = OD_obj.plot_boxes_cv2(img, OD_obj.boxes[0])
        # Show frame with flushes
        cv2.imshow("Web cam input", img)
        # Stop key 'q' to exit webcam input
        if cv2.waitKey(25) & 0xFF == ord("q"):
            break
    # Release webcam and close all cv2 windows
    cap.release()
cv2.destroyAllWindows() | StarcoderdataPython |
9764836 | #!/usr/bin/env python
# Density_Sampling/Density_Sampling.py
# Author: <NAME> for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: <EMAIL>, <EMAIL>
"""For a data-set comprising a mixture of rare and common populations,
density sampling gives equal weights to selected representatives
of those distinct populations.
Density sampling is a balancing act between signal and noise. Indeed, while
it increases the prevalence of rare populations, it also increases the prevalence
of noisy sample points that would happen to have their local densities larger than
an outlier density computed by Density_Sampling.
An illustration of how to use the present module is in order:
>>> iris = datasets.load_iris()
>>> Y = iris.target
>>> X_reduced = PCA(n_components = 3).fit_transform(iris.data)
>>> plot_PCA(X_reduced, Y, 'the whole Iris data-set')
>>> sampled_indices = density_sampling(X_reduced, metric = 'euclidean', desired_samples = 50)
>>> downsampled_X_reduced = X_reduced[sampled_indices, :]
>>> downsampled_Y = Y[sampled_indices]
>>> plot_PCA(downsampled_X_reduced, downsampled_Y, 'the Iris data-set\ndown-sampled to about 50 samples')
Reference
---------
<NAME>., <NAME>., <NAME>. and <NAME>.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
"""
import numbers
import numpy as np
import operator
import psutil
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import kneighbors_graph
from sklearn.neighbors import radius_neighbors_graph
from sys import exit
from tempfile import NamedTemporaryFile
__all__ = ['get_local_densities', 'density_sampling']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictonary
        Holds the current values for the total, free and used memory of the system.
    """
    # fix: '.iteritems()' was removed in Python 3, and psutil's svmem result is
    # a namedtuple whose documented dict accessor is '_asdict()'
    mem_info = dict()
    for k, v in psutil.virtual_memory()._asdict().items():
        mem_info[k] = int(v)
    return mem_info
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    mem_free = memory()['free']
    # (minimum free memory, safety margin left unused), in KiB, largest first;
    # replaces the original repetitive if/elif ladder with identical behavior
    thresholds = [
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    ]
    for min_free, margin in thresholds:
        if mem_free > min_free:
            return int(((mem_free - margin) * 1000) / (4 * n * N))
    print("\nERROR: Density_Sampling: get_chunk_size: this machine does not "
          "have enough free memory.\n")
    exit(1)
def median_min_distance(data, metric):
    """Median nearest-neighbor distance of the sample points in 'data'.

    Builds a 1-nearest-neighbor graph of 'data' under the distance 'metric'
    (any metric accepted by scikit-learn's DistanceMetric) and returns the
    median of the resulting nearest-neighbor distances, rounded to 4 decimals.

    Parameters
    ----------
    data : array of shape (n_samples, n_features)

    metric : string

    Returns
    -------
    median_min_dist : float
    """
    data = np.atleast_2d(data)
    neighbor_graph = kneighbors_graph(data, 1, mode = 'distance', metric = metric, include_self = False)
    # the sparse graph's '.data' holds exactly one distance per sample
    median_min_dist = np.median(neighbor_graph.data, overwrite_input = True)
    return round(median_min_dist, 4)
def get_local_densities(data, kernel_mult = 2.0, metric = 'manhattan'):
    """For each sample point of the data-set 'data', estimate a local density in feature
    space by counting the number of neighboring data-points within a particular
    region centered around that sample point.

    Parameters
    ----------
    data : array of shape (n_samples, n_features)
        The data-set, a fraction of whose sample points will be extracted
        by density sampling.

    kernel_mult : float, optional (default = 2.0)
        The kernel multiplier, which determine (in terms of the median of the distribution
        of distances among nearest neighbors) the extent of the regions centered
        around each sample point to consider for the computation of the local density
        associated to that particular sample point.

    metric : string, optional (default = 'manhattan')
        The distance metric used to determine the nearest-neighbor to each data-point.
        The DistanceMetric class defined in scikit-learn's library lists all available
        metrics.

    Returns
    -------
    local_densities : array of shape (n_samples,)
        The i-th entry of this vector corresponds to the local density of the i-th sample
        point in the order of the rows of 'data'.
    """
    data = np.atleast_2d(data)
    assert isinstance(kernel_mult, numbers.Real) and kernel_mult > 0
    kernel_width = kernel_mult * median_min_distance(data, metric)
    N_samples = data.shape[0]
    if 8.0 * get_chunk_size(N_samples, 1) > N_samples:
        # enough memory: build the full radius-neighbors graph in one shot
        A = radius_neighbors_graph(data, kernel_width, mode = 'connectivity', metric = metric, include_self = True)
        rows, _ = A.nonzero()
        with NamedTemporaryFile('w', delete = True, dir = './') as file_name:
            fp = np.memmap(file_name, dtype = int, mode = 'w+', shape = rows.shape)
            fp[:] = rows[:]
            _, counts = np.unique(fp, return_counts = True)
        local_densities = np.zeros(N_samples, dtype = int)
        # fix: 'xrange' was removed in Python 3; use 'range'
        for i in range(N_samples):
            local_densities[i] = counts[i]
    else:
        # low memory: compute pairwise distances chunk by chunk
        local_densities = np.zeros(N_samples, dtype = int)
        chunks_size = get_chunk_size(N_samples, 2)
        # fix: 'xrange' was removed in Python 3; use 'range'
        for i in range(0, N_samples, chunks_size):
            chunk = data[i:min(i + chunks_size, N_samples)]
            D = pairwise_distances(chunk, data, metric, n_jobs = 1)
            D = (D <= kernel_width)
            local_densities[i + np.arange(min(chunks_size, N_samples - i))] = D.sum(axis = 1)
    return local_densities
def density_sampling(data, local_densities = None, metric = 'manhattan',
                     kernel_mult = 2.0, outlier_percentile = 0.01,
                     target_percentile = 0.05, desired_samples = None):
    """The i-th sample point of the data-set 'data' is selected by density sampling
    with a probability given by:

                                  |  0 if outlier_density > LD[i];
    P(keep the i-th data-point) = |  1 if outlier_density <= LD[i] <= target_density;
                                  |  target_density / LD[i] if LD[i] > target_density.

    Here 'LD[i]' denotes the local density of the i-th sample point of the data-set,
    whereas 'outlier_density' and 'target_density' are computed as particular percentiles
    of that distribution of local densities.

    Parameters
    ----------
    data : array of shape (n_samples, n_features)
        The data-set, a fraction of whose sample points will be extracted
        by density sampling.

    local_densities : array of shape (n_samples,), optional (default = None)
        The i-th entry of this vector corresponds to the local density of the i-th sample
        point in the order of the rows of 'data'.

    metric : string, optional (default = 'manhattan')
        The distance metric used to determine the nearest-neighbor to each data-point.
        The DistanceMetric class defined in scikit-learn's library lists all available
        metrics.

    kernel_mult : float, optional (default = 2.0)
        The kernel multiplier, which determine (in terms of the median of the distribution
        of distances among nearest neighbors) the extent of the regions centered
        around each sample point to consider for the computation of the local density
        associated to that particular sample point.

    outlier_percentile : float, optional (default = 0.01)
        Specify the outlier density as a percentile of the distribution of local densities.

    target_percentile : float, optional (default = 0.05)
        Specifiy the target density as a percentile of the distribution of local densities.
        Relevant only if 'desired_samples' is left unspecified.

    desired_samples : int, optional (default = None)
        The number of samples to be selected from the whole data-set such that members
        of rare populations and members of more common populations are roughly
        equally represented. To that purpose, a target density is computed that to selects about
        'desired_samples' data-points.

    Returns
    -------
    samples_kept : array of shape (n_selected_samples,)
        If the 'i'-th sample point of 'data' has been selected by a given instance of
        density sampling, number 'i' is featured in the array returned by
        the present function.
    """
    random_state = np.random.RandomState()
    data = np.atleast_2d(data)
    for x in (kernel_mult, outlier_percentile, target_percentile):
        assert isinstance(x, numbers.Real) and x > 0
    for x in (outlier_percentile, target_percentile):
        assert x <= 1.0
    if local_densities is None:
        local_densities = get_local_densities(data, kernel_mult, metric)
    # fix: 'reduce' is not a builtin in Python 3 and was never imported here;
    # '.size' is the product of the shape entries, which is what was meant
    if local_densities.size != max(local_densities.shape):
        raise ValueError("\nERROR: Density_Sampling: density_sampling: problem with "
                         "the dimensions of the vector of local densities provided.\n")
    else:
        local_densities = np.reshape(local_densities, local_densities.size)
    # NOTE(review): np.percentile expects percentiles in [0, 100] while these
    # parameters are asserted <= 1.0, so the default 0.01 is the 0.01-th
    # percentile, not 1% -- confirm this is intended.
    outlier_density = np.percentile(local_densities, outlier_percentile)
    target_density = np.percentile(local_densities, target_percentile)
    # discard outliers, i.e. points whose local density is at or below the cutoff
    samples_kept = np.where(local_densities > outlier_density)[0]
    N_kept = samples_kept.size
    local_densities = local_densities[samples_kept]
    if desired_samples is None:
        probs = np.divide(target_density + 0.0, local_densities)
        ind = np.where(probs > random_state.uniform(size = N_kept))[0]
        samples_kept = samples_kept[ind]
    elif desired_samples <= N_kept:
        # search for the target density whose expected number of kept samples
        # matches 'desired_samples'
        sorted_densities = np.sort(local_densities)
        temp = np.reciprocal(sorted_densities[::-1].astype(float))
        cdf = np.cumsum(temp)[::-1]
        target_density = (desired_samples + 0.0) / cdf[0]
        if target_density > sorted_densities[0]:
            temp = desired_samples - np.arange(1.0, N_kept + 1.0)
            possible_targets = np.divide(temp, cdf)
            ind = np.argmax(possible_targets < sorted_densities)
            target_density = possible_targets[ind]
        probs = np.divide(target_density + 0.0, local_densities)
        ind = np.where(probs > random_state.uniform(size = N_kept))[0]
        samples_kept = samples_kept[ind]
    else:
        print("\nERROR: Density_Sampling: density_sampling: 'desired_samples' has been "
              "assigned a value of {desired_samples}, larger than {N_kept}, "
              "the number of samples whose local densities are high enough "
              "(i.e. excluded are the local densities in the lowest {outlier_percentile} "
              "percentile).\n".format(**locals()))
        exit(1)
    return samples_kept
if __name__ == '__main__':
    import doctest
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from sklearn import datasets
    from sklearn.decomposition import PCA
    from time import sleep

    def plot_PCA(X_reduced, Y, title):
        # Render the first three principal components as a 3-D scatter plot,
        # displayed non-blocking for 3 seconds, then closed.
        fig = plt.figure(1, figsize = (10, 8))
        ax = Axes3D(fig, elev = -150, azim = 110)
        ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c = Y,
                   cmap = plt.cm.Paired)
        ax.set_title('First three PCA direction for {title}'.format(**locals()))
        ax.set_xlabel('1st eigenvector')
        ax.w_xaxis.set_ticklabels([])
        ax.set_ylabel('2nd eigenvector')
        ax.w_yaxis.set_ticklabels([])
        ax.set_zlabel('3rd eigenvector')
        ax.w_zaxis.set_ticklabels([])

        plt.show(block = False)
        sleep(3)
        plt.close()

    # Run the usage examples embedded in the module docstring.
    # NOTE(review): those examples build `iris`/`X_reduced` interactively and
    # reference `plot_PCA`; confirm they actually pass under doctest.
    doctest.testmod()
| StarcoderdataPython |
3569636 | #!/usr/bin/env python3
import argparse
import cfg_sample
import csv
import random
def main(args):
    """Run cfg_sample over every proverb in the dataset, each with several
    randomly drawn tag suffixes appended to the prompt."""
    data = list(import_proverb_dataset(args.dataset))
    print(f"tags: {args.tags}")
    print(f"Datalen {len(data)}. Example: {data[0]}")
    print(data[0:20])

    def gen_hint_combos(tags, chance):
        # Independently keep each tag with probability `chance`.
        kept = [tag for tag in tags if random.random() < chance]
        return "".join(f" | {tag}" for tag in kept)

    # Draw four fixed tag combinations, reused for every proverb.
    tag_combinations = [gen_hint_combos(args.tags, args.tag_threshold) for _ in range(4)]
    for example in data:
        for extra_tags in tag_combinations:
            args.prompts = [f"{example}{extra_tags}:5"]
            print(f"Trying cfg_sample with: {args.prompts}")
            cfg_sample.main(args)
def import_proverb_dataset(dataset_path: str):
    """Yield the proverb text (second column) of every row in the CSV at `dataset_path`.

    The header row is skipped. Uses `next(reader, None)` so an empty file simply
    yields nothing -- a bare `next(reader)` would raise StopIteration, which
    inside a generator becomes RuntimeError under PEP 479.
    """
    with open(dataset_path, encoding="utf8") as fIn:
        reader = csv.reader(fIn, delimiter=",", quoting=csv.QUOTE_MINIMAL)
        next(reader, None)  # Skip top column name; tolerate an empty file
        for row in reader:
            yield row[1]
if __name__ == "__main__":
    # CLI mirrors cfg_sample's options plus the dataset/tag knobs used above.
    p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument("--images", type=str, default=[], nargs="*", metavar="IMAGE", help="the image prompts")
    p.add_argument("--batch-size", "-bs", type=int, default=1, help="the number of images per batch")
    p.add_argument("--checkpoint", type=str, help="the checkpoint to use")
    p.add_argument("--device", type=str, help="the device to use")
    p.add_argument("--eta", type=float, default=0.0, help="the amount of noise to add during sampling (0-1)")
    p.add_argument("--init", type=str, help="the init image")
    p.add_argument(
        "--method",
        type=str,
        default="plms",
        choices=["ddpm", "ddim", "prk", "plms", "pie", "plms2"],
        help="the sampling method to use",
    )
    p.add_argument("--model", type=str, default="cc12m_1_cfg", choices=["cc12m_1_cfg"], help="the model to use")
    p.add_argument("-n", type=int, default=1, help="the number of images to sample")
    p.add_argument("--seed", type=int, default=0, help="the random seed")
    p.add_argument("--size", type=int, nargs=2, help="the output image size")
    p.add_argument(
        "--starting-timestep", "-st", type=float, default=0.9, help="the timestep to start at (used with init images)"
    )
    p.add_argument("--steps", type=int, default=50, help="the number of timesteps")
    p.add_argument("--outdir", type=str, default="./generated-images/", help="Directory to save output files to")
    p.add_argument(
        "--tags", type=str, default=[], nargs="*", help="extra prompts to append in random combinations to prompt"
    )
    p.add_argument("--tag-threshold", type=float, default=0.4, help="chance to randomly append to prompt")
    # fix: help text was a copy-paste of --outdir's ("Directory to save output files to")
    p.add_argument("--dataset", type=str, default="datasets/", help="Path to the proverb dataset CSV file")
    args = p.parse_args()
    main(args)
| StarcoderdataPython |
9705609 | from nlptools.preprocessing import *
# Demo of nlptools.preprocessing.preprocess, one option (or combination) per example.

# Use doQuoteNormalization to normalize all quotation marks to the ASCII ' and " chars:
t = "« Hello World » I’m ``Mick´´"
print(preprocess(t, doQuoteNormalization=True))
# Remove the html tags and unescape html entities:
t = "Hello <span>World</span>! ><"
print(preprocess(t, removeHtml=True, unescapeHtml=True))
# Reduce blank spaces, remove accents and reduce long char sequences:
t = " Hello Béà \t !!!!!!! \n\n \n Fine?"
print(preprocess(t, doReduceBlank=True, stripAccents=True, doReduceCharSequences=True, charSequencesMaxLength=3))
# Normalize ASCII emoticons to the equivalent utf-8 emoji char:
t = ":-) These are emojis ;) 😣 😀 :o)"
print(preprocess(t, doNormalizeEmojis=True))
# Normalize specific chars (for example ━, 一 and – are replaced by the normal form —, … is replaced by ...):
t = "Hello guy… I am━actually not━ok!"
print(preprocess(t, doSpecialMap=True))
# Remove unknown chars and badly encoded chars:
t = "Hello 10€ and 8$ ® 字 � are 20% µ ç"
print(preprocess(t, doBadlyEncoded=True, replaceUnknownChars=True, doReduceBlank=True, stripAccents=True))
# Remove urls:
t = "Pls visit http://test.com !!"
print(preprocess(t, doRemoveUrls=True))
| StarcoderdataPython |
4972788 | import os
import fnmatch
def getFileList(path_to_folder):
    """Recursively list files under `path_to_folder` whose names contain a dot
    (i.e. match the glob '*.*'), returning their full paths in walk order."""
    found = []
    for folder, _subdirs, names in os.walk(path_to_folder):
        found.extend(os.path.join(folder, name) for name in fnmatch.filter(names, '*.*'))
    return found
4999496 | from fastapi.exceptions import HTTPException
from fastapi import APIRouter, status, Body, Depends
from fastapi.encoders import jsonable_encoder
from fastapi.security import HTTPBasicCredentials
from app.models.common import *
from app.models.user import *
from app.database.crud.user import *
from app.database.crud.brand import retrive_brands_of_user
from app.middlewares.auth.jwt_bearer import JWTBearer
from app.database.crud.app import retrive_apps_of_user
router = APIRouter()  # all user endpoints below are registered on this router
token_listener = JWTBearer()  # shared JWT auth dependency for the protected routes
@router.post("/login/")
async def login(credentials: HTTPBasicCredentials = Body(...)):
    """Validate basic credentials; returns the session payload on success,
    a 403-style error payload otherwise."""
    user = await user_login(jsonable_encoder(credentials))
    if user != False:
        return ResponseModel(user, "Successfully logged in!")
    return ErrorResponseModel(
        "An error occurred",
        status.HTTP_403_FORBIDDEN,
        "Incorrect email or password.",
    )
@router.post("/register/", response_description="Account created.")
async def user_registration(user: UserModel = Body(...)):
    """Create a new account; rejected with a conflict payload when the e-mail
    address is already registered."""
    new_user = await create_user(jsonable_encoder(user))
    if new_user != False:
        return ResponseModel(new_user, "User created successfully")
    return ErrorResponseModel(
        "An error occurred", status.HTTP_409_CONFLICT, "Email already in use."
    )
@router.get(
    "/",
    dependencies=[Depends(token_listener)],
    response_description="Users are retrieved.",
)
async def get_users():
    """Return every user; the message indicates whether the list is empty."""
    users = await retrieve_users()
    message = "Users data retrieved successfully" if len(users) > 0 else "Empty list returned"
    return ResponseModel(users, message)
@router.get(
    "/{id}/",
    dependencies=[Depends(token_listener)],
    response_description="User data retrieved.",
)
async def get_user(id):
    """Fetch a single user by id; returns a 404-style error payload when absent."""
    user = await retrieve_user(id)
    if user:
        return ResponseModel(user, "User data retrieved successfully")
    # fix: the error message said "Student" (copy-paste from another service)
    # and misspelled "occurred"
    return ErrorResponseModel("An error occurred.", 404, "User doesn't exist.")
@router.put(
    "/{id}/",
    dependencies=[Depends(token_listener)],
    response_description="User data updated in the database.",
)
async def update_user(id: str, user: UpdateUserModel = Body(...)):
    """Apply a partial update to the user identified by `id`."""
    payload = jsonable_encoder(user)
    updated = await update_user_data(id, payload)
    if updated:
        return ResponseModel({"id": id}, "User updated successfully")
    return ErrorResponseModel(
        "An error occurred",
        status.HTTP_404_NOT_FOUND,
        f"There was an error updating the user {id}",
    )
@router.get(
    "/{id}/brands/",
    dependencies=[Depends(token_listener)],
    response_description="User brands are retrieved.",
)
async def get_brands_of_users(id):
    """List every brand belonging to the user `id`."""
    brands = await retrive_brands_of_user(id)
    if brands:
        # fix: message was the ungrammatical "User brands are successfully"
        return ResponseModel(brands, "User brands retrieved successfully")
    # fix: error said "Student" (copy-paste) and misspelled "occurred"
    return ErrorResponseModel("An error occurred.", 404, "User doesn't exist.")
@router.get(
    "/{id}/apps/",
    dependencies=[Depends(token_listener)],
    # fix: description said "brands" in the apps route (copy-paste)
    response_description="User apps are retrieved.",
)
async def get_apps_of_user(id):
    """List every app belonging to the user `id`."""
    apps = await retrive_apps_of_user(id)
    if apps:
        # fix: message said "User brands are successfully" in the apps route
        return ResponseModel(apps, "User apps retrieved successfully")
    # fix: error said "Student" (copy-paste) and misspelled "occurred"
    return ErrorResponseModel("An error occurred.", 404, "User doesn't exist.")
| StarcoderdataPython |
4921588 | <reponame>tageerBOY/discord_py<filename>plugins/Example1/program.py
#Plugin1
#me
#v1.0.0
# NOTE(review): `bot` is not defined in this file -- presumably injected into the
# namespace by the plugin loader before this source is executed; confirm against
# the loader in plugins/.
@bot.command()
async def test(ctx):
    # Smoke-test command: just logs to stdout, sends nothing back to the channel.
    print('test')
#Plugin1 | StarcoderdataPython |
6489779 | """
Listas Aninhadas
- Algumas linguagens de programação possuem uma estrutura de dados chamadas de arrays:
- Unidimensionais (Arrays/Vetores);
- Multidimensionais (Matrizes);
Em Python nós temos as Listas
numeros = [1, 'b', 3.234, True, 5]
print(listas)
print(type(listas))
# Exemplos
listas = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # Matriz 3x3
# Como fazemos para acessar os dados?
print(listas[0][1]) # 2
print(listas[2][1]) # 8
# Iterando com loops em uma lista aninhada
for lista in listas:
for num in lista:
print(num)
# List Comprehension
[[print(valor) for valor in lista] for lista in listas]
"""
# Further examples
# Build a 3x3 board/matrix with rows [1, 2, 3]
tabuleiro = [[numero for numero in range(1, 4)] for valor in range(1, 4)]
print(tabuleiro)
# Generate tic-tac-toe moves: 'X' on even numbers, 'O' on odd
velha = [['X' if numero % 2 == 0 else 'O' for numero in range(1, 4)] for valor in range(1, 4)]
[print(linha) for linha in velha]
# Generate initial placeholder values (3x3 grid of '*')
print([['*' for i in range(1, 4)] for j in range(1, 4)])
| StarcoderdataPython |
5130612 | from psireporter.worker import Report
import platform, calendar
from datetime import datetime
class PsistatsReport(Report):
    # Immutable report record laid out as a 4-tuple:
    # (host id, message, sender, UTC unix timestamp).
    # Report is tuple-based, so construction must happen in __new__
    # rather than __init__.
    def __new__(cls, **kwargs):
        """Build the report tuple; `id` defaults to this machine's hostname."""
        return tuple.__new__(cls, (
            kwargs.get('id', platform.node()),
            kwargs.get('message', None),
            kwargs.get('sender', None),
            # timegm() interprets the UTC struct_time as UTC (time.mktime
            # would wrongly apply the local timezone here).
            calendar.timegm(datetime.utcnow().utctimetuple())
        ))
| StarcoderdataPython |
9665543 | <filename>pyluos/__init__.py
import logging
from .device import Device
from .services import *
# Attach a NullHandler so importing the library never emits "No handler could
# be found" warnings; applications configure their own handlers.
nh = logging.NullHandler()
logging.getLogger(__name__).addHandler(nh)
| StarcoderdataPython |
170021 | <filename>services/web/api/db_threads.py
import os
import uuid
from common.dbinstance import DbInstance
from common.s3client import S3Client
def select_threads(board_id, limit, offset):
    """Fetch one page of threads for a board, each with its 3 latest replies.

    Returns {'status': 200 | 404, 'data': [...]}; 404 means no threads
    matched (empty boards and bad board ids are indistinguishable here).
    NOTE(review): indentation reconstructed; the '%%' sequences escape the
    literal '%' of DATE_FORMAT against the DB-API paramstyle.
    """
    # prepare result
    result = {
        'status': 404,
        'data': []
    }
    # fetch rows from db
    with DbInstance().get_instance().cursor() as cursor:
        # Threads are posts with thread_id IS NULL, newest bump first.
        cursor.execute("""
            SELECT
                t.id AS id,
                t.board_id AS board_id,
                t.thread_id AS thread_id,
                t.data_message AS data_message,
                t.data_filepath AS data_filepath,
                t.data_thumbpath AS data_thumbpath,
                DATE_FORMAT(t.datetime_created, '%%m/%%d/%%y(%%a)%%T') AS datetime_created,
                DATE_FORMAT(t.timestamp_edited, '%%m/%%d/%%y(%%a)%%T') AS timestamp_edited,
                (
                    SELECT
                        b.data_reason
                    FROM bans AS b
                    WHERE
                        b.post_id = t.id
                    ORDER BY b.datetime_ends IS NULL DESC, b.datetime_ends DESC
                    LIMIT 1
                ) AS ban_reason
            FROM posts AS t
            WHERE (t.board_id = %s AND t.thread_id IS NULL) AND t.deleted = false
            ORDER BY t.timestamp_bumped DESC
            LIMIT %s OFFSET %s
        """, (board_id, limit, offset,))
        result['data'] = cursor.fetchall()
        if result['data']:
            for item in result['data']:
                # Latest 3 replies per thread: fetched newest-first, then
                # re-sorted ascending by id for display order.
                cursor.execute("""
                    SELECT
                        p.id AS id,
                        p.board_id AS board_id,
                        p.thread_id AS thread_id,
                        p.data_message AS data_message,
                        p.data_filepath AS data_filepath,
                        p.data_thumbpath AS data_thumbpath,
                        DATE_FORMAT(p.datetime_created, '%%m/%%d/%%y(%%a)%%T') AS datetime_created,
                        DATE_FORMAT(p.timestamp_edited, '%%m/%%d/%%y(%%a)%%T') AS timestamp_edited,
                        (
                            SELECT
                                b.data_reason
                            FROM bans AS b
                            WHERE
                                b.post_id = p.id
                            ORDER BY b.datetime_ends IS NULL DESC, b.datetime_ends DESC
                            LIMIT 1
                        ) AS ban_reason
                    FROM posts AS p
                    WHERE (p.board_id = %s AND p.thread_id = %s) AND p.deleted = false
                    ORDER BY p.datetime_created DESC
                    LIMIT 3 OFFSET 0
                """, (board_id, item['id']))
                item['posts'] = sorted(cursor.fetchall(), key=lambda p: p['id'])
    # update result
    if result['data']:
        result['status'] = 200
    return result
def insert_thread(board_id, thread, ipv4_addr):
    """Create a new thread for a board, enforcing bans and rate limits.

    Returns {'status': 201|400|403|429, 'data': ...}; on 201 the data carries
    the new row id plus a presigned S3 POST for the optional image upload.
    NOTE(review): indentation reconstructed from the flattened source.
    """
    # prepare result
    result = {
        'status': 400,
        'data': None
    }
    # check if user has permission to create thread
    with DbInstance().get_instance().cursor() as cursor:
        # check 403 (banned): the most relevant ban is the one ending last
        # (permanent bans, datetime_ends IS NULL, sort first).
        cursor.execute("""
            SELECT
                CASE
                    WHEN b.datetime_ends IS NOT NULL THEN (
                        b.datetime_ends < CURRENT_TIMESTAMP
                    )
                    ELSE (
                        false
                    )
                END AS permission
            FROM bans AS b
            WHERE b.banned_ipv4_addr = INET_ATON(%s)
            ORDER BY b.datetime_ends IS NULL DESC, b.datetime_ends DESC
            LIMIT 1
        """, (ipv4_addr,))
        permitted_403 = cursor.fetchone()
        # No ban row at all also means permitted.
        permitted_403 = True if permitted_403 is None else permitted_403['permission'] == 1
        # check 429 (too many requests): at most one thread per 15 seconds.
        cursor.execute("""
            SELECT
                a.timestamp_created_thread < DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 15 SECOND) AS permission
            FROM anons AS a
            WHERE a.ipv4_addr = INET_ATON(%s)
        """, (ipv4_addr,))
        permitted_429 = cursor.fetchone()
        permitted_429 = True if permitted_429 is None else permitted_429['permission'] == 1
        # create thread if permitted
        if permitted_403 and permitted_429:
            # if requested, generate presigned s3 POST url for the file
            file_upload_info = {
                'url': None,
                'fields': {
                    'key': None
                },
            }
            if 'extension' in thread and thread['extension'] is not None:
                # Random UUID key; upload constrained to the declared image
                # content type and a 128 B - ~4 MB size window, 60 s validity.
                file_upload_info = S3Client().instance.generate_presigned_post(
                    os.getenv('MEDIA_BUCKET'),
                    str(uuid.uuid4()) + '.' + thread['extension'],
                    Fields={
                        'acl': 'public-read',
                        'Content-Type': 'image/' + thread['extension']
                    },
                    Conditions=[
                        {'acl': 'public-read'},
                        {'Content-Type': 'image/' + thread['extension']},
                        ['content-length-range', 128, 4096000]
                    ],
                    ExpiresIn=60
                )
            # insert/update ipv4_addr row (upsert refreshes the rate-limit clock)
            rows_anon = cursor.execute("""
                INSERT INTO anons (ipv4_addr, timestamp_created_thread) VALUES (INET_ATON(%s), CURRENT_TIMESTAMP)
                ON DUPLICATE KEY UPDATE timestamp_created_thread=CURRENT_TIMESTAMP
            """, (ipv4_addr,))
            # insert thread
            rows_thread = cursor.execute("""
                INSERT INTO posts (board_id, data_message, data_filepath, ipv4_addr)
                VALUES (%s, %s, %s, INET_ATON(%s))
            """, (board_id, thread['message'], file_upload_info['fields']['key'], ipv4_addr,))
            id_inserted = cursor.lastrowid
            # commit if ok (upsert reports 1 for insert, 2 for update)
            if rows_anon >= 1 and rows_thread == 1:
                cursor.connection.commit()
                result['status'] = 201
                result['data'] = {
                    'id': id_inserted,
                    'url': file_upload_info['url'],
                    'fields': file_upload_info['fields']
                }
        else:
            if not permitted_403:
                result['status'] = 403
                result['data'] = {
                    'message': 'you are banned'
                }
            else:
                result['status'] = 429
                result['data'] = {
                    'message': 'too many requests'
                }
    return result
| StarcoderdataPython |
11213744 | <filename>setup.py
from setuptools import setup, find_packages
setup(
    name='gym_trainer',
    # NOTE(review): find_packages('gym_trainer') treats 'gym_trainer' as the
    # *directory to search in*, not a package name — confirm this matches the
    # repository layout (a src-style layout would also need package_dir).
    packages=find_packages('gym_trainer'),
    author='<NAME>',
    version='0.1.0',
    # All dependencies are hard-pinned; bumping any of them is a deliberate
    # compatibility decision, not routine maintenance.
    install_requires=[
        'gym==0.10.9', 'matplotlib==3.0.2', 'chainer==5.1.0', 'cupy==5.1.0', 'torch==1.0.0', 'torchvision==0.2.1',
        'optuna==0.4.0'
    ]
)
1836411 | <gh_stars>0
from django.apps import AppConfig
class RelationshipsConfig(AppConfig):
    """Django app configuration for the `relationships` app."""
    # App label/name used in INSTALLED_APPS and migrations.
    name = 'relationships'
| StarcoderdataPython |
6547818 | import urllib.request
import string
import random
import project
from os import path
import json
from PIL import Image
def get_schedule_path() -> str:
    """Path of the JSON file that stores the scheduled posts."""
    return "{}/scheduled.json".format(project.get_dir())
def load() -> dict:
    """Read the schedule file, returning an empty dict when it doesn't exist yet."""
    schedule_file = get_schedule_path()
    if not path.exists(schedule_file):
        return {}
    with open(schedule_file, 'r') as fp:
        return json.load(fp)
def save(data: dict):
    """Overwrite the schedule file with *data* serialized as JSON."""
    with open(get_schedule_path(), 'w') as fp:
        fp.write(json.dumps(data))
def random_code() -> str:
    """Return an 8-character code of uppercase letters and digits.

    Uses random.choices, which samples with replacement in one call instead
    of looping over random.choice (same distribution, clearer intent).
    """
    return ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))
def schedule(url: str, caption: str, tags: str):
    """Download the image at *url*, normalize it, and add it to the schedule.

    *tags* is a comma-separated string; all whitespace is stripped before
    splitting, so "a, b ,c" becomes ['a', 'b', 'c'].
    """
    # Random 8-char code doubles as the schedule key and the file name.
    code = random_code()
    image_path = f"{project.get_content_dir()}/{code}.jpg"
    # Keep the source extension so the download can be converted properly.
    _, ext = path.splitext(url)
    retrieve_as_jpg(url, image_path, ext)
    fix_aspect_ratio(image_path)
    saved = load()
    saved[code] = {
        'image': image_path,
        'caption': caption,
        'tags': ("".join(tags.split())).split(',')  # Remove all whitespace and then separate by ,
    }
    save(saved)
    print(f"'{url}' has been scheduled succesfully.")
def retrieve_as_jpg(url: str, image_path: str, ext: str):
    """Download *url* and store it as an RGB JPEG at *image_path*.

    The raw download is first written to ``<image_path><ext>`` so Pillow can
    sniff the original format; that temporary file is removed afterwards
    (the original implementation leaked it on every call).
    """
    import os  # local import: the module only imports os.path at top level

    tmp_path = f"{image_path}{ext}"
    urllib.request.urlretrieve(url, tmp_path)
    # Open as a context manager so the file handle is closed before removal
    # (matters on Windows, where open files cannot be deleted).
    with Image.open(tmp_path) as im:
        if im.mode != 'RGB':
            im = im.convert('RGB')
        im.save(image_path, quality=95)
    os.remove(tmp_path)
# https://stackoverflow.com/questions/4744372/reducing-the-width-height-of-an-image-to-fit-a-given-aspect-ratio-how-python
def fix_aspect_ratio(image_path: str):
    """Center-crop the image in place to a fixed 1.90 aspect ratio.

    NOTE(review): the original comment called 1.90 Instagram's "minimum"
    aspect ratio; per the linked help page it is the *maximum* landscape
    ratio, and this crop forces every image (even allowed square ones) to
    exactly 1.90 — confirm that is intended.
    """
    image = Image.open(image_path)
    width = image.size[0]
    height = image.size[1]
    aspect = width / float(height)
    ideal_aspect = 1.90  # target aspect ratio (see NOTE above)
    if aspect > ideal_aspect:
        # Then crop the left and right edges:
        new_width = int(ideal_aspect * height)
        offset = (width - new_width) / 2
        # offset may be fractional; Image.crop accepts float box coordinates.
        resize = (offset, 0, width - offset, height)
    else:
        # ... crop the top and bottom:
        new_height = int(width / ideal_aspect)
        offset = (height - new_height) / 2
        resize = (0, offset, width, height - offset)
    cropped = image.crop(resize)
    cropped.save(image_path)
| StarcoderdataPython |
11327013 | <gh_stars>1-10
import os
import unittest
from conans.model.ref import ConanFileReference
from conans.paths import EXPORT_SOURCES_DIR_OLD
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient, TestServer
from conans.util.files import tar_extract
class DoNotKeepOldExportSourcesLayoutTest(unittest.TestCase):
    def test_basic(self):
        """ check that we do not generate anymore tgz with .c_src.
        also, they are not present any more in the cache layout, even if they come from a .c_src
        tgz server file
        """
        # In-memory test server + client authenticated as lasote.
        test_server = TestServer()
        servers = {"default": test_server}
        client = TestClient(servers=servers, users={"default": [("lasote", "mypass")]})
        client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
    name= "Pkg"
    version = "0.1"
    exports_sources = "*.txt"
""", "myfile.txt": "Hello world"})
        client.run("export . lasote/testing")
        client.run("upload Pkg/0.1@lasote/testing")
        # Wipe the local cache so the next install must download from server.
        client.run("remove * -f")
        client.run("search")
        self.assertIn("There are no packages", client.user_io.out)
        # Resolve the latest revision stored on the server for the reference.
        ref = ConanFileReference.loads("Pkg/0.1@lasote/testing")
        rev, _ = servers["default"].server_store.get_last_revision(ref)
        ref = ref.copy_with_rev(rev)
        path = test_server.server_store.export(ref)
        sources_tgz = os.path.join(path, "conan_sources.tgz")
        self.assertTrue(os.path.exists(sources_tgz))
        # The uploaded sources tgz must contain the file at its root,
        # not under the legacy .c_src directory.
        folder = temp_folder()
        with open(sources_tgz, 'rb') as file_handler:
            tar_extract(file_handler, folder)
        self.assertEqual(os.listdir(folder), ["myfile.txt"])
        # Now install again
        client.run("install Pkg/0.1@lasote/testing --build=missing")
        export = client.cache.package_layout(ref).export()
        # Legacy layout folder must not reappear in the local cache either.
        self.assertNotIn(EXPORT_SOURCES_DIR_OLD, os.listdir(export))
        export_sources = client.cache.package_layout(ref).export_sources()
        self.assertEqual(os.listdir(export_sources), ["myfile.txt"])
| StarcoderdataPython |
3202205 | <filename>template_server_serializer.py
import discord
def getPermissionJson(name, value):
    """Serialize one permission flag as {"permissionName": ..., "allow": ...}."""
    return {"permissionName": name, "allow": value}
def getCategoryJson(category):
    """Serialize a channel category, including per-role permission overwrites."""
    overwrites = category.overwrites
    return {
        "name": category.name,
        "type": str(category.type),
        "nsfw": category.is_nsfw(),
        "permissions": [
            getChannelPermissionJson(role, overwrites[role]) for role in overwrites
        ],
    }
def getChannelPermissionJson(role, perms):
    """Serialize a role's channel overwrite, keeping only explicit allow/deny flags."""
    explicit = [(perm, value) for perm, value in iter(perms) if value != None]
    return {
        "roleName": role.name,
        "permissions": [getPermissionJson(perm, value) for perm, value in explicit],
    }
def getRoleJson(role):
    """Serialize a discord.Role: name, full permission list, display settings."""
    # isinstance instead of type(...) == (accepts Role subclasses; a strict
    # superset of the original check, so existing callers are unaffected).
    assert isinstance(role, discord.Role)
    # The original bound `permissions = role.permissions` and never used it;
    # the comprehension below reads role.permissions directly.
    return {
        "name": role.name,
        "permissions": [getPermissionJson(perm, value) for (perm, value) in iter(role.permissions)],
        "settings": {
            # RGB triple as a JSON-friendly list
            "color": list(role.color.to_rgb()),
            "mention": role.mentionable,
            "displaySeparate": role.hoist
        }
    }
def getTextChannelJson(text_channel):
    """Serialize a discord.TextChannel, including overwrites and parent category name."""
    assert type(text_channel) == discord.TextChannel
    overwrites = text_channel.overwrites
    parent = text_channel.category
    return {
        "name": text_channel.name,
        "topic": text_channel.topic,
        "position": text_channel.position,
        "nsfw": text_channel.is_nsfw(),
        "slowmode_delay": text_channel.slowmode_delay,
        "permissions": [getChannelPermissionJson(role, overwrites[role]) for role in overwrites],
        "categoryName": parent.name if parent else None,
    }
def getVoiceChannelJson(voice_channel):
    """Serialize a discord.VoiceChannel, including overwrites and parent category name."""
    assert type(voice_channel) == discord.VoiceChannel
    overwrites = voice_channel.overwrites
    parent = voice_channel.category
    return {
        "name": voice_channel.name,
        "position": voice_channel.position,
        "bitrate": voice_channel.bitrate,
        "user_limit": voice_channel.user_limit,
        "permissions": [getChannelPermissionJson(role, overwrites[role]) for role in overwrites],
        "categoryName": parent.name if parent else None,
    }
def getServerJson(server):
    """
    Converts the given server into a template JSON following the template_server_schema.json format.
    """
    assert type(server) == discord.Guild, "server must be discord.Guild, not: " + str(type(server))
    # List comprehensions replace the original append loops; the ordering of
    # each source collection is preserved.
    return {
        'serverName': server.name,
        'roles': [getRoleJson(r) for r in server.roles],
        'categories': [getCategoryJson(c) for c in server.categories],
        'textChannels': [getTextChannelJson(t) for t in server.text_channels],
        'voiceChannels': [getVoiceChannelJson(v) for v in server.voice_channels],
    }
| StarcoderdataPython |
373353 | #!/usr/bin/env python
"""
Repro
-----
This is a module about repro
"""
from sdk.foo import foo
def handler(event, context):
    """Make a request and print it"""
    print(foo())


if __name__ == '__main__':
    handler('a', 'b')
103415 | <filename>testing/test_xml.py
import py
from testing.test_interpreter import BaseTestInterpreter
# Module-level skip: aborts collection of this whole file when the PHP xml
# extension is not available in the interpreter under test.
py.test.skip("xml module unavailable")


class TestXML(BaseTestInterpreter):
    def test_basic_parse(self):
        """Character-data callback should receive everything inside <a>,
        including the nested <b> markup, concatenated into $x."""
        output = self.run("""
        $parser = xml_parser_create();
        $x = "";
        function callback($parser, $arg) {
            global $x;
            $x .= $arg;
        }
        xml_set_character_data_handler($parser, 'callback');
        xml_parse($parser, "<a><b>c</b></a>", true);
        echo $x;
        """)
        assert self.unwrap(output[0]) == "<b>c</b>"
| StarcoderdataPython |
3225844 | import numpy as np
# Monte-Carlo study of an EM algorithm for a random-intercept regression
# y_ij = beta_0 + beta_1 * x_ij + u_i + eps_ij, i=1..I subjects, J=2 obs each.
# Each of the 500 replications generates data, runs 30 EM iterations, and
# stores (beta0, beta1, var_eps, var_u) from the final iteration.
# NOTE(review): indentation reconstructed from the flattened source.
I = 100
J = 2
beta_0 = -1
beta_1 = 1
sigma_u = 0.5
sigma_eps = 1
iteration = 30
initial = [0, 0, 5, 5]  # starting values: beta0, beta1, var_eps, var_u
result = []


def xE(E_t):
    # sum_i sum_j x_ij * E[u_i]; relies on module-level value_x (set per rep).
    return sum(sum(value_x * np.array([E_t, E_t]).T))


def eps_right(E_t, beta0, beta1):
    # Residual part of the M-step update for the error variance.
    return (sum(sum((value_y - beta0-beta1*value_x)**2)) + J * sum(E_t**2) - 2*sum(sum((value_y - beta0 - beta1*value_x) * np.array([E_t, E_t]).T)))/(I*J)


for i in range(500):
    # data generation (seeded per replication for reproducibility)
    np.random.seed(i)
    value_x = np.random.normal(0, 1, (I, J))
    u = np.random.normal(0, sigma_u, I)
    # broadcast the subject effect u_i across both observations
    value_u = np.ones((I, J))
    value_u[:, 0] = u
    value_u[:, 1] = u
    value_eps = np.random.normal(0, sigma_eps, (I, J))
    value_y = beta_0 + beta_1 * value_x + value_u + value_eps
    # sufficient statistics reused by the closed-form M-step updates
    XY = sum(sum(value_y * value_x))
    X = sum(sum(value_x))
    Y = sum(sum(value_y))
    X_2 = sum(sum(value_x * value_x))
    # estimation
    E_t = np.ones(I)  # E-step posterior means E[u_i | y]
    estimation = np.ones((iteration, 4))
    estimation[0, :] = initial
    for ite in range(1, iteration):
        u = estimation[ite-1, 3]     # current var_u
        eps = estimation[ite-1, 2]   # current var_eps
        beta0 = estimation[ite-1, 0]
        beta1 = estimation[ite-1, 1]
        # E-step: posterior mean and (shared) posterior variance of u_i
        for i in range(I):
            E_t[i] = u*sum([value_y[i, j] - beta0 - beta1*value_x[i, j] for j in range(J)]) / (J*u + eps)
        V_t = eps*u/(J*u + eps)
        # M-step: closed-form updates for beta1, beta0, var_eps, var_u
        estimation[ite, 1] = (I*J*XY - Y*X - J * (I*xE(E_t) - sum(E_t)*X))/(I*J*X_2 - X**2)
        estimation[ite, 0] = (Y-beta1*X - J * sum(E_t))/(I*J)
        estimation[ite, 2] = V_t + eps_right(E_t, estimation[ite-1, 0], estimation[ite-1, 1])
        estimation[ite, 3] = V_t + sum(E_t**2)/I
    a = estimation[iteration-1, :]
    result.append(a)
result = np.array(result)
# change variances into standard deviations
result[:, 2] = np.sqrt(result[:, 2])
result[:, 3] = np.sqrt(result[:, 3])
# empirical bias and spread against the true (beta0, beta1, sigma_eps, sigma_u)
bias = np.mean(result, axis=0) - [-1, 1, 1, 0.5]
std = np.std(result, axis=0)
print(bias)
print(std)
1709898 | <filename>src/cdev/project_templates/quick_start/src/hello_world/resources.py<gh_stars>1-10
# Generated as part of Quick Start project template
from cdev.resources.simple.api import Api
from cdev.resources.simple.xlambda import simple_function_annotation
from cdev import Project as cdev_project
# Bind this file's resources to the active Cdev project instance.
myProject = cdev_project.instance()

# Simple HTTP API with a single GET /hello_world route.
DemoApi = Api("demoapi")
hello_route = DemoApi.route("/hello_world", "GET")


# Serverless function wired to the route's invocation event.
@simple_function_annotation("hello_world_function", events=[hello_route.event()])
def hello_world(event, context):
    print("Hello from inside your Function!")
    return {"status_code": 200, "message": "Hello Outside World!"}


# Surface the deployed API endpoint(s) in the project's CLI output.
myProject.display_output("Base API URL", DemoApi.output.endpoint)
myProject.display_output("Routes", DemoApi.output.endpoints)
| StarcoderdataPython |
360865 | # Generated by Django 3.2.2 on 2021-05-13 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes Game.torrent_file_size to an optional
    # CharField with a localized (Russian) default/verbose name.  The literal
    # strings below are schema data — do not translate them.

    dependencies = [
        ('game', '0016_game_torrent_file_size'),
    ]

    operations = [
        migrations.AlterField(
            model_name='game',
            name='torrent_file_size',
            field=models.CharField(blank=True, default='КБ', max_length=100, null=True, verbose_name='Размер торрент-файла'),
        ),
    ]
| StarcoderdataPython |
3310799 | <filename>exashellcore.py
from platform import platform
import time
import os
import requests
import platform
import subprocess
import psutil
from pystyle import Write, Colors
VERSION = "1.1"
# Network call at import time: fetches the latest release tag from GitHub to
# warn about outdated installs (crashes offline — NOTE(review): consider a
# try/except around this in a future change).
req = requests.get("https://api.github.com/repos/xYanis/ExaShell/releases/latest").json()
lastestVersion = req['tag_name']
if VERSION != lastestVersion:
    print(f"[] You are not on the latest version (v{lastestVersion}) !")
Write.Print("[] Loading ExaShell ...", Colors.red_to_purple, interval=0.040)
time.sleep(2)
# 'cls' is Windows-only; on other platforms this is a no-op shell error.
os.system('cls')
Write.Print("[] Enter help to see all commands of ExaShell ! \n", Colors.yellow_to_red, interval=0.040)
Write.Print("[] Enter clear to clear all output of the console ! \n", Colors.yellow_to_red, interval=0.040)
# Interactive command loop: read a command, dispatch on its name.
while True:
    base = Write.Input("[] root@ExaShell -> ", Colors.yellow_to_red, interval=0.040)
    if base == "help":
        Write.Print('╭───────>Help<──────╮\n', Colors.purple_to_red, interval=0.040)
        Write.Print('[->] help (show this menu)\n', Colors.purple_to_red, interval=0.040)
        Write.Print('[->] exainfo (show informations about this computer)\n', Colors.purple_to_red, interval=0.040)
        Write.Print('[->] exaspeed (launch tool for speed test) | SOON\n', Colors.purple_to_red, interval=0.040)
        Write.Print('[->] exanetconfig (show informations about network) | SOON\n', Colors.purple_to_red, interval=0.040)
        Write.Print('[->] exapart (launch tool for partitioning) | SOON\n', Colors.purple_to_red, interval=0.040)
        Write.Print('[->] exit (exit ExaShell)\n', Colors.purple_to_red, interval=0.040)
        Write.Print('[->] clear (clear all the console output !)\n', Colors.purple_to_red, interval=0.040)
        Write.Print('╰─────────><────────╯\n', Colors.purple_to_red, interval=0.040)
    if base == "exainfo":
        # platform.uname() gives OS / hostname / version / CPU details.
        uname = platform.uname()
        Write.Print('╭───────>ExaInfo<──────╮\n', Colors.purple_to_red, interval=0.040)
        Write.Print(f'[->] OS : {uname.system}\n', Colors.purple_to_red, interval=0.040)
        Write.Print(f'[->] Computer Name : {uname.node}\n', Colors.purple_to_red, interval=0.040)
        Write.Print(f'[->] OS Version : {uname.version}\n', Colors.purple_to_red, interval=0.040)
        Write.Print(f'[->] CPU : {uname.processor}\n', Colors.purple_to_red, interval=0.040)
        Write.Print(f'[->] CPU Architecture : {uname.machine}\n', Colors.purple_to_red, interval=0.040)
        Write.Print('╰───────────><─────────╯\n', Colors.purple_to_red, interval=0.040)
    if base == "exaspeed":
        Write.Print('╭───────>ExaSpeed<──────╮\n', Colors.purple_to_red, interval=0.040)
        Write.Print('COMING SOON\n', Colors.purple_to_red, interval=0.040)
    if base == "exanetconfig":
        Write.Print('╭───────>ExaNetconfig<──────╮\n', Colors.purple_to_red, interval=0.040)
        Write.Print('COMING SOON\n', Colors.purple_to_red, interval=0.040)
    if base == "exapart":
        Write.Print('╭───────>ExaPart<──────╮\n', Colors.purple_to_red, interval=0.040)
        Write.Print('COMING SOON\n', Colors.purple_to_red, interval=0.040)
    if base == "clear":
        os.system('cls')
    if base == "exit":
        Write.Print('Exiting...', Colors.purple_to_red, interval=0.040)
        time.sleep(1)
        # Bug fix: the original called os.kill() with no arguments, which
        # raises TypeError (os.kill requires a pid and a signal).  Leaving
        # the loop ends the script cleanly instead.
        break
| StarcoderdataPython |
9614037 | <filename>bench.py
import numpy as np
import time
import cv2
import click
import glob
from imutils.video import FPS
from Events import *
from utils import *
net_path = path.join('data', 'frozen.pb')
predictor_path = path.join('data', 'shape_predictor_68_face_landmarks.dat')
@click.command()
@click.option('--debug', is_flag=True, help='#TODO')
def main(debug):
    """Offline drowsiness-detection benchmark over the images in im/*.png.

    Measures overall FPS and accumulated eye-model inference time on a
    Myriad (NCS) DNN target.  NOTE(review): indentation of the event
    cascade below was reconstructed; each stage is assumed to run only
    when the previous stage detected no event — confirm against upstream.
    """
    imgs = glob.glob('im/*.png')
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
    net = create_net(net_path)
    #net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
    detector, predictor = get_detector_and_predictor(predictor_path)
    fps = FPS().start()
    #_ = vs.read()
    # Warm-up inference so the first measured frame excludes model load cost.
    _ = predict_eye(np.zeros((2,128,128,3), dtype=np.uint8), net)
    init_pin()
    print('[INFO] Started')
    inf_time = 0
    inf_count = 0
    try:
        for im in imgs:
            raw = cv2.imread(im)
            flipped = cv2.flip(raw, +1)  # mirror horizontally (selfie view)
            squared = cut_img_to_square(flipped)
            img = cv2.resize(squared, (432, 432))
            shape = get_face_points(img, debug, detector, predictor)
            if is_event(shape):
                # No face found: record the event and skip eye analysis.
                handle(shape, img, counter_dict)
            else:
                reset_event(Events.NO_FACE, counter_dict)
                left, right = get_eyes_points(shape)
                eye_close = check_eyes(left, right, img, debug)
                if is_event(eye_close):
                    handle(eye_close, img, counter_dict)
                else:
                    reset_event(Events.EYE_CLOSE, counter_dict)
                    left_img, right_img = crop_eyes(img, left, right)
                    predicted, tims = predict_eye((left_img, right_img), net)
                    inf_time += tims
                    inf_count += 1
                    focus = check_focus(predicted, img, debug)
                    if is_event(focus):
                        handle(focus, img, counter_dict)
                    else:
                        reset_event(Events.BAD_FOCUS, counter_dict)
            #cv2.imshow("Eye based drowsiness detection", img)
            fps.update()
    except KeyboardInterrupt:
        print("[INFO] STOP")
    finally:
        # Always release GPIO pins and report stats, even on interrupt.
        fps.stop()
        clear_pins()
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        print("[INFO] len of test: {}".format(len(imgs)))
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] inference time: {:2f}".format(inf_time))
        print("[INFO] inference count {}".format(inf_count))
        #cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
9753411 | <reponame>RobDoesData/btccagr-calc
import matplotlib.pyplot as plt
import os
import pandas as pd
import json
from datetime import date, timedelta
def cagr_calc(start,end,ticker):
"""
start - type:date - start date for CAGR Calc
end - type:date - end date for CAGR Calc
ticker - type:str - asset to calculate CAGR on.
"""
#load price chart
df = pd.read_csv(f'price/{ticker}_historic.csv')
#convert to date object
df['Date'] = pd.to_datetime(df['Date'],format='%Y-%m-%d')
#parse start date and end date strings to datetime.
l = pd.to_datetime(start, format='%Y-%m-%d')
f = pd.to_datetime(end, format='%Y-%m-%d')
#format start and end dates as YYYY-MM-DD
startdate = f"{l}".replace("00:00:00","").replace(" ","")
enddate = f"{f}".replace("00:00:00","").replace(" ","")
#calculate how many years are between the start and end date
N = ((f - l).days/365)
#visual representation shown on front end
timegap = f"{round(N,4)}"
#Grabs the price for the start and end date
L = float(df.loc[df['Date'] == l]['Price'].values[0])
F = float(df.loc[df['Date'] == f]['Price'].values[0])
#CAGR formula, represented as xxx.xx%
CAGR = "{:.2%}".format(((F/L)**(1/N))-1)
#plots historic price graph
plt.plot(df.Date,df.Price)
#assigns title of graph
plt.title(f'{ticker} Historic Price')
#Creates shaded red region between start and end
plt.axvspan(l, f, alpha=0.2, color='red')
#labels axis
plt.xlabel(f'Time')
plt.ylabel('Price (USD)')
#assigns a master filepath on where to store images
my_path = os.path.dirname(os.path.dirname(__file__))
#saves image
plt.savefig(f"{my_path}/app/static/images/{ticker}_{start}_{end}.png")
#clears cache of plot tool
plt.cla()
#final path where HTML should load from
path = f'static/images/{ticker}_{start}_{end}.png'
#foramts start and end price as $XXXX.XX
finalprice = f"{'${:,.2f}'.format(F)}"
startingprice = f"{'${:,.2f}'.format(L)}"
readout = f"{CAGR}"
#creates json payload to pass into HTML
output = {}
output['asset'] = ticker
output['startdate'] = startdate
output['enddate'] = enddate
output['startingprice'] = startingprice
output['finalprice'] = finalprice
output['timegap'] = timegap
output['readout'] = readout
#master json that joins all assets in multiasset payload
master = {}
master[ticker] = output
output_path = path
output_json = json.dumps(master)
return output_json, output_path | StarcoderdataPython |
5030319 |
import os
from app.networks import get_ip
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
    """Flask application settings, overridable via environment variables."""
    # Filesystem layout anchored at this module's directory.
    APPLICATION_DIR = os.path.dirname(os.path.realpath(__file__))
    STATIC_DIR = os.path.join(APPLICATION_DIR, 'static')
    IMAGES_DIR = os.path.join(APPLICATION_DIR, 'images')
    # NOTE(review): fallback secret is a known placeholder — fine for dev,
    # must come from the environment in production.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
    DEBUG = True
    TESTING = True
    # connection to mysql (local/server) db configuration
    # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'mysql://bob:secret@{}/testF'.format(get_ip())
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    ADMIN = os.environ.get('ADMIN') or "<EMAIL>"
    # ADMIN_PASSWORD = '<PASSWORD>'
    # ROLES = ["admin", "user"]
    # mySQL database configuration for docker
    MYSQL_RANDOM_ROOT_PASSWORD = os.environ.get('MYSQL_RANDOM_ROOT_PASSWORD') or "yes"
    MYSQL_DATABASE = os.environ.get('MYSQL_DATABASE') or "iot_platform_db"
    MYSQL_USER = os.environ.get('MYSQL_USER') or "iot_platform"
    MYSQL_PASSWORD = os.environ.get('MYSQL_PASSWORD') or "<PASSWORD>"
    # mqtt configuration (anonymous broker on the default port)
    MQTT_CLIENT_ID = 'flask'
    # MQTT_BROKER_URL =
    MQTT_BROKER_PORT = 1883
    TEMPLATES_AUTO_RELOAD = True
    MQTT_USERNAME = ''
    MQTT_PASSWORD = ''
    MQTT_KEEPALIVE = 10
    MQTT_TLS_ENABLED = False
| StarcoderdataPython |
152057 | <reponame>cloudmesh/pbs<gh_stars>1-10
from cloudmesh_base.locations import config_file
from cloudmesh_base.ConfigDict import ConfigDict
from cloudmesh_base.logger import LOGGER
log = LOGGER(__file__)
class cmd3_check:

    @staticmethod
    def yaml(plugin):
        """
        Reads the cloudmesh cmd3.yaml file and tests whether the requested
        plugin module is included in it.  An example plugin name is
        "cloudmesh_pbs", to which ".plugins" is appended automatically if
        not already present.

        :return: True if the plugin is in the yaml file, else False
        """
        filename = config_file("/cmd3.yaml")
        config = ConfigDict(filename=filename)
        # Bug fix: the original only returned inside the "needs suffix"
        # branch, so a name already ending in ".plugins" fell through and
        # implicitly returned None instead of a boolean.
        if not plugin.endswith(".plugins"):
            plugin = plugin + ".plugins"
        return plugin in config["cmd3"]["modules"]
if __name__ == "__main__":
    # Ad-hoc manual check: "test" should print False, "cloudmesh_pbs" True
    # (assuming it is listed in the local cmd3.yaml).
    print (cmd3_check.yaml("test"))
    print (cmd3_check.yaml("cloudmesh_pbs"))
| StarcoderdataPython |
1915227 | import glob
import re
from collections import defaultdict
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.axes import Axes
from matplotlib.lines import Line2D
matplotlib.style.use("bmh")
tab20 = matplotlib.cm.get_cmap("tab20")
poss = np.linspace(0, 1, 20)[:6]
colors = [tab20(x) for x in poss]
temp_csvs = glob.glob("temp_schedulerv*/*_TL*_dt1000.csv")
time_logs = glob.glob("temp_scheduler_all/*_TL*_dt1000_run_sched*.log")
graphs = ("alexnet", "googlenet", "mobilenet", "resnet50", "squeezenet")
versions = ("3", "3.1", "4")
version_names = {"3": "AMTR", "3.1": "AMTRI", "4": "EE"}
TLs = ("85000", "999999")
def dd():
return defaultdict(dd)
def map_tl(tl):
    """LaTeX label for a thermal limit: 85000 -> 85 degrees, 999999 -> infinity."""
    labels = {85000: "85^\circ", 999999: "\infty"}
    return labels.get(int(tl))
# Collect per-(graph, version, TL) stats and a wide temperature DataFrame.
# NOTE(review): indentation reconstructed from the flattened source.
stats = defaultdict(dd)
temp_df = pd.DataFrame()
for temp_csv in temp_csvs:
    if "motivation" in temp_csv:
        continue
    version, graph, TL = re.findall(
        r"temp_schedulerv([\d_]+)/(\w+)_TL(\d+)_dt1000.csv", temp_csv
    )[0]
    # Directory names encode version with '_' instead of '.'.
    version = version.replace("_", ".")
    if TL not in TLs or version not in versions:
        continue
    df = pd.read_csv(temp_csv)
    average_temp = df["temp"].mean()
    # Percentage of samples above the 85C threshold (temps in millidegrees).
    crosses_threshold = len(df[df["temp"] > 85000]) / len(df["temp"]) * 100
    stats[graph][version][TL]["crosses_threshold"] = crosses_threshold
    df["temp"] /= 1000  # millidegrees -> degrees C
    stats[graph][version][TL]["temps"] = df
    temp_df[f"{graph}_v{version}_TL{TL}"] = df["temp"]

# Parse per-inference timing out of the scheduler run logs.
for time_log in time_logs:
    file_name = time_log.split("/")[1]
    if "motivation" in time_log:
        continue
    graph, TL, version = re.findall(
        r"(\w+)_TL(\d+)_dt\d+_run_sched([\d\.]+).log", file_name
    )[0]
    with open(time_log) as f:
        content = f.read()
    time = re.findall(r"([\d\.]+) per inference", content)[0]
    stats[graph][version][TL]["time_taken"] = time

# print(temp_df)
# Build the fixed column order used for the grouped boxplot below.
xtick_lables = []
required_columns = []
for graph in sorted(stats):
    for version in sorted(stats[graph]):
        for TL in sorted(stats[graph][version]):
            # xtick_lables.append(graph)
            # fig = plt.figure()
            # sns.lineplot(data=stats[graph][version][TL]["temps"], x="time", y="temp")
            # if int(TL) == 85000:
            #     plt.axhline(85, linestyle="--", color="r")
            # plt.xlabel("Time (sec)")
            # plt.ylabel("Temperature $(^\circ C)$")
            # plt.title(
            #     f"Execution scheduler {graph}, {version_names[version]}, ${{\\rm TL}} = {map_tl(TL)}$"
            # )
            # # plt.show()
            # print(f"Saving {version} {graph} {TL}")
            # fig.savefig(f"temp_schedulerv{version.replace('.', '_')}/{graph}_TL{TL}.pdf")
            # plt.close(fig)
            required_columns.append(f"{graph}_v{version}_TL{TL}")
temp_df = temp_df[required_columns]

# Grouped boxplot: 6 boxes (version x TL) per graph, whiskers at min/max.
fig = plt.figure(figsize=(12, 8))
ax = sns.boxplot(
    data=temp_df,
    dodge=True,
    palette=sns.color_palette("tab20", n_colors=6),
    whis=(0, 100),
    linewidth=1,
)
ax.set_xlabel("Graph")
m, M = ax.get_xlim()
# One centered tick per graph group, with vertical separators between groups.
xtick_lables = np.linspace(m, M, len(graphs) + 1)
ax.set_xticks([(x + x1) / 2 for x, x1 in zip(xtick_lables, xtick_lables[1:])])
ax.set_xticklabels(graphs, rotation=0)
for x in np.linspace(m, M, len(graphs) + 1)[1:-1]:
    ax.axvline(x=x, linewidth=1)
ax.axhline(y=85, color="r", alpha=0.5)  # thermal threshold reference line
ax.legend(
    [Line2D([0], [0], color=col, lw=4) for x, col in zip(poss, colors)],
    [
        f"{version_names[version]}, ${{\\rm TL}}={map_tl(TL)}$"
        for version in versions
        for TL in TLs
    ],
    loc="center left",
    bbox_to_anchor=(1, 0.3),
)
ax.set_ylabel(r"Temperature $(^\circ C)$")
plt.title("Temperature vs. Graph")
fig.subplots_adjust(right=0.82)
# plt.show()
print("Saving temperatures")
fig.savefig("temp_scheduler_temperatures.pdf")

# Flatten the stats into a MultiIndex DataFrame for the per-metric bar plots.
indexes = []
rows = []
for graph in sorted(stats):
    for version in sorted(stats[graph]):
        for TL in sorted(stats[graph][version]):
            data = stats[graph][version][TL]
            indexes.append((graph, version, float(TL) / 1000))
            rows.append(
                [
                    float(data["crosses_threshold"]),
                    float(data["time_taken"]),
                ]
            )
df = pd.DataFrame(
    rows,
    columns=["Crosses Threshold", "Time Taken"],
    index=pd.MultiIndex.from_tuples(indexes, names=["Graph", "Version", "TL"]),
)
# print(df.to_latex())

# Per-version accumulators for threshold/temperature/time deltas.
dct = defaultdict(list)
dat = defaultdict(list)
dat2 = defaultdict(list)
dtt = defaultdict(list)
def avg(xs):
    """Arithmetic mean of a non-empty sequence."""
    total = sum(xs)
    return total / len(xs)
# For each (graph, version), compare the TL=85000 run against the
# uncapped TL=999999 baseline and accumulate the deltas per version.
# NOTE(review): indentation reconstructed from the flattened source.
for graph in graphs:
    for version in versions:
        _data = stats[graph][version]
        # delta in % of samples over threshold
        _dct = _data["85000"]["crosses_threshold"] - _data["999999"]["crosses_threshold"]
        _dat = avg(_data["85000"]["temps"]["temp"])
        _dat2 = avg(_data["999999"]["temps"]["temp"])
        # relative change in per-inference time vs. the uncapped baseline
        _dtt = (
            float(_data["85000"]["time_taken"]) - float(_data["999999"]["time_taken"])
        ) / float(_data["999999"]["time_taken"])
        dct[version].append(_dct)
        dat[version].append(_dat)
        dat2[version].append(_dat2)
        dtt[version].append(_dtt)

# Print the per-version averages across all graphs.
for version in versions:
    print(">>>", version)
    print("% Change in Crosses Threshold", avg(dct[version]))
    print("% Change in Time taken", avg(dtt[version]) * 100)
    print("Average Temperature (85000)", avg(dat[version]))
    print("Average Temperature (999999)", avg(dat2[version]))
# One grouped bar chart per metric column, same color/legend scheme as the
# boxplot above.  NOTE(review): indentation reconstructed.
for column in df.columns:
    fig = plt.figure(figsize=(12, 8))
    ax: Axes = df.loc[:, column].plot(kind="bar", color=colors, stacked=True)
    ax.legend(
        [Line2D([0], [0], color=col, lw=4) for x, col in zip(poss, colors)],
        [
            f"{version_names[version]}, ${{\\rm TL}}={map_tl(TL)}$"
            for version in versions
            for TL in TLs
        ],
        loc="center left",
        bbox_to_anchor=(1, 0.5),
    )
    # Center one tick per graph group and draw group separators.
    m, M = ax.get_xlim()
    xtick_lables = np.linspace(m, M, len(graphs) + 1)
    ax.set_xticks([(x + x1) / 2 for x, x1 in zip(xtick_lables, xtick_lables[1:])])
    ax.set_xticklabels(graphs, rotation=0)
    for x in np.linspace(m, M, len(graphs) + 1)[1:-1]:
        ax.axvline(x=x)
    if column == "Crosses Threshold":
        ax.set_ylabel(column + " (%)")
    elif column == "Time Taken":
        ax.set_ylabel(column + " (sec)")
    ax.set_xlabel("Graph")
    plt.title(f"{column} vs. Graph")
    fig.subplots_adjust(right=0.82)
    # plt.show()
    print(f"Saving {column}")
    fig.savefig(f"temp_scheduler_{column.lower().replace(' ', '_')}.pdf")
| StarcoderdataPython |
11252792 | # Copyright (c) 2016, 2018 <NAME> <<EMAIL>>
try:
    import pkg_resources
except ImportError:
    pkg_resources = None


def is_namespace(modname):
    """Return True when *modname* is registered as a pkg_resources namespace package."""
    if pkg_resources is None:
        return False
    return modname in pkg_resources._namespace_packages
| StarcoderdataPython |
3507029 | <gh_stars>10-100
import numpy as np
from simple_convnet import convnet as cn
from scipy.optimize import approx_fprime
def _check_gradients(layer_args, input_shape):
    """Assert that the network's analytic parameter gradient matches a
    finite-difference approximation.

    layer_args: list of (layer_class, kwargs) pairs passed to cn.SoftmaxNet.
    input_shape: shape of a single input sample (without the batch axis).
    Raises AssertionError when the relative error exceeds 1e-3.
    """
    rand = np.random.RandomState(0)
    net = cn.SoftmaxNet(layer_args=layer_args, input_shape=input_shape, rand_state=rand)
    # Small random batch of 10 samples; scaled down to keep activations tame.
    x = rand.randn(*(10,)+net.input_shape)/100
    y = rand.randn(10) > 0
    by = net.binarize_labels(y)
    # Numerical gradient (finite differences) vs. the network's own gradient.
    g1 = approx_fprime(net.get_params(), net.cost_for_params, 1e-5, x, by)
    g2 = net.param_grad(x, by)
    # Largest absolute deviation, relative to the largest numerical component.
    err = np.max(np.abs(g1-g2))/np.abs(g1).max()
    # Function-call form works on Python 2 and 3 (the old `print err`
    # statement made the whole module a SyntaxError under Python 3).
    print(err)
    assert err < 1e-3, 'incorrect gradient!'
# Each test below gradient-checks one layer type (or stack) via
# _check_gradients; the final DenseLayer(num_nodes=2) provides the
# 2-class softmax output expected by cn.SoftmaxNet.
def test_dense_layer():
    layer_args = [(cn.DenseLayer, dict(num_nodes=20)),
                  (cn.DenseLayer, dict(num_nodes=2))]
    _check_gradients(layer_args, (10,))
def test_relu_layer():
    layer_args = [(cn.ReluLayer, dict()),
                  (cn.DenseLayer, dict(num_nodes=2))]
    _check_gradients(layer_args, (10,))
def test_sigmoid_layer():
    layer_args = [(cn.SigmoidLayer, dict()),
                  (cn.DenseLayer, dict(num_nodes=2))]
    _check_gradients(layer_args, (10,))
def test_conv_layer():
    # 8x8 RGB input, five 3x3 filters.
    layer_args = [(cn.ConvLayer, dict(num_filters=5, filter_shape=(3,3))),
                  (cn.DenseLayer, dict(num_nodes=2))]
    _check_gradients(layer_args, (8,8,3))
def test_convbias_layer():
    layer_args = [(cn.ConvLayer, dict(num_filters=5, filter_shape=(3,3))),
                  (cn.BiasLayer, dict()),
                  (cn.DenseLayer, dict(num_nodes=2))]
    _check_gradients(layer_args, (8,8,3))
def test_pool_layer():
    layer_args = [(cn.ConvLayer, dict(num_filters=5, filter_shape=(3,3))),
                  (cn.MeanPoolingLayer, dict(pool_size=2)),
                  (cn.DenseLayer, dict(num_nodes=2))]
    _check_gradients(layer_args, (8,8,3))
def test_deep():
    # Two conv/bias/nonlinearity/pool stages followed by two dense layers.
    layer_args = [(cn.ConvLayer, dict(num_filters=5, filter_shape=(3,3))),
                  (cn.BiasLayer, dict()),
                  (cn.ReluLayer, dict()),
                  (cn.MeanPoolingLayer, dict(pool_size=2)),
                  (cn.ConvLayer, dict(num_filters=5, filter_shape=(3,3))),
                  (cn.BiasLayer, dict()),
                  (cn.SigmoidLayer, dict()),
                  (cn.MeanPoolingLayer, dict(pool_size=2)),
                  (cn.DenseLayer, dict(num_nodes=10)),
                  (cn.BiasLayer, dict()),
                  (cn.DenseLayer, dict(num_nodes=2))]
    _check_gradients(layer_args, (18,18,3))
def test_fit():
    # End-to-end sanity check: fit a tiny net on a linearly separable
    # problem (label = x0 > 0.5) and require better-than-chance accuracy.
    layer_args = [(cn.DenseLayer, dict(num_nodes=4)),
                  (cn.DenseLayer, dict(num_nodes=2))]
    net = cn.SoftmaxNet(layer_args=layer_args, input_shape=(2,))
    num = 1000
    rand = np.random.RandomState(0)
    x = rand.rand(num,2)
    y = np.zeros(num)
    y[x[:,0]>0.5] = 1
    net.fit(x, y, batch_size=16, learn_rate=1, num_epoch=100, verbose=True)
    yp = net.predict(x)
    acc = np.mean(y==yp)
    # Loose bound: just confirms training moved away from chance (0.5).
    assert acc > 0.7
| StarcoderdataPython |
5029292 | # -*- coding: utf-8 -*-
import sys
import unittest
from glob import glob
sys.path.append('../loc_measure')
from loc_measure import LocMeasure
class TestExtension(unittest.TestCase):
    """LOC-counting tests: each test sets a language on LocMeasure and
    checks how many of the local 'code*.*' fixture files it counts.

    NOTE(review): depends on ../loc_measure/config.json and on fixture
    files in the working directory — run from this tests directory.
    """
    def setUp(self):
        # Fresh LocMeasure per test, configured from the project config.
        cfg_file = '../loc_measure/config.json'
        self.loc_measure = LocMeasure(cfg_file)
    def run_test(self, language, actual):
        """Count LOC for every fixture file under *language*; assert that
        exactly *actual* files produced a (non-None) count."""
        self.loc_measure.language = language
        code_path_list = glob('code*.*')
        results = []
        for code_path in code_path_list:
            loc = self.loc_measure.count(code_path)
            if loc is not None:
                results.append(loc)
        self.assertEqual(len(results), actual)
    def test_for_c_cpp(self):
        # Three fixtures match C/C++ (e.g. .c/.cpp/.h); all others have one.
        self.run_test('C/C++', 3)
    def test_for_python(self):
        self.run_test('Python', 1)
    def test_for_java(self):
        self.run_test('Java', 1)
    def test_for_ruby(self):
        self.run_test('Ruby', 1)
    def test_for_go(self):
        self.run_test('Go', 1)
    def test_for_r(self):
        self.run_test('R', 1)
    def test_for_shellscript(self):
        self.run_test('ShellScript', 1)
    def test_for_perl(self):
        self.run_test('Perl', 1)
    def test_for_php(self):
        self.run_test('PHP', 1)
    def test_for_lua(self):
        self.run_test('Lua', 1)
    def test_for_fortran(self):
        self.run_test('Fortran', 1)
    def test_for_html(self):
        self.run_test('HTML', 1)
    def test_for_css(self):
        self.run_test('CSS', 1)
    def test_for_scala(self):
        self.run_test('Scala', 1)
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1902374 | <filename>mxdc/widgets/plotter.py
import numpy
from gi.repository import Gtk
from matplotlib import cm, transforms
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
from matplotlib.colors import Normalize
from matplotlib.dates import MinuteLocator, SecondLocator
from matplotlib.figure import Figure
from matplotlib.ticker import FormatStrFormatter, ScalarFormatter
from mxdc.utils import misc
from mxdc.widgets import dialogs
GRID_COLORMAP = 'viridis'
GRID_INTERPOLATION = 'nearest' # nearest
class PlotterToolbar(NavigationToolbar2GTK3):
    """Matplotlib GTK3 navigation toolbar restyled with symbolic icons."""
    # (text, tooltip, icon-name stem, callback) overriding the stock items;
    # None rows become separators in the base class.
    toolitems = (
        ('Home', 'Reset original view', 'go-home', 'home'),
        ('Back', 'Back to previous view', 'go-previous', 'back'),
        ('Forward', 'Forward to next view', 'go-next', 'forward'),
        (None, None, None, None),
        ('Pan', 'Pan axes with left mouse, zoom with right', 'view-fullscreen', 'pan'),
        ('Zoom', 'Zoom to rectangle', 'zoom-fit-best', 'zoom'),
        (None, None, None, None),
        ('Save', 'Save the figure', 'media-floppy', 'save_figure'),
    )
    def __init__(self, canvas, window):
        super().__init__(canvas, window)
        # Swap each stock icon for its '-symbolic' variant so the toolbar
        # follows the GTK theme.
        # NOTE(review): indexes self.toolitems by the toolbar child position,
        # assuming a one-to-one correspondence (separators included) and no
        # extra trailing widgets that are ToolButtons — confirm against the
        # installed matplotlib backend version.
        for i, toolitem in enumerate(self):
            if isinstance(toolitem, Gtk.ToolButton):
                icon_name = f'{self.toolitems[i][2]}-symbolic'
                image = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.SMALL_TOOLBAR)
                toolitem.set_icon_widget(image)
    # (A superseded, commented-out hand-rolled `_init_toolbar` implementation
    # that duplicated the base-class behavior was removed here.)
class Plotter(Gtk.Alignment):
    """Live plotting widget embedding a matplotlib canvas and toolbar in GTK.

    Supports multi-axis line plots backed by a record array (optionally a
    ring buffer), plus a 2-D image "grid mode" for grid scans.
    """
    def __init__(self, loop=False, buffer_size=2500, xformat='%g', dpi=80):
        """
        :param loop: use the data buffer as a ring buffer
        :param buffer_size: maximum number of stored rows
        :param xformat: printf-style format for the x-axis tick labels
        :param dpi: figure resolution
        """
        super().__init__()
        self.set(0.5, 0.5, 1, 1)
        self.format_x = FormatStrFormatter(xformat)
        self.ring_buffer = loop
        self.buffer_size = buffer_size
        self.colormap = cm.get_cmap('Dark2')
        self.axis_space = 0.92  # fraction of width reserved per extra y axis
        self.cursor_line = None     # vertical cursor (on_mouse_motion)
        self.cursor_points = {}     # per-line cursor text markers
        self.plot_scales = {}       # axis name -> tuple of line names
        self.lines = {}             # line name -> Line2D
        self.axis = {}              # axis name -> Axes ('default' + extras)
        self.data_type = {}
        self.values = None          # RecordArray backing store (see clear())
        self.grid_mode = False
        self.grid_specs = {}
        self.grid_image = None
        self.grid_norm = Normalize()
        self.grid_snake = False
        self.fig = Figure(figsize=(10, 6), dpi=dpi)
        self.clear()
        self.canvas = FigureCanvas(self.fig)  # a Gtk.DrawingArea
        self.canvas.mpl_connect('motion_notify_event', self.on_mouse_motion)
        box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.toolbar = PlotterToolbar(self.canvas, dialogs.MAIN_WINDOW)
        box.pack_start(self.canvas, True, True, 0)
        box.pack_start(self.toolbar, False, False, 0)
        self.add(box)
        self.show_all()
    def clear(self, specs=None):
        """
        Clear the plot and configure it for the given specifications.
        :param specs: dictionary containing configuration parameters
        """
        self.fig.clear()
        self.fig.subplots_adjust(bottom=0.1, left=0.05, top=0.90, right=self.axis_space)
        specs = {} if specs is None else specs
        # Grid mode is selected by the scan type name containing 'grid'.
        self.grid_mode = 'grid' in specs.get('scan_type', '')
        self.data_type = specs.get('data_type')
        self.values = misc.RecordArray(self.data_type, size=self.buffer_size, loop=self.ring_buffer)
        self.cursor_line = None
        self.lines = {}
        self.grid_snake = specs.get('grid_snake', False)
        self.grid_specs = {}
        self.grid_image = None
        self.grid_norm = Normalize()
        ax = self.fig.add_subplot(111)
        ax.yaxis.tick_right()
        ax.yaxis.set_major_formatter(ScalarFormatter())
        self.axis = {'default': ax}
        if specs:
            # First data column is x; remaining columns become plotted lines,
            # grouped onto axes either by 'data_scale' or one axis per line.
            names = self.data_type['names'][1:]
            scales = specs.get('data_scale')
            if scales:
                self.plot_scales = {
                    ('default' if i == 0 else 'axis-{}'.format(i)): scale
                    for i, scale in enumerate(scales)
                }
            else:
                self.plot_scales = {
                    ('default' if i == 0 else 'axis-{}'.format(i)): (name,)
                    for i, name in enumerate(names)
                }
    def get_axis_for(self, name):
        """
        Return the axis for the named line
        :param name: line name
        :return: an axis object
        """
        return self.lines[name].axes
    def add_axis(self, name=None, label=""):
        """
        Add a named axis to the plot with the
        :param name: axis name
        :param label: axis label
        :return: matplotlib axis object
        """
        name = 'axis-{}'.format(len(self.axis)) if not name else name
        default = self.axis.get('default')
        index = len(self.axis) + 1
        # Each extra y axis is pushed further right; the subplot is shrunk
        # so the spines remain visible.
        axis_position = 1 / (self.axis_space ** (index - 1))
        self.fig.subplots_adjust(right=self.axis_space ** index)
        ax = self.fig.add_axes(default.get_position(), sharex=default, frameon=False)
        ax.spines['right'].set_position(('axes', axis_position))
        ax.yaxis.set_major_formatter(ScalarFormatter())
        ax.set_frame_on(True)
        ax.patch.set_visible(False)
        ax.yaxis.tick_right()
        ax.yaxis.set_label_position('right')
        ax.set_ylabel(label)
        # Hide duplicate x tick labels; the shared default axis shows them.
        for label in ax.get_xticklabels():
            label.set_visible(False)
        self.axis[name] = ax
        self.plot_scales[name] = ()
        return ax
    def add_line(self, xpoints, ypoints, style='-', name='', lw=1, axis="default", alpha=1.0, color=None, redraw=True,
                 markevery=[]):
        """
        Add a named line to the plot
        :param xpoints: initial x axis values
        :param ypoints: initial y axis values
        :param style: matplotlib line style string
        :param name: line name, optional
        :param lw: line width
        :param axis: optional name of axis of add line to
        :param alpha: line transparency
        :param color: line color
        :param redraw: whether to redraw the line or note
        :param markevery: matplotlit 'markevery' parameter, set to None to show markers at every point
        """
        # NOTE(review): the mutable default `markevery=[]` is never mutated
        # here (only forwarded to plot()), so it is benign — but do not
        # append to it in future changes.
        assert (len(xpoints) == len(ypoints))
        if axis not in self.axis:
            self.add_axis(axis)
        name = 'line-{}'.format(len(self.lines)) if not name else name
        color = self.colormap(len(self.lines)) if not color else color
        self.axis[axis].autoscale(False)
        xmin_current, xmax_current = self.axis[axis].get_xlim()
        ymin_current, ymax_current = self.axis[axis].get_ylim()
        line, = self.axis[axis].plot(
            xpoints, ypoints, '.', ls=style, lw=lw, markersize=8,
            label=name, alpha=alpha, markevery=markevery, color=color
        )
        # adjust axes limits as necessary
        xmin, xmax = misc.get_min_max(xpoints, ldev=0, rdev=0)
        ymin, ymax = misc.get_min_max(ypoints, ldev=1, rdev=1)
        xmin, xmax = min(xmin, xmin_current), max(xmax, xmax_current)
        ymin, ymax = min(ymin, ymin_current), max(ymax, ymax_current)
        line.axes.set_xlim(xmin, xmax)
        line.axes.set_ylim(ymin, ymax)
        self.lines[name] = line
        if name not in self.plot_scales[axis]:
            self.plot_scales[axis] += (name,)
        # With more than one initial point, register an interpolation
        # function so the cursor readout (on_mouse_motion) can evaluate it.
        if len(xpoints) > 1:
            self.values.add_func(name, xpoints, ypoints)
        if redraw:
            self.redraw()
    def add_point(self, row, redraw=True):
        """
        Add a row of scan points to the data table
        :param row: sequence of values to add
        :param redraw: Whether to redraw the plot
        """
        # NOTE(review): `numpy.nan in row` matches by identity/equality, so
        # NaNs computed elsewhere (distinct float objects) may slip through —
        # confirm whether an isnan() check was intended.
        if numpy.nan in row:
            return
        self.values.append(row)
        x_name = self.data_type['names'][0]
        if self.grid_mode:
            # no lines for grid mode
            self.update_grid_data()
        elif not self.lines:
            # First point: lazily create all axes and lines from plot_scales.
            count = 0
            for axis, lines in self.plot_scales.items():
                if axis != 'default':
                    self.add_axis(name=axis)
                for line in lines:
                    self.add_line(
                        self.values.data[x_name], self.values.data[line], color=self.colormap(count),
                        name=line, axis=axis, markevery=[-1]
                    )
                    count += 1
        else:
            # Subsequent points: update line data and grow the view limits.
            xmin, xmax = misc.get_min_max(self.values.data[x_name], ldev=0, rdev=0)
            for axis, lines in self.plot_scales.items():
                ymin = ymax = None
                ax = None
                for name in lines:
                    line = self.lines[name]
                    line.set_data(self.values.data[x_name], self.values.data[name])
                    ax = line.axes
                    ylo, yhi = misc.get_min_max(self.values.data[name], ldev=0.5, rdev=0.5)
                    if ymin is None:
                        ymin, ymax = ylo, yhi
                    else:
                        ymin, ymax = min(ymin, ylo), max(ymax, yhi)
                ymin, ymax = ymin, ymax  # NOTE(review): no-op line, likely leftover
                # adjust axes limits as necessary
                if ax is not None and xmin != xmax and ymin != ymax:
                    offset = (ymax - ymin) * .1
                    ax.set_ylim(ymin - offset, ymax + offset)
                    ax.set_xlim(xmin, xmax)
            if len(self.lines) > 1:
                default = self.axis.get('default')
                xmin_current, xmax_current = default.get_xlim()
                default.set_xlim(min(xmin, xmin_current), max(xmax, xmax_current))
        if redraw:
            self.redraw()
    def new_row(self, index):
        """
        Prepare for A new row of data
        :param index: row index for next row
        """
        if self.grid_mode and index > 1:
            # for slew grid scans, data needs to be padded/truncated
            y_name = self.data_type['names'][1]
            yo = self.values.data[y_name]
            # Row width = number of points sharing the first y value.
            x_size = (yo == yo[0]).sum()
            y_size = index
            pad = x_size * y_size - yo.shape[0]
            if pad == 0:
                return
            elif pad > 0:
                for i in range(pad):
                    self.values.append(self.values.data[-1])  # padding
            elif pad < 0:
                self.values.length = x_size * y_size
            self.update_grid_data()
    def update_grid_data(self):
        """
        Update the grid image values
        """
        # Columns: x position, y position, counts (intensity).
        x_name, y_name, counts_name = self.data_type['names'][:3]
        xo = self.values.data[x_name]
        yo = self.values.data[y_name]
        counts = self.values.data[counts_name]
        x_min, x_max = xo.min(), xo.max()
        y_min, y_max = yo.min(), yo.max()
        self.grid_norm.autoscale(counts)
        # Infer the grid shape: row width from repeats of the first y value.
        xsize = (yo == yo[0]).sum()
        ysize = int(numpy.ceil(yo.shape[0] / xsize))
        # pad unfilled values with nan
        blanks = xsize * ysize - counts.shape[0]
        if blanks:
            counts = numpy.pad(counts, (0, blanks), 'constant', constant_values=(numpy.nan, numpy.nan))
        count_data = numpy.resize(counts, (ysize, xsize))
        # flip alternate rows
        if self.grid_snake:
            count_data[1::2, :] = count_data[1::2, ::-1]
        self.grid_specs.update({
            'x': xo,
            'y': yo,
            'counts': count_data,
        })
        extent = [
            x_min, x_max,
            y_min, y_max,
        ]
        if self.grid_image is None:
            default = self.axis.get('default')
            self.grid_image = default.imshow(
                self.grid_specs['counts'], cmap=cm.get_cmap(GRID_COLORMAP), origin='lower',
                norm=self.grid_norm, extent=extent, aspect='auto',
                interpolation=GRID_INTERPOLATION,
            )
        else:
            self.grid_image.set_data(self.grid_specs['counts'])
            self.grid_image.set_extent(extent)
        # set axis limits
        self.grid_image.axes.set_xlim(extent[:2])
        self.grid_image.axes.set_ylim(extent[-2:])
        self.redraw()
    def get_records(self):
        """
        Return the data array manager for the plot
        """
        return self.values
    def set_labels(self, title="", x_label="", y1_label=""):
        # Set axis labels; NOTE(review): the `title` parameter is accepted
        # but not used here — confirm whether it should set the figure title.
        default = self.axis.get('default')
        default.set_xlabel(x_label, ha='right', va='top')
        default.set_ylabel(y1_label)
        default.xaxis.set_label_coords(1.0, -0.075)
    def set_time_labels(self, labels, fmt, maj_int, min_int):
        # Format the x axis as time: major ticks every `maj_int` minutes,
        # minor every `min_int` seconds; `labels` provides the tick datetimes.
        default = self.axis.get('default')
        default.xaxis.set_major_locator(MinuteLocator(interval=maj_int))
        default.xaxis.set_minor_locator(SecondLocator(interval=min_int))
        if len(default.xaxis.get_major_ticks()) < len(labels):
            labels.pop(0)
        default.set_xticklabels([d != ' ' and d.strftime(fmt) or '' for d in labels])
    def redraw(self):
        # Refresh the legend (line plots only) and schedule a canvas redraw.
        if not self.grid_mode:
            lines = list(self.lines.values())
            labels = list(self.lines.keys())
            self.axis['default'].legend(
                lines, labels, loc='upper left', bbox_to_anchor=(0, 1.075), ncol=8, fancybox=False,
                framealpha=0.0, edgecolor='inherit', borderaxespad=0, fontsize=9
            )
        self.canvas.draw_idle()
    def on_mouse_motion(self, event):
        # Track the pointer with a vertical cursor line and per-line value
        # read-outs (interpolated via self.values); clean them up on leave.
        default = self.axis.get('default')
        if event.inaxes and self.lines and not self.grid_mode:
            x, y = event.xdata, event.ydata
            if self.cursor_line is None:
                # First motion event: create cursor and one text marker per line.
                self.cursor_line = default.axvline(x, lw=1, color='#3a7ca8', antialiased=None)
                for axis, lines in self.plot_scales.items():
                    for name in lines:
                        y_value = self.values(name, x)
                        ax = self.axis[axis]
                        if name in self.lines:
                            line = self.lines[name]
                            trans = transforms.blended_transform_factory(
                                ax.get_yticklabels()[0].get_transform(), ax.transData
                            )
                            self.cursor_points[name] = ax.text(
                                1, y_value, "< {}".format(name), color=line.get_color(), transform=trans, ha="left",
                                va="center"
                            )
            else:
                # NOTE(review): set_xdata with a scalar is rejected by
                # matplotlib >= 3.7 (expects a sequence) — confirm the pinned
                # matplotlib version.
                self.cursor_line.set_xdata(x)
                for axis, lines in self.plot_scales.items():
                    for name in lines:
                        if name in self.lines:
                            y_value = self.values(name, x)
                            if name in self.cursor_points:
                                self.cursor_points[name].set_position((1, y_value))
            self.canvas.draw_idle()
        else:
            if self.cursor_line:
                self.cursor_line.remove()
                self.cursor_line = None
                for name in list(self.cursor_points.keys()):
                    mark = self.cursor_points.pop(name)
                    mark.remove()
                self.canvas.draw_idle()
| StarcoderdataPython |
201597 | <reponame>CMLivingston/pants<gh_stars>0
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from builtins import str
from contextlib import contextmanager
from future.utils import PY3
from pants.backend.python.interpreter_cache import PythonInterpreter, PythonInterpreterCache
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir
from pants_test.backend.python.interpreter_selection_utils import (PY_27, PY_36,
python_interpreter_path,
skip_unless_python27_and_python36_present)
from pants_test.test_base import TestBase
from pants_test.testutils.pexrc_util import setup_pexrc_with_pex_python_path
class TestInterpreterCache(TestBase):
  """Tests for PythonInterpreterCache setup, filtering and cache hygiene."""
  @staticmethod
  def _make_bad_requirement(requirement):
    """Turns a requirement that passes into one we know will fail.
    E.g. 'CPython==2.7.5' becomes 'CPython==99.7.5'
    """
    if PY3:
      return str(requirement).replace('==3', '==99')
    else:
      return str(requirement).replace('==2.', '==99')
  def setUp(self):
    super(TestInterpreterCache, self).setUp()
    # The interpreter running the tests, used as the known-good reference.
    self._interpreter = PythonInterpreter.get()
  def _create_interpreter_cache(self, setup_options=None):
    # Reset subsystem state so each test sees a fresh python-setup config.
    Subsystem.reset(reset_options=True)
    self.context(for_subsystems=[PythonInterpreterCache], options={
      'python-setup': setup_options,
    })
    return PythonInterpreterCache.global_instance()
  @contextmanager
  def _setup_cache(self, constraints=None, search_paths=None):
    # Yields (cache, cache_dir) backed by a temporary directory.
    with temporary_dir() as path:
      cache = self._setup_cache_at(path, constraints=constraints, search_paths=search_paths)
      yield cache, path
  def _setup_cache_at(self, path, constraints=None, search_paths=None):
    # Build a cache rooted at `path`, passing through optional constraints
    # and interpreter search paths.
    setup_options = {'interpreter_cache_dir': path}
    if constraints is not None:
      setup_options.update(interpreter_constraints=constraints)
    if search_paths is not None:
      setup_options.update(interpreter_search_paths=search_paths)
    return self._create_interpreter_cache(setup_options=setup_options)
  def test_cache_setup_with_no_filters_uses_repo_default_excluded(self):
    # A constraint that matches nothing yields an empty interpreter list.
    bad_interpreter_requirement = self._make_bad_requirement(self._interpreter.identity.requirement)
    with self._setup_cache(constraints=[bad_interpreter_requirement]) as (cache, _):
      self.assertEqual([], cache.setup())
  def test_cache_setup_with_no_filters_uses_repo_default(self):
    # No constraints: the current interpreter must be discovered.
    with self._setup_cache(constraints=[]) as (cache, _):
      self.assertIn(self._interpreter.identity, [interp.identity for interp in cache.setup()])
  def test_cache_setup_with_filter_overrides_repo_default(self):
    # Explicit setup(filters=...) wins over the (failing) repo constraint.
    repo_default_requirement = str(self._interpreter.identity.requirement)
    bad_interpreter_requirement = self._make_bad_requirement(repo_default_requirement)
    with self._setup_cache(constraints=[bad_interpreter_requirement]) as (cache, _):
      self.assertIn(self._interpreter.identity,
                    [interp.identity
                     for interp in cache.setup(filters=(repo_default_requirement,))])
  @skip_unless_python27_and_python36_present
  def test_interpereter_cache_setup_using_pex_python_paths(self):
    """Test cache setup using interpreters from a mocked PEX_PYTHON_PATH."""
    # NOTE(review): "interpereter" typo in the test name; left as-is since
    # renaming would change what test selectors match.
    py27_path, py36_path = python_interpreter_path(PY_27), python_interpreter_path(PY_36)
    with setup_pexrc_with_pex_python_path([py27_path, py36_path]):
      with self._setup_cache(constraints=['CPython>=2.7,<3'],
                             search_paths=['<PEXRC>']) as (cache, _):
        self.assertIn(py27_path, {pi.binary for pi in cache.setup()})
      with self._setup_cache(constraints=['CPython>=3.6,<4'],
                             search_paths=['<PEXRC>']) as (cache, _):
        self.assertIn(py36_path, {pi.binary for pi in cache.setup()})
  def test_setup_cached_warm(self):
    # A second cache over the same directory reads back what setup() stored.
    with self._setup_cache() as (cache, path):
      interpreters = cache.setup()
      self.assertGreater(len(interpreters), 0)
      cache = self._setup_cache_at(path)
      self.assertEqual(sorted(interpreters), sorted(list(cache._setup_cached())))
  def test_setup_cached_cold(self):
    # An empty cache directory yields no cached interpreters.
    with self._setup_cache() as (cache, _):
      self.assertEqual([], list(cache._setup_cached()))
  def test_interpreter_from_relpath_purges_stale_interpreter(self):
    """
    Simulates a stale interpreter cache and tests that _interpreter_from_relpath
    properly detects it and removes the stale dist directory.
    See https://github.com/pantsbuild/pants/issues/3416 for more info.
    """
    with temporary_dir() as temp_dir:
      # Setup a interpreter distribution that we can safely mutate.
      test_interpreter_binary = os.path.join(temp_dir, 'python')
      os.symlink(sys.executable, test_interpreter_binary)
      with self._setup_cache(constraints=[]) as (cache, path):
        # Setup cache for test interpreter distribution.
        identity_str = str(PythonInterpreter.from_binary(test_interpreter_binary).identity)
        cached_interpreter_dir = os.path.join(cache._cache_dir, identity_str)
        safe_mkdir(cached_interpreter_dir)
        cached_symlink = os.path.join(cached_interpreter_dir, 'python')
        os.symlink(test_interpreter_binary, cached_symlink)
        # Remove the test interpreter binary from filesystem and assert that the cache is purged.
        os.remove(test_interpreter_binary)
        self.assertEqual(os.path.exists(test_interpreter_binary), False)
        self.assertEqual(os.path.exists(cached_interpreter_dir), True)
        cache._interpreter_from_relpath(identity_str)
        self.assertEqual(os.path.exists(cached_interpreter_dir), False)
| StarcoderdataPython |
6417124 | <filename>server/mysite/studygroups/apps.py
from django.apps import AppConfig
class StudygroupsConfig(AppConfig):
    """Django application configuration for the `studygroups` app."""
    name = 'studygroups'
| StarcoderdataPython |
3281692 | # https://leetcode.com/problems/contains-duplicate/
from typing import List
class Solution:
    def containsDuplicateSet(self, nums: List[int]) -> bool:
        """
        Set approach: a list contains a duplicate iff deduplicating it
        shrinks it.
        Time complexity: O(n)
        Space complexity: O(n)
        """
        return len(set(nums)) != len(nums)

    def containsDuplicateSort(self, nums: List[int]) -> bool:
        """
        Sorting approach (sorts *nums* in place, as before): after sorting,
        any duplicate must sit next to its twin.
        Time complexity: O(n log n)
        Space complexity: O(1)
        """
        nums.sort()
        return any(nums[i] == nums[i + 1] for i in range(len(nums) - 1))
# === tests ===
# Exercise both implementations: the original ran the set-based version four
# times (two duplicated pairs) and never called containsDuplicateSort.
print(Solution().containsDuplicateSet([1, 2, 3, 1]))  # True
print(Solution().containsDuplicateSet([1, 2, 3, 4]))  # False
print(Solution().containsDuplicateSort([1, 2, 3, 1]))  # True
print(Solution().containsDuplicateSort([1, 2, 3, 4]))  # False
| StarcoderdataPython |
5014664 | <reponame>Norwa9/missing_modalities<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
class LSTMEncoder(nn.Module):
    """Single-layer, one-directional LSTM encoder.

    Encodes a (batch, seq_len, input_size) sequence into a single
    (batch, hidden_size) embedding using one of three pooling methods:
    'last' (final hidden state), 'maxpool' (max over time steps), or
    'attention' (learned attention over the hidden states).
    """
    def __init__(self, input_size, hidden_size, embd_method='last'):
        super(LSTMEncoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True)
        assert embd_method in ['maxpool', 'attention', 'last']
        self.embd_method = embd_method
        if self.embd_method == 'attention':
            # Attention vector u (hidden_size x 1) and the W/tanh projection.
            self.attention_vector_weight = nn.Parameter(torch.Tensor(hidden_size, 1))
            self.attention_layer = nn.Sequential(
                nn.Linear(self.hidden_size, self.hidden_size),
                nn.Tanh(),
            )
            self.softmax = nn.Softmax(dim=-1)

    def embd_attention(self, r_out, h_n):
        '''Attention pooling over the per-step hidden states.

        Based on (links from the original author's comments):
        https://blog.csdn.net/dendi_hust/article/details/94435919
        https://blog.csdn.net/fkyyly/article/details/82501126
        Paper: Hierarchical Attention Networks for Document Classification
        formulation: lstm_output * softmax(u * tanh(W*lstm_output + Bias))
        W and Bias form the projection (Bias optional); u is the attention
        vector with size equal to hidden_size.

        NOTE(review): softmax is applied over dim=-1 of a
        [batch, seq_len, 1] tensor, which normalizes a size-1 axis and
        therefore yields all-ones weights; dim=1 (over seq_len) was likely
        intended — confirm against trained checkpoints before changing.
        '''
        hidden_reps = self.attention_layer(r_out)                    # [batch_size, seq_len, hidden_size]
        atten_weight = (hidden_reps @ self.attention_vector_weight)  # [batch_size, seq_len, 1]
        atten_weight = self.softmax(atten_weight)                    # [batch_size, seq_len, 1]
        # [batch_size, seq_len, hidden_size] * [batch_size, seq_len, 1] -> sum over time
        sentence_vector = torch.sum(r_out * atten_weight, dim=1)     # [batch_size, hidden_size]
        return sentence_vector

    def embd_maxpool(self, r_out, h_n):
        # r_out: [batch_size, seq_len, hidden_size]; transpose so the pooling
        # window (the whole sequence) runs over the time dimension.
        in_feat = r_out.transpose(1, 2)
        embd = F.max_pool1d(in_feat, in_feat.size(2), in_feat.size(2))
        return embd.squeeze()

    def embd_last(self, r_out, h_n):
        # Only valid for a single-layer, single-direction LSTM.
        return h_n.squeeze()

    def forward(self, x):
        '''
        x shape: batch, seq_len, input_size (batch_first=True)
        r_out shape: batch, seq_len, num_directions * hidden_size
        h_n and h_c shape: num_layers * num_directions, batch, hidden_size
        '''
        r_out, (h_n, h_c) = self.rnn(x)
        # Dispatch to embd_last / embd_maxpool / embd_attention.
        embd = getattr(self, 'embd_' + self.embd_method)(r_out, h_n)
        return embd
11261342 | <gh_stars>0
from arm.logicnode.arm_nodes import *
class PickObjectNode(ArmLogicTreeNode):
    """Use to pick the rigid body in a location using the screen
    coordinates (only x/y values are used)."""
    bl_idname = 'LNPickObjectNode'
    bl_label = 'Pick Rigid Body'
    arm_version = 1
    def init(self, context):
        """Declare sockets: screen-space input, picked body + hit output."""
        super(PickObjectNode, self).init(context)
        self.add_input('NodeSocketVector', 'Screen Coords')
        self.add_output('ArmNodeSocketObject', 'Rigid Body')
        self.add_output('NodeSocketVector', 'Hit')
# Register the node under the physics "ray" section of the node menu.
add_node(PickObjectNode, category=PKG_AS_CATEGORY, section='ray')
| StarcoderdataPython |
1792891 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
from ovs.log.logHandler import LogHandler
logger = LogHandler('extensions', name='exportfs')
class Nfsexports(object):
    """
    Basic management for /etc/exports

    Reads/rewrites the exports file and drives `exportfs`/`rpc.mountd`
    through sudo.  All methods operate on the live system configuration.
    """
    def __init__(self):
        self._exportsFile = '/etc/exports'
        self._cmd = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs']
        self._restart = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs', '-ra']
        self._rpcmountd_stop = ['/usr/bin/sudo', '-u', 'root', 'pkill', 'rpc.mountd']
        self._rpcmountd_start = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/rpc.mountd', '--manage-gids']

    def _slurp(self):
        """
        Read from /etc/exports

        Returns a list of dicts with keys 'dir', 'network' and 'params',
        skipping blank lines and '#' comments.
        """
        # Context manager guarantees the file is closed even on parse errors.
        with open(self._exportsFile, 'r') as f:
            dlist = [line for line in f if not re.match(r'^\s*$', line)]
        dlist = [i.strip() for i in dlist if not i.startswith('#')]
        # Split "dir network(params)" into its three fields.
        dlist = [re.split(r'\s+|\(|\)', i) for i in dlist]
        keys = ['dir', 'network', 'params']
        return [dict(zip(keys, line)) for line in dlist]

    def add(self, directory, network, params):
        """
        Add entry to /etc/exports
        @param directory: directory to export
        @param network: network range allowed
        @param params: params for export (eg, 'ro,async,no_root_squash,no_subtree_check')
        """
        for entry in self._slurp():
            if entry['dir'] == directory:
                logger.info('Directory already exported, to export with different params please first remove')
                return
        with open(self._exportsFile, 'a') as f:
            f.write('%s %s(%s)\n' % (directory, network, params))

    def remove(self, directory):
        """
        Remove entry from /etc/exports

        Silently does nothing when the directory is not present.
        """
        entries = self._slurp()
        for entry in entries:
            if entry['dir'] == directory:
                entries.remove(entry)
                with open(self._exportsFile, 'w') as f:
                    for e in entries:
                        f.write("%s %s(%s) \n" % (e['dir'], e['network'], e['params']))
                return

    def list_exported(self):
        """
        List the current exported filesystems

        Returns a dict mapping directory -> client spec as reported by exportfs.
        """
        exports = {}
        # NOTE(review): on Python 3 check_output returns bytes; the str regex
        # below then raises TypeError — confirm the target interpreter before
        # adding a decode step.
        output = subprocess.check_output(self._cmd)
        for export in re.finditer(r'(\S+?)[\s\n]+(\S+)\n?', output):
            exports[export.group(1)] = export.group(2)
        return exports

    def unexport(self, directory):
        """
        Unexport a filesystem
        """
        cmd = list(self._cmd)
        exports = self.list_exported()
        if directory not in exports:
            logger.info('Directory %s currently not exported' % directory)
            return
        # exportfs prints '<world>' for unrestricted exports; pass '*' back.
        logger.info('Unexporting {}:{}'.format(exports[directory] if exports[directory] != '<world>' else '*', directory))
        cmd.extend(['-u', '{}:{}'.format(exports[directory] if exports[directory] != '<world>' else '*', directory)])
        subprocess.call(cmd)

    def export(self, directory, network='*'):
        """
        Export a filesystem
        """
        cmd = list(self._cmd)
        exports = self.list_exported()
        if directory in exports:
            logger.info('Directory already exported with options %s' % exports[directory])
            return
        logger.info('Exporting {}:{}'.format(network, directory))
        cmd.extend(['-v', '{}:{}'.format(network, directory)])
        subprocess.call(cmd)
        # Re-sync the kernel export table after the change.
        subprocess.call(self._restart)

    def trigger_rpc_mountd(self):
        # Restart rpc.mountd so it picks up the new export configuration.
        subprocess.call(self._rpcmountd_stop)
        subprocess.call(self._rpcmountd_start)
| StarcoderdataPython |
6564091 | # Copyright (c) 2013, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate, cstr
from frappe import _
import ast
import datetime
def execute(filters=None):
columns, data = [], []
columns = [
{
"fieldname": "fleet_no",
"label": _("Fleet No"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "reg_no",
"label": _("Reg. No"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "driver_name",
"label": _("Driver Name"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "contact",
"label": _("Contact"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "cargo_type",
"label": _("Cargo Type"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "destination",
"label": _("Destination"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "client",
"label": _("Client"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "route",
"label": _("Route"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "dispatch_date",
"label": _("Dispatch Date"),
"fieldtype": "Date",
"width": 100
},
{
"fieldname": "offloading_date",
"label": _("Offloading Date"),
"fieldtype": "Date",
"width": 100
},
{
"fieldname": "position",
"label": _("Position"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "next_cargo_allocation",
"label": _("Next Cargo Allocation"),
"fieldtype": "Data",
"width": 100
}
]
today = datetime.date.today().strftime("%Y-%m-%d")
yesterday = datetime.date.today() - datetime.timedelta(1)
day_before = datetime.date.today() - datetime.timedelta(2)
yesterday = yesterday.strftime("%Y-%m-%d")
day_before = day_before.strftime("%Y-%m-%d")
data = frappe.db.sql('''SELECT
`tabVehicle`.fleet_number AS fleet_no,
CONCAT(`tabVehicle`.number_plate, '/', `tabTrailer`.number_plate) AS reg_no,
`tabEmployee`.employee_name AS driver_name,
`tabEmployee`.cell_number AS contact,
CASE
WHEN main_trip.name IS NOT NULL THEN main_trip.main_goods_description
WHEN return_trip.name IS NOT NULL THEN return_trip.return_goods_description
WHEN main_trip_offloaded.name IS NOT NULL THEN main_trip_offloaded.main_goods_description
WHEN return_trip_offloaded.name IS NOT NULL THEN return_trip_offloaded.return_goods_description
END AS cargo_type,
CASE
WHEN main_trip.name IS NOT NULL THEN main_trip.main_cargo_destination_city
WHEN return_trip.name IS NOT NULL THEN return_trip.return_cargo_destination_city
WHEN main_trip_offloaded.name IS NOT NULL THEN main_trip_offloaded.main_cargo_destination_city
WHEN return_trip_offloaded.name IS NOT NULL THEN return_trip_offloaded.return_cargo_destination_city
END AS destination,
CASE
WHEN main_trip.name IS NOT NULL THEN main_trip.main_customer
WHEN return_trip.name IS NOT NULL THEN return_trip.return_customer
WHEN main_trip_offloaded.name IS NOT NULL THEN main_trip_offloaded.main_customer
WHEN return_trip_offloaded.name IS NOT NULL THEN return_trip_offloaded.return_customer
END AS client,
CASE
WHEN UPPER(main_trip.main_cargo_location_country) = 'TANZANIA' AND UPPER(main_trip.main_cargo_destination_country) <> 'TANZANIA' THEN 'Import'
WHEN UPPER(return_trip.return_cargo_location_country) = 'TANZANIA' AND UPPER(return_trip.return_cargo_destination_country) <> 'TANZANIA' THEN 'Import'
WHEN UPPER(main_trip.main_cargo_location_country) <> 'TANZANIA' AND UPPER(main_trip.main_cargo_destination_country) = 'TANZANIA' THEN 'Export'
WHEN UPPER(return_trip.return_cargo_location_country) <> 'TANZANIA' AND UPPER(return_trip.return_cargo_destination_country) = 'TANZANIA' THEN 'Export'
WHEN UPPER(main_trip_offloaded.main_cargo_location_country) = 'TANZANIA' AND UPPER(main_trip_offloaded.main_cargo_destination_country) <> 'TANZANIA' THEN 'Import'
WHEN UPPER(return_trip_offloaded.return_cargo_location_country) = 'TANZANIA' AND UPPER(return_trip_offloaded.return_cargo_destination_country) <> 'TANZANIA' THEN 'Import'
WHEN UPPER(main_trip_offloaded.main_cargo_location_country) <> 'TANZANIA' AND UPPER(main_trip_offloaded.main_cargo_destination_country) = 'TANZANIA' THEN 'Export'
WHEN UPPER(return_trip_offloaded.return_cargo_location_country) <> 'TANZANIA' AND UPPER(return_trip_offloaded.return_cargo_destination_country) = 'TANZANIA' THEN 'Export'
END AS route,
CASE
WHEN main_trip.name IS NOT NULL THEN (SELECT loading_date FROM `tabRoute Steps Table` WHERE parent = main_trip.name \
AND parentfield = 'main_route_steps' AND loading_date IS NOT NULL LIMIT 1)
WHEN return_trip.name IS NOT NULL THEN (SELECT loading_date FROM `tabRoute Steps Table` WHERE parent = return_trip.name \
AND parentfield = 'return_route_steps' AND loading_date IS NOT NULL LIMIT 1)
WHEN main_trip_offloaded.name IS NOT NULL THEN (SELECT loading_date FROM `tabRoute Steps Table` WHERE parent = main_trip_offloaded.name \
AND parentfield = 'main_route_steps' AND loading_date IS NOT NULL LIMIT 1)
WHEN return_trip_offloaded.name IS NOT NULL THEN (SELECT loading_date FROM `tabRoute Steps Table` WHERE parent = return_trip_offloaded.name \
AND parentfield = 'return_route_steps' AND loading_date IS NOT NULL LIMIT 1)
END AS dispatch_date,
CASE
WHEN main_trip.name IS NOT NULL AND return_trip.name IS NOT NULL THEN NULL
WHEN main_trip_offloaded.name IS NOT NULL THEN (SELECT offloading_date FROM `tabRoute Steps Table` WHERE parent = main_trip_offloaded.name \
AND parentfield = 'main_route_steps' AND offloading_date IS NOT NULL LIMIT 1)
WHEN return_trip_offloaded.name IS NOT NULL THEN (SELECT offloading_date FROM `tabRoute Steps Table` WHERE parent = return_trip_offloaded.name \
AND parentfield = 'return_route_steps' AND offloading_date IS NOT NULL LIMIT 1)
END AS offloading_date,
CASE
WHEN main_trip.name IS NOT NULL THEN (SELECT `tabReporting Status Table`.status FROM `tabReporting Status Table` WHERE \
`tabReporting Status Table`.parenttype = 'Vehicle Trip' AND `tabReporting Status Table`.parent = main_trip.name AND \
`tabReporting Status Table`.parentfield = 'main_reporting_status' ORDER BY `tabReporting Status Table`.datetime DESC LIMIT 1)
WHEN return_trip.name IS NOT NULL THEN (SELECT `tabReporting Status Table`.status FROM `tabReporting Status Table` WHERE \
`tabReporting Status Table`.parenttype = 'Vehicle Trip' AND `tabReporting Status Table`.parent = return_trip.name AND \
`tabReporting Status Table`.parentfield = 'return_reporting_status' ORDER BY `tabReporting Status Table`.datetime DESC LIMIT 1)
WHEN main_trip_offloaded.name IS NOT NULL THEN (SELECT `tabReporting Status Table`.status FROM `tabReporting Status Table` WHERE \
`tabReporting Status Table`.parenttype = 'Vehicle Trip' AND `tabReporting Status Table`.parent = main_trip_offloaded.name AND \
`tabReporting Status Table`.parentfield = 'main_reporting_status' ORDER BY `tabReporting Status Table`.datetime DESC LIMIT 1)
WHEN return_trip_offloaded.name IS NOT NULL THEN (SELECT `tabReporting Status Table`.status FROM `tabReporting Status Table` WHERE \
`tabReporting Status Table`.parenttype = 'Vehicle Trip' AND `tabReporting Status Table`.parent = return_trip_offloaded.name AND \
`tabReporting Status Table`.parentfield = 'return_reporting_status' ORDER BY `tabReporting Status Table`.datetime DESC LIMIT 1)
END AS position,
`tabTransport Assignment`.parenttype AS next_assigned_parenttype,
`tabTransport Assignment`.parent AS next_assigned_parent
FROM
`tabVehicle`
LEFT JOIN
`tabTrailer` ON `tabTrailer`.name = `tabVehicle`.default_trailer
LEFT JOIN
`tabEmployee` ON `tabEmployee`.name = `tabVehicle`.driver
LEFT JOIN
`tabVehicle Trip` AS main_trip ON main_trip.vehicle = `tabVehicle`.name AND main_trip.status = 'En Route'
LEFT JOIN
`tabVehicle Trip` AS return_trip ON return_trip.vehicle = `tabVehicle`.name AND return_trip.status = 'En Route - Returning'
LEFT JOIN
`tabVehicle Trip` AS main_trip_offloaded ON main_trip_offloaded.vehicle = `tabVehicle`.name AND main_trip_offloaded.status = 'Main Trip Offloaded' AND (SELECT loading_date FROM `tabRoute Steps Table` WHERE \
parent = main_trip_offloaded.name AND parentfield = 'main_route_steps' AND offloading_date IS NOT NULL LIMIT 1) IN (%(today)s, %(yesterday)s, %(day_before)s)
LEFT JOIN
`tabVehicle Trip` AS return_trip_offloaded ON return_trip_offloaded.vehicle = `tabVehicle`.name AND return_trip_offloaded.status = 'Main Trip Offloaded' AND (SELECT loading_date FROM `tabRoute Steps Table` WHERE \
parent = return_trip_offloaded.name AND parentfield = 'return_route_steps' AND offloading_date IS NOT NULL LIMIT 1) IN (%(today)s, %(yesterday)s, %(day_before)s)
LEFT JOIN
`tabTransport Assignment` ON `tabTransport Assignment`.name = (SELECT name FROM `tabTransport Assignment` WHERE assigned_vehicle = `tabVehicle`.name \
AND status = 'Not Processed' ORDER BY expected_loading_date ASC LIMIT 1)
LEFT JOIN
`tabTransport Request` ON `tabTransport Request`.name = `tabTransport Assignment`.parent AND `tabTransport Assignment`.parenttype = 'Transport Request'
''', {'today': today, 'yesterday': yesterday, 'day_before': day_before}, as_dict=1)
for row in data:
if row.next_assigned_parenttype and row.next_assigned_parent:
next_assigned = None
if row.next_assigned_parenttype == 'Import':
next_assigned = frappe.get_doc('Import', row.next_assigned_parent).customer
elif row.next_assigned_parenttype == 'Transport Request':
next_assigned = frappe.get_doc('Transport Request', row.next_assigned_parent).customer
row.update({"next_cargo_allocation": next_assigned})
return columns, data
| StarcoderdataPython |
11310583 | <filename>cloudrunner_server/api/__init__.py
# Public API version string for the cloudrunner_server package.
VERSION = '0.9'
| StarcoderdataPython |
3259577 | import json
import logging
import os
import re
from backup_initialize.lookup import Lookup
logger = logging.getLogger(__name__)
class InvalidJsonBackupsConfigurationArray(Exception):
    """Raised when the 'BackupConfig' JSON array is missing from a configuration file."""

    def __init__(self):
        # Idiomatic super() call instead of the explicit Exception.__init__(self, ...).
        super().__init__('Failed to find valid backup configuration json array.')
class InvalidBackupConfiguration(Exception):
    """Raised when one or more backup configuration settings are invalid."""

    def __init__(self):
        # Idiomatic super() call instead of the explicit Exception.__init__(self, ...).
        super().__init__('Backup configuration is invalid or contains invalid settings.')
def get_backup_config(backup_config_path):
    """Load a backup configuration file and return its 'BackupConfig' array.

    The path is resolved relative to this package's ``settings/backup_configs``
    directory.  Raises on unreadable/invalid JSON or a missing file, and raises
    InvalidJsonBackupsConfigurationArray when the 'BackupConfig' key is absent.
    """
    config_file = os.path.abspath(os.path.join(
        os.path.dirname(__file__), '..', 'settings', 'backup_configs', backup_config_path))
    logger.debug(
        f'Attempting to load backup configuration: {json.dumps({"File": config_file})}')
    try:
        with open(config_file, 'r') as handle:
            raw_config = json.load(handle)
    except json.JSONDecodeError as error:
        logger.error(
            f'No valid JSON data found when attempting to load backup configuration: {json.dumps({"File": config_file})}')
        raise error
    except FileNotFoundError:
        logger.error(
            f'Backup configuration not found: {json.dumps({"File": config_file})}')
        raise
    # Guard clause: a file without the expected top-level key is rejected.
    if 'BackupConfig' not in raw_config:
        logger.error(
            f'Backup configuration json array not found: {json.dumps({"File": config_file})}')
        raise InvalidJsonBackupsConfigurationArray
    logger.debug(
        f'Successfully loaded backup configuration: {json.dumps({"File": config_file})}')
    return raw_config['BackupConfig']
def validate(backup_config, s3_client):
    """Validate a single backup configuration dict and return it unchanged.

    Checks the name, tar compression type, health-check URL, destination
    bucket (including an S3 existence lookup via ``Lookup``), the optional
    file prefix format, and every source path.

    Raises:
        InvalidBackupConfiguration: if any setting is missing or malformed
            (every individual problem is logged first).
    """
    backup_name = backup_config['Name']
    backup_health_check_url = backup_config['HealthCheckUrl']
    backup_compression = backup_config['TarCompression']
    backup_bucket_name = backup_config['Destination']['Bucket']
    backup_file_prefix = backup_config['Destination']['FilePrefix']
    backup_sources = backup_config['Sources']
    # Kept as a list because it is embedded verbatim in the JSON error message.
    valid_tar_compression = ['gz', 'bz2', 'xz']
    invalid_configurations = []
    if not backup_name:
        invalid_configurations.append(json.dumps({
            "Issue": "NameEmpty", "Name": None}))
    if backup_compression not in valid_tar_compression:
        invalid_configurations.append(json.dumps({
            "Issue": "TarCompressionInvalid", "TarCompression": backup_compression, "ValidCompression": valid_tar_compression}))
    if not backup_health_check_url:
        invalid_configurations.append(json.dumps({
            "Issue": "HealthCheckUrlEmpty", "HealthCheckUrl": None}))
    if not backup_bucket_name:
        invalid_configurations.append(json.dumps({
            "Issue": "BucketNameEmpty", "Name": None}))
    else:
        # Only perform the S3 lookup when a bucket name is actually configured.
        s3_lookup = Lookup(backup_config, s3_client)
        if not s3_lookup.bucket():
            invalid_configurations.append(json.dumps({
                "Issue": "BucketNameInvalid", "Name": backup_bucket_name}))
    if backup_file_prefix:
        # Prefix must start and end with an alphanumeric character, with an
        # optional single trailing slash.
        prefix_end_slash = r"^[a-zA-Z0-9].*[a-zA-Z0-9]\/$"
        prefix_no_end_slash = r"^[a-zA-Z0-9].*[a-zA-Z0-9]$"
        if not (re.match(prefix_end_slash, backup_file_prefix)
                or re.match(prefix_no_end_slash, backup_file_prefix)):
            invalid_configurations.append(json.dumps({
                "Issue": "BucketFilePrefixInvalid", "Prefix": backup_file_prefix}))
    for source in backup_sources:
        if not os.path.exists(source['Path']):
            invalid_configurations.append(json.dumps({
                "Issue": "SourcePathNotFound", "Source": source}))
    if invalid_configurations:
        for invalid_configuration in invalid_configurations:
            logger.error(
                f'Backup name: {backup_name}, Invalid backup configuration: {invalid_configuration}')
        raise InvalidBackupConfiguration
    logger.debug(
        f'Validated backup configuration: {json.dumps({"Backup": backup_name})}')
    return backup_config
class ValidateBackupConfig:
    """Thin wrapper tying together config loading and settings validation."""

    def __init__(self, s3_client):
        # S3 client forwarded to validate() for the bucket-existence lookup.
        self.client = s3_client

    def check(self, backup_config_path):
        """Load, validate and return the backup configuration at *backup_config_path*."""
        logger.debug('Attempting to validate backup configuration json data.')
        self.backups_config = get_backup_config(backup_config_path)
        logger.debug('Successfully validated backup configuration json data.')
        logger.debug('Attempting to validate backup configuration settings.')
        checked_config = validate(self.backups_config, self.client)
        logger.debug('Successfully validated backup configuration settings.')
        return checked_config
| StarcoderdataPython |
132360 | <reponame>VWS-Python/vws-python-mock
"""
Validators for the project state.
"""
from typing import Dict, Set
from mock_vws._database_matchers import get_database_matching_client_keys
from mock_vws._query_validators.exceptions import InactiveProject
from mock_vws.database import VuforiaDatabase
from mock_vws.states import States
def validate_project_state(
    request_path: str,
    request_headers: Dict[str, str],
    request_body: bytes,
    request_method: str,
    databases: Set[VuforiaDatabase],
) -> None:
    """
    Validate the state of the project.

    Args:
        request_path: The path of the request.
        request_headers: The headers sent with the request.
        request_body: The body of the request.
        request_method: The HTTP method of the request.
        databases: All Vuforia databases.

    Raises:
        InactiveProject: The project is inactive.
    """
    database = get_database_matching_client_keys(
        request_headers=request_headers,
        request_body=request_body,
        request_method=request_method,
        request_path=request_path,
        databases=databases,
    )
    assert isinstance(database, VuforiaDatabase)
    # Guard-clause form: raise only when the project is inactive.
    if database.state == States.PROJECT_INACTIVE:
        raise InactiveProject
| StarcoderdataPython |
4837260 | from sgan.models.transformer.decoder import Decoder
from sgan.models.transformer.multihead_attention import MultiHeadAttention
from sgan.models.transformer.positional_encoding import PositionalEncoding
from sgan.models.transformer.pointerwise_feedforward import PointerwiseFeedforward
from sgan.models.transformer.encoder_decoder import EncoderDecoder
from sgan.models.transformer.encoder import Encoder
from sgan.models.transformer.encoder_layer import EncoderLayer
from sgan.models.transformer.decoder_layer import DecoderLayer
from sgan.models.transformer.high_lv_encoder import HighLVEncoder
from sgan.models.transformer.high_lv_decoder import HighLVDecoder
import torch.nn as nn
import math
class TransformerEncoder(nn.Module):
    """Stack of ``n`` transformer encoder layers behind a linear + positional embedding."""

    def __init__(self, enc_inp_size, n=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
        super(TransformerEncoder, self).__init__()
        attention = MultiHeadAttention(h, d_model)
        feed_forward = PointerwiseFeedforward(d_model, d_ff, dropout)
        layer = EncoderLayer(d_model, attention, feed_forward, dropout)
        embedding = nn.Sequential(
            LinearEmbedding(enc_inp_size, d_model),
            PositionalEncoding(d_model, dropout),
        )
        self.encoder = HighLVEncoder(Encoder(layer, n), embedding)
        # Glorot / fan_avg initialisation for all weight matrices (bias vectors
        # have dim <= 1 and are skipped) — kept from the reference implementation.
        for param in self.encoder.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, objs_traj, src_att):
        return self.encoder(objs_traj, src_att)
class TransformerDecoder(nn.Module):
    """Stack of ``n`` transformer decoder layers with embedding and output generator."""

    def __init__(self, dec_inp_size, dec_out_size, n=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
        super(TransformerDecoder, self).__init__()
        self_attention = MultiHeadAttention(h, d_model)
        source_attention = MultiHeadAttention(h, d_model)
        feed_forward = PointerwiseFeedforward(d_model, d_ff, dropout)
        layer = DecoderLayer(d_model, self_attention, source_attention, feed_forward, dropout)
        embedding = nn.Sequential(
            LinearEmbedding(dec_inp_size, d_model),
            PositionalEncoding(d_model, dropout),
        )
        self.decoder = HighLVDecoder(
            Decoder(layer, n),
            embedding,
            Generator(d_model, dec_out_size),
        )
        # Glorot / fan_avg initialisation for all weight matrices, as in the encoder.
        for param in self.decoder.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, encoder_h, src_att, dec_inp, trg_att):
        return self.decoder(encoder_h, src_att, dec_inp, trg_att)
class LinearEmbedding(nn.Module):
    """Linear projection to ``d_model`` dimensions, scaled by sqrt(d_model)."""

    def __init__(self, inp_size, d_model):
        super(LinearEmbedding, self).__init__()
        self.d_model = d_model
        # lut => lookup table (kept name: accessed as an attribute).
        self.lut = nn.Linear(inp_size, d_model)

    def forward(self, x):
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class Generator(nn.Module):
    """Standard linear generation step (no softmax is applied here)."""

    def __init__(self, d_model, out_size):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, out_size)

    def forward(self, x):
        projected = self.proj(x)
        return projected
| StarcoderdataPython |
220425 | <reponame>AustralianSynchrotron/bluesky-queueserver
# flake8: noqa
from typing import List, Optional, Dict, Any
from bluesky.plans import (
count,
list_scan,
rel_list_scan,
list_grid_scan,
rel_list_grid_scan,
log_scan,
rel_log_scan,
adaptive_scan,
rel_adaptive_scan,
tune_centroid,
scan_nd,
inner_product_scan,
scan,
grid_scan,
rel_grid_scan,
relative_inner_product_scan,
rel_scan,
tweak,
spiral_fermat,
rel_spiral_fermat,
spiral,
rel_spiral,
spiral_square,
rel_spiral_square,
ramp_plan,
fly,
x2x_scan,
)
def marked_up_count(
    detectors: List, num: int = 1, delay: Optional[float] = None, md: Optional[Dict[str, Any]] = None
):
    """Type-annotated wrapper around ``bluesky.plans.count``.

    Delegates entirely to ``count``; the generator's return value is whatever
    the underlying plan returns.
    """
    return (yield from count(detectors, num=num, delay=delay, md=md))
| StarcoderdataPython |
3392995 | # Create your forms here.
from django import forms
class CreateForm(forms.Form):
    """Group-creation form; owner, type and cardinality travel as hidden inputs."""

    domain_id = forms.CharField(required=True, widget=forms.HiddenInput)
    group_name = forms.CharField(required=True)
    description = forms.CharField(
        required=True,
        widget=forms.Textarea
    )
    group_owner = forms.CharField(required=True, widget=forms.HiddenInput)
    # Choice values are submitted as strings: '1' = User Level, '2' = Admin Level.
    CHOICES1 = (('1', 'User Level',), ('2', 'Admin Level',))
    group_type = forms.ChoiceField(
        widget=forms.HiddenInput, choices=CHOICES1, required=True)
    # '0' = Single User, '1' = Multi User.
    CHOICES2 = (('0', 'Single User',), ('1', 'Multi User',))
    group_cardinality = forms.ChoiceField(
        widget=forms.HiddenInput, choices=CHOICES2, required=True)
# class AddForm(forms.Form):
# users = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, required=False)
# class RemoveForm(forms.Form):
# members = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, required=False)
class AddForm(forms.Form):
    """Checkbox list of candidate users to add to a group.

    ``user_choices`` must be an iterable of ``(value, label)`` pairs.
    """

    def __init__(self, data=None, user_choices=None):
        super().__init__(data=data)
        # Fall back to an empty choice tuple so a missing ``user_choices``
        # does not leave the field with ``choices=None``.
        self.fields["users"] = forms.MultipleChoiceField(
            widget=forms.CheckboxSelectMultiple, choices=user_choices or ())
class RemoveForm(forms.Form):
    """Checkbox list of current members to remove from a group.

    ``user_choices`` must be an iterable of ``(value, label)`` pairs.
    """

    def __init__(self, data=None, user_choices=None):
        super().__init__(data=data)
        # Fall back to an empty choice tuple so a missing ``user_choices``
        # does not leave the field with ``choices=None``.
        self.fields["members"] = forms.MultipleChoiceField(
            widget=forms.CheckboxSelectMultiple, choices=user_choices or ())
| StarcoderdataPython |
def fibonacci():
    """Yield the Fibonacci sequence indefinitely: 0, 1, 1, 2, 3, 5, ..."""
    current, upcoming = 0, 1
    while True:
        yield current
        current, upcoming = upcoming, current + upcoming
# Demo: print the first eight Fibonacci numbers.
generator = fibonacci()
for _ in range(8):
    print(next(generator))
| StarcoderdataPython |
354722 | <reponame>disktnk/chainer-compiler<filename>ch2o/tests/node/Roi.py
# coding: utf-8
import argparse
import pickle
import sys
import os
import chainer
import chainer.functions as F
from chainer.backends import cuda
class ROIPool2D(chainer.Chain):
    """Wraps a Chainer ROI pooling function for test-case generation.

    NOTE(review): ``outsize`` and ``spatial_scale`` are stored but ``forward``
    passes the literals ``7`` and ``1.2`` instead — presumably so they appear
    as constants during export; confirm before reusing with other sizes.
    """
    def __init__(self, fn, outsize, spatial_scale):
        super(ROIPool2D, self).__init__()
        self.fn = fn  # e.g. F.roi_max_pooling_2d / F.roi_average_pooling_2d
        self.outsize = outsize
        self.spatial_scale = spatial_scale
    def forward(self, x, rois, roi_indices):
        # Hard-coded outsize=7, spatial_scale=1.2 (not self.outsize / self.spatial_scale).
        return self.fn(x, rois, roi_indices, 7, 1.2)
class ROIAlign2D(chainer.Chain):
    """Wraps a Chainer ROI align function for test-case generation.

    NOTE(review): like ROIPool2D, ``forward`` ignores the stored attributes and
    passes literal arguments (7, 0.25, 2) — confirm this is intentional.
    """
    def __init__(self, fn, outsize, spatial_scale, sampling_ratio):
        super(ROIAlign2D, self).__init__()
        self.fn = fn  # e.g. F.roi_max_align_2d / F.roi_average_align_2d
        self.outsize = outsize
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio
    def forward(self, x, rois, roi_indices):
        # Hard-coded outsize=7, spatial_scale=0.25, sampling_ratio=2.
        return self.fn(x, rois, roi_indices, 7,
                       0.25, 2)
class FPN_ROIAlign2D_1st_scale(chainer.Chain):
    """ROI align at FPN pyramid level 1 (outsize 7, spatial scale 1/4)."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.outsize = 7
        self.spatial_scale = 1 / 4
        self.sampling_ratio = 2
    def forward(self, x, rois, roi_indices):
        # Literals mirror the attributes above: outsize=7, scale=0.25, sampling_ratio=2.
        return self.fn(x, rois, roi_indices, 7,
                       0.25, 2)
class FPN_ROIAlign2D_2nd_scale(chainer.Chain):
    """ROI align at FPN pyramid level 2 (outsize 7, spatial scale 1/8)."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.outsize = 7
        self.spatial_scale = 1 / 8
        self.sampling_ratio = 2
    def forward(self, x, rois, roi_indices):
        # Literals mirror the attributes above: outsize=7, scale=0.125, sampling_ratio=2.
        return self.fn(x, rois, roi_indices, 7,
                       0.125, 2)
class FPN_ROIAlign2D_3rd_scale(chainer.Chain):
    """ROI align at FPN pyramid level 3 (outsize 7, spatial scale 1/16)."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.outsize = 7
        self.spatial_scale = 1 / 16
        self.sampling_ratio = 2
    def forward(self, x, rois, roi_indices):
        # Literals mirror the attributes above: outsize=7, scale=0.0625, sampling_ratio=2.
        return self.fn(x, rois, roi_indices, 7,
                       0.0625, 2)
class FPN_ROIAlign2D_4th_scale(chainer.Chain):
    """ROI align at FPN pyramid level 4 (outsize 7, spatial scale 1/32)."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.outsize = 7
        self.spatial_scale = 1 / 32
        self.sampling_ratio = 2
    def forward(self, x, rois, roi_indices):
        # Literals mirror the attributes above: outsize=7, scale=0.03125, sampling_ratio=2.
        return self.fn(x, rois, roi_indices, 7,
                       0.03125, 2)
class FPN_ROIAlign2D_5th_scale(chainer.Chain):
    """ROI align at FPN pyramid level 5 (outsize 7, spatial scale 1/64)."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.outsize = 7
        self.spatial_scale = 1 / 64
        self.sampling_ratio = 2
    def forward(self, x, rois, roi_indices):
        # Literals mirror the attributes above: outsize=7, scale=0.015625, sampling_ratio=2.
        return self.fn(x, rois, roi_indices, 7,
                       0.015625, 2)
# ======================================
import ch2o
if __name__ == '__main__':
    import numpy as np
    # With a leading '-' argument: generate FPN test cases from pickled feature
    # maps; otherwise: generate plain ROI pool/align test cases from synthetic data.
    if len(sys.argv) > 1 and sys.argv[1].startswith('-'):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--data',
            type=str,
            default=None,
            help='pickle data which contains hs, rois and roi_indices')
        # NOTE(review): --index is parsed but never used below — confirm intent.
        parser.add_argument(
            '--index',
            type=int,
            default=0,
            help='which h in hs is used')
        parser.add_argument(
            '--out',
            type=str,
            default=".",
            help='')
        parser.add_argument('--gpu', action='store_true')
        args = parser.parse_args()
        print(args.data)
        with open(args.data, "rb") as f:
            data = pickle.load(f)
        hs = [data_var.array for data_var in data["hs"]]
        rois = data["rois"]
        roi_indices = data["roi_indices"]
        if args.gpu:
            hs = [chainer.cuda.to_gpu(h) for h in hs]
            rois = chainer.cuda.to_gpu(rois)
            roi_indices = chainer.cuda.to_gpu(roi_indices)
        # One feature map / roi set per FPN pyramid level.
        assert(len(hs) == 5)
        assert(len(rois) == 5)
        assert(len(roi_indices) == 5)
        for i, FPN_ROIAlign2D in enumerate((FPN_ROIAlign2D_1st_scale,
                                            FPN_ROIAlign2D_2nd_scale,
                                            FPN_ROIAlign2D_3rd_scale,
                                            FPN_ROIAlign2D_4th_scale,
                                            FPN_ROIAlign2D_5th_scale)):
            ch2o.generate_testcase(
                FPN_ROIAlign2D(F.roi_average_align_2d),
                [hs[i], rois[i], roi_indices[i]],
                output_dir=os.path.join(args.out, "fpn_roi_align_2d_pyramid{}_scale".format(i)),
                use_gpu=args.gpu)
    else:
        # Synthetic input: 2 images, 3 channels, 5x5; two ROIs in (x1, y1, x2, y2) order
        # per the values passed — TODO confirm coordinate convention against chainer docs.
        x = np.arange(2 * 3 * 5 * 5).reshape((2, 3, 5, 5)).astype(np.float32)
        rois = np.array([[0, 1, 3, 4], [1, 0.3, 4, 2.6]]).astype(np.float32)
        roi_indices = np.array([0, 1]).astype(np.int32)
        ch2o.generate_testcase(ROIPool2D(F.roi_max_pooling_2d, 7, 1.2),
                               [x, rois, roi_indices],
                               subname='max_pool')
        ch2o.generate_testcase(ROIPool2D(F.roi_average_pooling_2d, 7, 1.2),
                               [x, rois, roi_indices],
                               subname='avg_pool')
        ch2o.generate_testcase(ROIAlign2D(F.roi_max_align_2d, 7, 1.2, 2),
                               [x, rois, roi_indices],
                               subname='max_align')
        ch2o.generate_testcase(ROIAlign2D(F.roi_average_align_2d, 7, 1.2, 3),
                               [x, rois, roi_indices],
                               subname='avg_align')
| StarcoderdataPython |
1687896 | <filename>filey/__init__.py
from .handles import *
from .shell import *
from .walking import *
from .persistence import *
from .shortcuts import *
# No CLI behaviour; this package only re-exports the submodules imported above.
if __name__ == "__main__":
    pass
4991665 | <filename>Crawler/server.py
from flask import Flask
from flask_restful import Api, Resource
from query import main, statistics_by_id
app = Flask(__name__)
api = Api(app)
class Query(Resource):
    """REST resource: returns crawled video results for a search query."""

    def get(self, query):
        payload = main(query)
        return app.response_class(
            response=payload,
            status=200,
            mimetype='application/json'
        )
# Expose the search endpoint at /query/<query>.
api.add_resource(Query, "/query/<string:query>")
class Statistics(Resource):
    """REST resource: returns statistics for a single video id."""

    def get(self, id):
        stats = statistics_by_id(id)
        return app.response_class(
            response=stats,
            status=200,
            mimetype='application/json'
        )
# Expose the statistics endpoint at /statistics/<id>, then start the dev server.
api.add_resource(Statistics, "/statistics/<string:id>")
app.run(port=3000)
| StarcoderdataPython |
3569373 | # coding: utf-8
"""
TileDB Storage Platform API
TileDB Storage Platform REST API # noqa: E501
The version of the OpenAPI document: 2.2.19
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import tiledb.cloud.rest_api
from tiledb.cloud.rest_api.api.invitation_api import InvitationApi # noqa: E501
from tiledb.cloud.rest_api.rest import ApiException
class TestInvitationApi(unittest.TestCase):
    """InvitationApi unit test stubs.

    All test cases below are generated placeholders (body is ``pass``);
    no assertions are made yet.
    """
    def setUp(self):
        # Fresh API client for every test case.
        self.api = (
            tiledb.cloud.rest_api.api.invitation_api.InvitationApi()
        )  # noqa: E501
    def tearDown(self):
        pass
    def test_accept_invitation(self):
        """Test case for accept_invitation"""
        pass
    def test_cancel_join_organization(self):
        """Test case for cancel_join_organization"""
        pass
    def test_cancel_share_array_by_invite(self):
        """Test case for cancel_share_array_by_invite"""
        pass
    def test_fetch_invitations(self):
        """Test case for fetch_invitations"""
        pass
    def test_join_organization(self):
        """Test case for join_organization"""
        pass
    def test_share_array_by_invite(self):
        """Test case for share_array_by_invite"""
        pass
# Allow running this stub module directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
4961164 | <filename>code/classifier/lstm_1000.py<gh_stars>1-10
import csv
import datetime
import json
import os
import time
import pickle
import gc
from os.path import expanduser, exists
from pprint import pprint
from zipfile import ZipFile
import numpy as np
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from keras import backend as K, optimizers
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Embedding, Dense, Merge, BatchNormalization, TimeDistributed, Lambda, LSTM, SimpleRNN, Dropout, \
Input, Bidirectional
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.utils.data_utils import get_file
from helper import WordNet_44_categories, GR_19_categories, POS_15_categories, lst_2_dic, sequence_from_dic, \
train_val_test_split
# Reproducibility: fix the NumPy RNG used for shuffling/splits.
RNG_SEED = 123123
np.random.seed(RNG_SEED)
# --- Embedding sources (absolute paths are machine-specific) ---
KERAS_DATASETS_DIR = expanduser('~/.keras/datasets/')
GLOVE_ZIP_FILE_URL = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
GLOVE_ZIP_FILE = 'glove.840B.300d.zip'
GLOVE_FILE = 'glove.840B.300d.txt'
SENNA_EMD_FILE = '/home/frank/relation/embedding/senna/embeddings/merged_embeddings.txt'
SRL_EMD_FILE = 'data/EMD/out_51k_50_corpus.emd'
LEMMA_EMD_FILE = 'data/EMD/96w_corpus.emd'
CORPUS_EMD_FILE = 'data/EMD/all_50.emd'
# --- Labeled data and parallel annotation files (one line per example) ---
LABELED_FILE = 'data/ORIG/label1000.txt'
LEMMA_FILE = 'data/LEM/label1000.lemma'
SRL_FILE = 'data/SRL/out_1000.txt'
NORM_FILE = 'data/NORM/label1000.norm_space'
POSITION_A_FILE = 'data/POS/out_1000.posa'
POSITION_B_FILE = 'data/POS/out_1000.posb'
COMPRESSED_FILE = 'data/COMPRESSED/label1000.compressed_wordnet'
POSTAG_FILE = 'data/POSTAG/label1000.postag'
TRAIN_VAL_TEST_ID_FILE = 'ttt_id.json'
WORD_EMBEDDING_MATRIX_FILE = 'word_embedding_matrix.npy'
# --- Model / training hyperparameters ---
MAX_NB_WORDS = 20000
MAX_SEQUENCE_LENGTH = 40
EMBEDDING_DIM = 25
POS_EMBEDDING_DIM = 5
MODEL_WEIGHTS_FILE = 'lstm_weights.h5'
VALIDATION_SPLIT = 0.25
TEST_SPLIT = 0.2
NB_EPOCHS = 40
# --- Data loading and preprocessing (flat script section) ---
all_text_data = []
labels = []
lemma_sentences = []
position_a_seqs = []
position_b_seqs = []
sentences = []
srl_sentences = []
norm_sentences = []
postag_list = []
terma_list = []
termb_list = []
# load all text for tokenizer
DIR_NAME = '/home/frank/relation/dataset/gutenberg/pair_sentences/norm_sent'
for filename in os.listdir(DIR_NAME):
    if filename.endswith('.norm_space'):
        file = os.path.join(DIR_NAME, filename)
        for line in open(file, encoding='utf-8'):
            all_text_data.append(line.strip())
# Read the parallel annotation files in lock-step, one example per line.
# NOTE(review): assumes every file has exactly the same number of lines;
# zip() would silently truncate otherwise.
with open(LABELED_FILE, encoding='utf-8') as labeled_file, \
        open(LEMMA_FILE, encoding='utf-8') as lemma_file, \
        open(SRL_FILE, encoding='utf-8') as srl_file, \
        open(NORM_FILE, encoding='utf-8') as norm_file, \
        open(POSTAG_FILE, encoding='utf-8') as postag_file:
    for line1, line2, line3, line4, line5 in zip(labeled_file, lemma_file, srl_file, norm_file, postag_file):
        # Labeled file columns: ... term_a (2), term_b (3), label (4), tab-separated.
        ls1 = line1.strip().split('\t')
        terma_list.append(ls1[2])
        termb_list.append(ls1[3])
        labels.append(ls1[4])
        lemma_sentences.append(line2.strip())
        srl_sentences.append(line3.strip())
        norm_sentences.append(line4.strip())
        postag_list.append(line5.strip())
# Fit the tokenizer on the large unlabeled corpus plus the labeled sentences.
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(all_text_data + norm_sentences)
pickle.dump(tokenizer, open('tokenizer.pk', 'wb'))
word_sequences = tokenizer.texts_to_sequences(norm_sentences)
word_index = tokenizer.word_index
# terma_index = word_index['terma']
# termb_index = word_index['termb']
#
# # calculate position sequences
# for sent in word_sequences:
#     position_a = sent.index(terma_index)
#     position_b = sent.index(termb_index)
#     position_a_seqs.append(' '.join([str(i - position_a) for i in range(len(sent))]))
#     position_b_seqs.append(' '.join([str(i - position_b) for i in range(len(sent))]))
# seq_tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='')
# seq_tokenizer.fit_on_texts(position_a_seqs + position_b_seqs)
# position_a_seqs = seq_tokenizer.texts_to_sequences(position_a_seqs)
# position_b_seqs = seq_tokenizer.texts_to_sequences(position_b_seqs)
# pos_index = seq_tokenizer.word_index
print("Words in index: %d" % len(word_index))
nb_words = min(MAX_NB_WORDS, len(word_index))
# print("Positions in index: %d" % len(pos_index))
# nb_pos = min(MAX_NB_WORDS, len(pos_index))
# Load pre-trained SENNA word embeddings into a word -> vector dict.
embeddings_index = {}
with open(SENNA_EMD_FILE, encoding='utf-8') as f:
    for line in f:
        values = line.split(' ')
        word = values[0]
        embedding = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = embedding
print('Total Word embeddings: %d' % len(embeddings_index))
#
# word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
# for word, i in word_index.items():
#     if i > MAX_NB_WORDS:
#         continue
#     embedding_vector = embeddings_index.get(word)
#     if embedding_vector is not None:
#         word_embedding_matrix[i] = embedding_vector
#
# print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
# np.save(WORD_EMBEDDING_MATRIX_FILE, word_embedding_matrix)
# assert len(word_sequences) == len(position_a_seqs)
# assert len(position_a_seqs) == len(position_b_seqs)
# assert len(position_b_seqs) == len(labels)
# assert len(labels) == len(terma_list)
# assert len(terma_list) == len(termb_list)
word_sequences = pad_sequences(word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
# position_a_seqs = pad_sequences(position_a_seqs, maxlen=MAX_SEQUENCE_LENGTH)
# position_b_seqs = pad_sequences(position_b_seqs, maxlen=MAX_SEQUENCE_LENGTH)
# Concatenate the two term vectors into one feature vector per example.
# NOTE(review): will raise KeyError if a term is missing from the embeddings.
terma_vectors = np.array([embeddings_index[word] for word in terma_list])
termb_vectors = np.array([embeddings_index[word] for word in termb_list])
termab_vectors = np.concatenate((terma_vectors, termb_vectors), axis=1)
labels = np.array(labels, dtype=int)
# X = np.stack((word_sequences, position_a_seqs, position_b_seqs), axis=1)
# Split by precomputed train/val/test ids (see helper.train_val_test_split).
X_train, X_val, X_test = train_val_test_split(word_sequences, id_file=TRAIN_VAL_TEST_ID_FILE)
termab_train, termab_val, termab_test = train_val_test_split(termab_vectors, id_file=TRAIN_VAL_TEST_ID_FILE)
y_train, y_val, y_test = train_val_test_split(labels, id_file=TRAIN_VAL_TEST_ID_FILE)
print(termab_train.shape)
print('Shape of train data tensor:', X_train.shape)
print('Shape of val data tensor:', X_val.shape)
print('Shape of test tensor:', X_test.shape)
# --- Model definition, training and evaluation ---
# NOTE(review): the matrix stays all-zeros — the code that fills it from the
# pre-trained embeddings is commented out above, so the Embedding layer is
# zero-initialized (trainable=True, so it is learned from scratch). Confirm
# this is intentional.
word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
# position_embedding_matrix = np.zeros((nb_pos + 1, POS_EMBEDDING_DIM))
W = Sequential()
W.add(Embedding(nb_words + 1, EMBEDDING_DIM, weights=[word_embedding_matrix], trainable=True, mask_zero=True))
# W.add(Dropout(0.2))
# W.add(BatchNormalization())
# POS1 = Sequential()
# POS1.add(Embedding(nb_pos + 1, POS_EMBEDDING_DIM, weights=[position_embedding_matrix], trainable=True, mask_zero=True))
# POS1.add(BatchNormalization())
#
# POS2 = Sequential()
# POS2.add(Embedding(nb_pos + 1, POS_EMBEDDING_DIM, weights=[position_embedding_matrix], trainable=True, mask_zero=True))
# POS2.add(BatchNormalization())
model_LSTM = Sequential()
model_LSTM.add(W)
model_LSTM.add(LSTM(50, dropout=0.5, recurrent_dropout=0.5))
# model.add(Dense(50, activation='relu'))
# model.add(BatchNormalization())
# Second input branch: the concatenated term-pair embedding vector (100-d).
termab = Sequential()
termab.add(BatchNormalization(input_shape=(100,)))
# Merge the LSTM sentence encoding with the term-pair features (legacy
# Keras 1.x `Merge` layer) and classify with a sigmoid.
model = Sequential()
model.add(Merge([model_LSTM, termab], mode='concat'))
model.add(BatchNormalization())
model.add(Dense(1, activation='sigmoid'))
# NOTE(review): sgd/adam objects are created but unused — compile() is given
# the string 'adam' instead.
sgd = optimizers.SGD(lr=0.01, decay=1e-5, momentum=0.9, nesterov=True)
adam = optimizers.Adam()
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Keep the best weights by validation accuracy; stop after 5 stagnant epochs.
callbacks = [ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_acc', save_best_only=True),
             EarlyStopping(monitor='val_acc', patience=5)]
print("Starting training at", datetime.datetime.now())
t0 = time.time()
history = model.fit([X_train, termab_train], y_train,
                    epochs=NB_EPOCHS,
                    validation_data=([X_val, termab_val], y_val),
                    batch_size=20,
                    verbose=1,
                    callbacks=callbacks)
t1 = time.time()
print("Training ended at", datetime.datetime.now())
print("Minutes elapsed: %f" % ((t1 - t0) / 60.))
# Reload the checkpointed best weights before evaluating on the test split.
model.load_weights(MODEL_WEIGHTS_FILE)
y_pred = model.predict_classes([X_test, termab_test], batch_size=200)
print('\n F:', f1_score(y_test, y_pred))
gc.collect()
| StarcoderdataPython |
11343020 | import torch
import os
import warnings
class EMA:
    # Found this useful (thanks alexis-jacq):
    # https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/3
    def __init__(
        self, gamma=0.99, save=True, save_frequency=5, save_filename="ema_weights.pth"
    ):
        """
        Track an exponential moving average (EMA) of a model's trainable
        parameters.

        Args:
            gamma: Decay factor. The shadow weights are updated as
                ``ema = gamma * ema + (1 - gamma) * param``, so a gamma close
                to 1 makes the average change slowly.
            save: Whether to periodically persist the shadow weights to disk.
            save_frequency: Persist every this-many calls (when ``save``).
            save_filename: File the shadow weights are saved to / restored from.
        """
        self.gamma = gamma
        # name -> EMA tensor for every trainable parameter seen so far.
        self.registered = {}
        self.save = save
        self.save_filename = save_filename
        self.save_frequency = save_frequency
        self.count = 0
        if not save:
            warnings.warn(
                "Note that the exponential moving average weights will not be saved to a .pth file!"
            )
        # Resume the shadow weights from a previous run if the file exists.
        if save_filename in os.listdir("."):
            self.registered = torch.load(self.save_filename)

    def __call__(self, model):
        """Fold the model's current parameters into the shadow weights."""
        self.count += 1
        for name, param in model.named_parameters():
            if param.requires_grad:
                # Detach so the stored EMA tensors never keep autograd history
                # alive (otherwise every step's graph would be retained).
                current = param.detach()
                if name not in self.registered:
                    new_weight = current.clone()
                else:
                    # Standard EMA update: keep most of the old average and
                    # blend in a small fraction of the new parameter value.
                    new_weight = (
                        self.gamma * self.registered[name]
                        + (1 - self.gamma) * current
                    )
                self.registered[name] = new_weight
        # Only persist when saving was requested at construction time
        # (previously the save flag was ignored here, contradicting the
        # constructor's warning).
        if self.save and self.count % self.save_frequency == 0:
            self.save_ema_weights()

    def copy_weights_to(self, model):
        """Overwrite the model's trainable parameters with the EMA weights."""
        for name, param in model.named_parameters():
            if param.requires_grad:
                param.data = self.registered[name]

    def save_ema_weights(self):
        """Persist the shadow weights to ``self.save_filename``."""
        torch.save(self.registered, self.save_filename)
5110399 | from pathlib import Path
from setuptools import setup
from setuptools import find_packages
PROJECT = "redex"
PROJECT_DIR = f"src/{PROJECT}"
REPOSITORY = f"manifest/{PROJECT}"
# Long description shown on PyPI, taken from the README next to this file.
README = (Path(__file__).parent / "README.md").read_text()
# Setup project version.
# Executing src/redex/version.py defines __version__ in this module's globals,
# keeping the version single-sourced inside the package.
__version__ = None
with open(f"{PROJECT_DIR}/version.py") as file:
    exec(file.read(), globals())
# Setup keywords.
# https://setuptools.readthedocs.io/en/latest/references/keywords.html
setup(
    name=PROJECT,
    version=__version__,
    author="<NAME>",
    author_email="<EMAIL>",
    url=f"https://github.com/{REPOSITORY}",
    description="A combinator library for designing algorithms",
    long_description=README,
    long_description_content_type="text/markdown",
    project_urls={
        "Documentation": f"https://{PROJECT}.readthedocs.io",
        "Source Code": f"https://github.com/{REPOSITORY}",
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.9",
        "Topic :: Scientific/Engineering",
        "Typing :: Typed",
    ],
    # Required for mypy to find the installed package.
    # The package lives under src/ (src-layout), with type information
    # advertised via the bundled py.typed marker (PEP 561).
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    package_data={PROJECT: ["py.typed"]},
    zip_safe=False,
    python_requires=">=3.9",
    install_requires=[
    ],
    extras_require={
        "docs": ["sphinx", "furo", "nbsphinx", "ipykernel"],
        "development": ["hypothesis", "pylint"],
    },
)
| StarcoderdataPython |
1678164 | if __name__ == '__main__':
from kervi.application import Application
APP = Application()
#Important GPIO must be imported after application creation
from kervi.hal import GPIO
from kervi.dashboards import Dashboard, DashboardPanel
Dashboard(
"app",
"App",
[
DashboardPanel("fan", title="CPU fan")
],
is_default=True
)
Dashboard(
"system",
"System",
[
DashboardPanel("cpu"),
DashboardPanel("cam")
]
)
#Create a streaming camera server
from kervi.vision.camera import CameraStreamer
CAMERA = CameraStreamer("cam1", "camera 1")
#link camera as background
CAMERA.link_to_dashboard("app")
#link camera to a panel
CAMERA.link_to_dashboard("system", "cam")
from kervi.sensors.sensor import Sensor
from kervi.devices.platforms.common.sensors.cpu_use import CPULoadSensorDeviceDriver
from kervi.devices.platforms.common.sensors.cpu_temp import CPUTempSensorDeviceDriver
#build in sensor that measures cpu use
SENSOR_CPU_LOAD = Sensor("CPULoadSensor", "CPU", CPULoadSensorDeviceDriver())
#link to sys area top right
SENSOR_CPU_LOAD.link_to_dashboard("*", "sys-header")
#link to a panel, show value in panel header and chart in panel body
SENSOR_CPU_LOAD.link_to_dashboard("system", "cpu", type="value", link_to_header=True)
SENSOR_CPU_LOAD.link_to_dashboard("system", "cpu", type="chart")
#build in sensor that measures cpu temperature
SENSOR_CPU_TEMP = Sensor("CPUTempSensor", "", CPUTempSensorDeviceDriver())
#link to sys area top right
SENSOR_CPU_TEMP.link_to_dashboard("*", "sys-header")
#More on sensors https://kervi.github.io/sensors.html
#define a light controller
from kervi.controllers.controller import Controller
from kervi.values import NumberValue, BooleanValue
from kervi.actions import action, Actions
    class FanController(Controller):
        """Kervi controller that maps a temperature input to a fan speed output.

        Inputs:
            temp         -- current temperature (linked to the CPU temp sensor below).
            trigger_temp -- fan stays off at or below this temperature (persisted).
            max_temp     -- temperature at which the fan reaches 100% (persisted).
        Output:
            fan_speed    -- 0-100, linear between trigger_temp and max_temp.
        """
        def __init__(self):
            Controller.__init__(self, "fan_controller", "Fan")
            self.type = "fan"
            # Current temperature reading; updated externally via link_to().
            self.temp = self.inputs.add("temp", "Temperature", NumberValue)
            self.temp.min = 0
            self.temp.max = 150
            self.trigger_temp = self.inputs.add("trigger_temp", "Trigger temperature", NumberValue)
            self.trigger_temp.min = 0
            self.trigger_temp.max = 100
            # remember the value when app restarts
            self.trigger_temp.persist_value = True
            self.max_temp = self.inputs.add("max_temp", "Max speed temperature", NumberValue)
            self.max_temp.min = 0
            self.max_temp.max = 100
            # remember the value when app restarts
            self.max_temp.persist_value = True
            self.fan_speed = self.outputs.add("fan_speed", "Fanspeed", NumberValue)
            # Controller starts enabled; toggled by the start/stop actions.
            self._active = True
        @action
        def start(self):
            """Enable the controller and recompute the fan speed (kervi action)."""
            self._active = True
            self._calc_fan_speed()
        @action
        def stop(self):
            """Disable the controller; fan_speed keeps its last value (kervi action)."""
            self._active = False
            self._calc_fan_speed()
        def _calc_fan_speed(self):
            # Linear ramp: 0% at trigger_temp, 100% at max_temp, clamped at 100.
            # Does nothing while the controller is stopped.
            if self._active:
                temp = self.temp.value - self.trigger_temp.value
                if temp <= 0:
                    self.fan_speed.value = 0
                else:
                    max_span = self.max_temp.value - self.trigger_temp.value
                    # Guard against division by zero when trigger_temp == max_temp.
                    if max_span != 0:
                        speed = (temp / max_span) * 100
                        if speed > 100:
                            speed = 100
                        self.fan_speed.value = speed
                    else:
                        self.fan_speed.value = 0
        def input_changed(self, changed_input):
            # kervi hook: recompute whenever any linked input value changes.
            self._calc_fan_speed()
    FAN_CONTROLLER = FanController()
    # link the fan controller's temp input to cpu temperature sensor
    # The temp sensor is loaded in another process and linked via its id
    FAN_CONTROLLER.temp.link_to("CPUTempSensor")
    FAN_CONTROLLER.temp.link_to_dashboard("app", "fan")
    # link the other fan controller inputs to dashboard
    FAN_CONTROLLER.trigger_temp.link_to_dashboard("app", "fan")
    FAN_CONTROLLER.max_temp.link_to_dashboard("app", "fan")
    FAN_CONTROLLER.fan_speed.link_to_dashboard("app", "fan")
    # link controller actions as inline buttons on the fan panel
    FAN_CONTROLLER.actions["start"].link_to_dashboard("app", "fan", inline=True, label=None, button_text="Start")
    FAN_CONTROLLER.actions["stop"].link_to_dashboard("app", "fan", inline=True, label=None, button_text="Stop")
    @action
    def app_main():
        """kervi app lifecycle hook: start the fan controller on app start."""
        Actions["fan_controller.start"]()
    @action
    def app_exit():
        """kervi app lifecycle hook: stop the fan controller on app exit."""
        Actions["fan_controller.stop"]()
    APP.run()
| StarcoderdataPython |
3277639 | <gh_stars>0
import sys
import numpy as np
from keras import optimizers
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
# use non standard flow_from_directory
from utils.image_preprocessing_v2 import ImageDataGenerator
# it outputs y_batch that contains onehot targets and logits
# logits came from xception
from keras.models import Model
from keras.layers import Lambda, concatenate, Activation
from keras.losses import categorical_crossentropy as logloss
from keras.metrics import categorical_accuracy, top_k_categorical_accuracy
from keras import backend as K
from models.minixception import miniXception, preprocess_input
from utils.knowledge_distallion_loss_fn import knowledge_distillation_loss as distill_fn
from utils import metric_functions as mf
from utils.plot_utils import plot_utils as plt_uts
from utils.history_utils import history_utils as hist_uts
from utils.save_utils import save_utils as save_uts
import matplotlib.pyplot as plt
import constants as c
def distill(temperature = 5.0, lambda_const = 0.07, num_residuals = 0):
    """Train a miniXception student via knowledge distillation from Xception logits.

    Args:
        temperature: Softmax temperature used to soften teacher/student logits.
        lambda_const: Weight balancing hard-label loss vs. distillation loss.
        num_residuals: Number of residual blocks in the student network.
    """
    print('############# Temperature #############')
    print('############# {} #############'.format(temperature))
    print('########################################')
    print('############# lambda_const #############')
    print('############# {} #############' .format(lambda_const))
    print('########################################')
    print('############# num_residuals #############')
    print('############# {} #############' .format(num_residuals))
    print('########################################')
    data_dir = c.data_dir
    # Teacher (Xception) logits precomputed for every train/val image.
    train_logits = np.load(data_dir + 'train_logits.npy')[()]
    val_logits = np.load(data_dir + 'val_logits.npy')[()]
    # Augmenting generator: for the training set only.
    data_generator = ImageDataGenerator(
        rotation_range=30,
        zoom_range=0.3,
        horizontal_flip=True,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.001,
        channel_shift_range=0.1,
        fill_mode='reflect',
        data_format='channels_last',
        preprocessing_function=preprocess_input
    )
    # Non-augmenting generator: for validation/evaluation.
    data_generator2 = ImageDataGenerator(
        data_format='channels_last',
        preprocessing_function=preprocess_input
    )
    # note: i'm also passing dicts of logits
    train_generator = data_generator.flow_from_directory(
        data_dir + 'train', train_logits,
        target_size=(299, 299),
        batch_size=16
    )
    val_generator = data_generator2.flow_from_directory(
        data_dir + 'val', val_logits,
        target_size=(299, 299),
        batch_size=16
    )
    model = miniXception(weight_decay=1e-5, num_residuals=num_residuals)
    # remove softmax
    model.layers.pop()
    # usual probabilities
    logits = model.layers[-1].output
    probabilities = Activation('softmax')(logits)
    # softened probabilities (logits divided by the temperature)
    logits_T = Lambda(lambda x: x / temperature)(logits)
    probabilities_T = Activation('softmax')(logits_T)
    # Output both heads side by side; the loss slices them apart.
    output = concatenate([probabilities, probabilities_T])
    model = Model(model.input, output)
    # logloss with only soft probabilities and targets
    def soft_logloss(y_true, y_pred):
        # Presumably 256 classes: first 256 columns are the hard
        # probabilities, the remainder the softened ones — TODO confirm.
        logits = y_true[:, 256:]
        y_soft = K.softmax(logits / temperature)
        y_pred_soft = y_pred[:, 256:]
        return logloss(y_soft, y_pred_soft)
    # Train student model
    model.compile(
        optimizer=optimizers.SGD(lr=1e-2, momentum=0.9, nesterov=True),
        loss=lambda y_true, y_pred: distill_fn(y_true, y_pred, lambda_const, temperature),
        metrics=[mf.accuracy, mf.top_5_accuracy, mf.categorical_crossentropy, soft_logloss]
    )
    model.fit_generator(
        train_generator,
        steps_per_epoch=40, epochs=300, verbose=1,
        callbacks=[
            EarlyStopping(monitor='val_acc', patience=4, min_delta=0.01),
            ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=2, min_delta=0.007)
        ],
        validation_data=val_generator, validation_steps=80, workers=4
    )
    plt_uts(model, 'miniXception', temperature, lambda_const, num_residuals)
    hist_uts(model, 'miniXception', temperature, lambda_const, num_residuals)
    save_uts(model, 'miniXception', temperature, lambda_const, num_residuals)
    # BUG FIX: use the non-augmenting generator for the held-out evaluation.
    # Previously the augmenting `data_generator` was used here, which distorts
    # the evaluation images and skews the reported final metrics.
    val_generator_no_shuffle = data_generator2.flow_from_directory(
        data_dir + 'val_no_resizing', val_logits,
        target_size=(299, 299),
        batch_size=16, shuffle=False
    )
    print(model.evaluate_generator(val_generator_no_shuffle, 80))
if __name__ == '__main__':
    # Command line: <temperature> <lambda_const> <num_residuals>
    distill(
        temperature=float(sys.argv[1]),
        lambda_const=float(sys.argv[2]),
        num_residuals=int(sys.argv[3]),
    )
189896 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from https://raw.githubusercontent.com/facebookresearch/torchbeast/3f3029cf3d6d488b8b8f952964795f451a49048f/torchbeast/monobeast.py
# and modified
import os
import logging
import pprint
import time
import timeit
import traceback
import typing
import copy
import psutil
import numpy as np
import queue
import cloudpickle
from torch.multiprocessing import Pool
import threading
import json
import shutil
import signal
import torch
import multiprocessing as py_mp
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
from continual_rl.policies.impala.torchbeast.core import environment
from continual_rl.policies.impala.torchbeast.core import prof
from continual_rl.policies.impala.torchbeast.core import vtrace
from continual_rl.utils.utils import Utils
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
class LearnerThreadState():
    # Lifecycle constants for a learner thread's simple state machine.
    STARTING, RUNNING, STOP_REQUESTED, STOPPED = range(4)

    def __init__(self):
        """
        This class is a helper class to manage communication of state between threads. For now I'm assuming just
        setting state is atomic enough to not require further thread safety.
        """
        self.state = self.STARTING
        self.lock = threading.Lock()

    def wait_for(self, desired_state_list, timeout=300):
        """Poll (every 0.1s) until state enters desired_state_list or timeout seconds elapse."""
        # Use a monotonic deadline rather than summing the sleep delta:
        # accumulating floats drifts, and time.sleep() can overshoot its
        # argument, so the old counter under-measured real elapsed time.
        deadline = time.monotonic() + timeout
        while self.state not in desired_state_list and time.monotonic() < deadline:
            time.sleep(0.1)

        if self.state not in desired_state_list:
            print(f"Gave up on waiting due to timeout. Desired list: {desired_state_list}, current state: {self.state}")  # TODO: not print
class Monobeast():
    def __init__(self, model_flags, observation_space, action_spaces, policy_class):
        """Build actor/learner models, buffers, and bookkeeping for IMPALA training.

        Args:
            model_flags: Namespace of training hyperparameters (device, num_actors, ...).
            observation_space: Gym-style observation space (``.shape`` is used).
            action_spaces: Per-task action spaces passed through to the policy.
            policy_class: Callable creating the policy network.
        """
        self._model_flags = model_flags
        # The latest full episode's set of observations generated by actor with actor_index == 0
        self._videos_to_log = py_mp.Manager().Queue(maxsize=1)
        # Moved some of the original Monobeast code into a setup function, to make class objects
        self.buffers, self.actor_model, self.learner_model, self.optimizer, self.plogger, self.logger, self.checkpointpath \
            = self.setup(model_flags, observation_space, action_spaces, policy_class)
        self._scheduler_state_dict = None  # Filled if we load()
        self._scheduler = None  # Task-specific, so created there
        # Keep track of our threads/processes so we can clean them up.
        self._learner_thread_states = []
        self._actor_processes = []
        # train() will get called multiple times (once per task, per cycle). The current assumption is that only
        # one train() should be running a time, and that all others have been cleaned up. These parameters help us
        # ensure this is true.
        self._train_loop_id_counter = 0
        self._train_loop_id_running = None
        # If we're reloading a task, we need to start from where we left off. This gets populated by load, if
        # applicable
        self.last_timestep_returned = 0
        # Created during train, saved so we can die cleanly
        self.free_queue = None
        self.full_queue = None
        # Pillow sometimes pollutes the logs, see: https://github.com/python-pillow/Pillow/issues/5096
        logging.getLogger("PIL.PngImagePlugin").setLevel(logging.CRITICAL + 1)
# Functions designed to be overridden by subclasses of Monobeast
    def on_act_unroll_complete(self, task_flags, actor_index, agent_output, env_output, new_buffers):
        """
        Called after every unroll in every process running act(). Note that this happens in separate processes, and
        data will need to be shepherded accordingly.
        Default implementation is a no-op; subclasses may override it (e.g. to
        feed a replay buffer).
        """
        pass
    def get_batch_for_training(self, batch):
        """
        Create a new batch based on the old, with any modifications desired. (E.g. augmenting with entries from
        a replay buffer.) This is run in each learner thread.
        Default implementation returns the batch unchanged.
        """
        return batch
    def custom_loss(self, task_flags, model, initial_agent_state):
        """
        Create a new loss. This is added to the existing losses before backprop. Any returned stats will be added
        to the logged stats. If a stat's key ends in "_loss", it'll automatically be plotted as well.
        This is run in each learner thread.
        Default implementation contributes no extra loss and no stats.
        :return: (loss, dict of stats)
        """
        return 0, {}
# Core Monobeast functionality
    def setup(self, model_flags, observation_space, action_spaces, policy_class):
        """Create loggers, shared rollout buffers, actor/learner models, and the optimizer.

        Returns:
            (buffers, actor_model, learner_model, optimizer, plogger, logger, checkpointpath)

        Raises:
            ValueError: if the buffer/actor/batch-size constraints are violated
                or model_flags.optimizer is not "rmsprop" or "adam".
        """
        os.environ["OMP_NUM_THREADS"] = "1"
        logging.basicConfig(
            format=(
                "[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
            ),
            level=0,
        )
        logger = Utils.create_logger(os.path.join(model_flags.savedir, "impala_logs.log"))
        plogger = Utils.create_logger(os.path.join(model_flags.savedir, "impala_results.log"))
        checkpointpath = os.path.join(model_flags.savedir, "model.tar")
        if model_flags.num_buffers is None: # Set sensible default for num_buffers.
            model_flags.num_buffers = max(2 * model_flags.num_actors, model_flags.batch_size)
        if model_flags.num_actors >= model_flags.num_buffers:
            raise ValueError("num_buffers should be larger than num_actors")
        if model_flags.num_buffers < model_flags.batch_size:
            raise ValueError("num_buffers should be larger than batch_size")
        # Convert the device string into an actual device
        model_flags.device = torch.device(model_flags.device)
        # Actor model lives in shared memory so actor processes can read it.
        model = policy_class(observation_space, action_spaces, model_flags.use_lstm)
        buffers = self.create_buffers(model_flags, observation_space.shape, model.num_actions)
        model.share_memory()
        # Separate learner copy on the training device; weights are pushed
        # back to the shared actor model after each optimization step.
        learner_model = policy_class(
            observation_space, action_spaces, model_flags.use_lstm
        ).to(device=model_flags.device)
        if model_flags.optimizer == "rmsprop":
            optimizer = torch.optim.RMSprop(
                learner_model.parameters(),
                lr=model_flags.learning_rate,
                momentum=model_flags.momentum,
                eps=model_flags.epsilon,
                alpha=model_flags.alpha,
            )
        elif model_flags.optimizer == "adam":
            optimizer = torch.optim.Adam(
                learner_model.parameters(),
                lr=model_flags.learning_rate,
            )
        else:
            raise ValueError(f"Unsupported optimizer type {model_flags.optimizer}.")
        return buffers, model, learner_model, optimizer, plogger, logger, checkpointpath
def compute_baseline_loss(self, advantages):
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(self, logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(self, logits, actions, advantages):
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
    def act(
        self,
        model_flags,
        task_flags,
        actor_index: int,
        free_queue: py_mp.Queue,
        full_queue: py_mp.Queue,
        model: torch.nn.Module,
        buffers: Buffers,
        initial_agent_state_buffers,
    ):
        """Actor-process loop: roll out the shared model and fill rollout buffers.

        Repeatedly takes a free buffer index from free_queue, writes an
        unroll_length rollout into the shared buffers at that index, and posts
        the index on full_queue for the learner. Exits when it dequeues None.
        Actor 0 additionally collects per-episode frames for video logging.
        Runs in a separate process; all shared state goes through the queues
        and shared-memory buffers.
        """
        env = None
        try:
            self.logger.info("Actor %i started.", actor_index)
            timings = prof.Timings()  # Keep track of how fast things are.
            gym_env, seed = Utils.make_env(task_flags.env_spec, create_seed=True)
            self.logger.info(f"Environment and libraries setup with seed {seed}")
            # Parameters involved in rendering behavior video
            observations_to_render = []  # Only populated by actor 0
            env = environment.Environment(gym_env)
            env_output = env.initial()
            agent_state = model.initial_state(batch_size=1)
            agent_output, unused_state = model(env_output, task_flags.action_space_id, agent_state)
            # Make sure to kill the env cleanly if a terminate signal is passed. (Will not go through the finally)
            def end_task(*args):
                env.close()
            signal.signal(signal.SIGTERM, end_task)
            while True:
                index = free_queue.get()
                if index is None:
                    # Sentinel from the parent: shut this actor down.
                    break
                # Write old rollout end.
                # Slot 0 repeats the last step of the previous rollout so
                # consecutive rollouts overlap by one step.
                for key in env_output:
                    buffers[key][index][0, ...] = env_output[key]
                for key in agent_output:
                    buffers[key][index][0, ...] = agent_output[key]
                for i, tensor in enumerate(agent_state):
                    initial_agent_state_buffers[index][i][...] = tensor
                # Do new rollout.
                for t in range(model_flags.unroll_length):
                    timings.reset()
                    with torch.no_grad():
                        agent_output, agent_state = model(env_output, task_flags.action_space_id, agent_state)
                    timings.time("model")
                    env_output = env.step(agent_output["action"])
                    timings.time("step")
                    for key in env_output:
                        buffers[key][index][t + 1, ...] = env_output[key]
                    for key in agent_output:
                        buffers[key][index][t + 1, ...] = agent_output[key]
                    # Save off video if appropriate
                    if actor_index == 0:
                        if env_output['done'].squeeze():
                            # If we have a video in there, replace it with this new one
                            try:
                                self._videos_to_log.get(timeout=1)
                            except queue.Empty:
                                pass
                            except (FileNotFoundError, ConnectionRefusedError, ConnectionResetError, RuntimeError) as e:
                                # Sometimes it seems like the videos_to_log socket fails. Since video logging is not
                                # mission-critical, just let it go.
                                self.logger.warning(
                                    f"Video logging socket seems to have failed with error {e}. Aborting video log.")
                                pass
                            self._videos_to_log.put(copy.deepcopy(observations_to_render))
                            observations_to_render.clear()
                        observations_to_render.append(env_output['frame'].squeeze(0).squeeze(0)[-1])
                    timings.time("write")
                new_buffers = {key: buffers[key][index] for key in buffers.keys()}
                self.on_act_unroll_complete(task_flags, actor_index, agent_output, env_output, new_buffers)
                full_queue.put(index)
            if actor_index == 0:
                self.logger.info("Actor %i: %s", actor_index, timings.summary())
        except KeyboardInterrupt:
            pass  # Return silently.
        except Exception as e:
            self.logger.error(f"Exception in worker process {actor_index}: {e}")
            traceback.print_exc()
            print()
            raise e
        finally:
            self.logger.info(f"Finalizing actor {actor_index}")
            if env is not None:
                env.close()
    def get_batch(
        self,
        flags,
        free_queue: py_mp.Queue,
        full_queue: py_mp.Queue,
        buffers: Buffers,
        initial_agent_state_buffers,
        timings,
        lock,
    ):
        """Dequeue flags.batch_size filled rollouts and stack them into one training batch.

        Buffer indices are taken from full_queue under `lock` (so concurrent
        learner threads never split a batch), copied into new stacked tensors,
        returned to free_queue for reuse, and moved to flags.device.

        Returns:
            (batch, initial_agent_state) where batch maps buffer keys to
            tensors stacked along dim=1 and initial_agent_state is a tuple of
            RNN-state tensors concatenated across the batch.
        """
        with lock:
            timings.time("lock")
            indices = [full_queue.get() for _ in range(flags.batch_size)]
            timings.time("dequeue")
        batch = {
            key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
        }
        initial_agent_state = (
            torch.cat(ts, dim=1)
            for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
        )
        timings.time("batch")
        # torch.stack above copied the data, so the buffers can be reused now.
        for m in indices:
            free_queue.put(m)
        timings.time("enqueue")
        batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
        initial_agent_state = tuple(
            t.to(device=flags.device, non_blocking=True) for t in initial_agent_state
        )
        timings.time("device")
        return batch, initial_agent_state
def compute_loss(self, model_flags, task_flags, learner_model, batch, initial_agent_state, with_custom_loss=True):
# Note the action_space_id isn't really used - it's used to generate an action, but we use the action that
# was already computed and executed
learner_outputs, unused_state = learner_model(batch, task_flags.action_space_id, initial_agent_state)
# Take final value function slice for bootstrapping.
bootstrap_value = learner_outputs["baseline"][-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
# from https://github.com/MiniHackPlanet/MiniHack/blob/e124ae4c98936d0c0b3135bf5f202039d9074508/minihack/agent/polybeast/polybeast_learner.py#L243
if model_flags.normalize_reward:
learner_model.update_running_moments(rewards)
rewards /= learner_model.get_running_std()
if model_flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(rewards, -1, 1)
elif model_flags.reward_clipping == "none":
clipped_rewards = rewards
discounts = (~batch["done"]).float() * model_flags.discounting
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
pg_loss = self.compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = model_flags.baseline_cost * self.compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = model_flags.entropy_cost * self.compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
stats = {
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
}
if with_custom_loss: # auxilary terms for continual learning
custom_loss, custom_stats = self.custom_loss(task_flags, learner_model, initial_agent_state)
total_loss += custom_loss
stats.update(custom_stats)
return total_loss, stats, pg_loss, baseline_loss
    def learn(
        self,
        model_flags,
        task_flags,
        actor_model,
        learner_model,
        batch,
        initial_agent_state,
        optimizer,
        scheduler,
        lock,
    ):
        """Performs a learning (optimization) step.

        Under `lock` (so learner threads serialize updates): computes the loss,
        backprops with gradient-norm clipping, steps the optimizer (and
        scheduler, if any), then copies the updated weights into the shared
        actor model. Returns a stats dict for logging.
        """
        with lock:
            # Only log the real batch of new data, not the manipulated version for training, so save it off
            batch_for_logging = copy.deepcopy(batch)
            # Prepare the batch for training (e.g. augmenting with more data)
            batch = self.get_batch_for_training(batch)
            total_loss, stats, _, _ = self.compute_loss(model_flags, task_flags, learner_model, batch, initial_agent_state)
            # The episode_return may be nan if we're using an EpisodicLifeEnv (for Atari), where episode_return is nan
            # until the end of the game, where a real return is produced.
            batch_done_flags = batch_for_logging["done"] * ~torch.isnan(batch_for_logging["episode_return"])
            episode_returns = batch_for_logging["episode_return"][batch_done_flags]
            stats.update({
                "episode_returns": tuple(episode_returns.cpu().numpy()),
                "mean_episode_return": torch.mean(episode_returns).item(),
                "total_loss": total_loss.item(),
            })
            optimizer.zero_grad()
            total_loss.backward()
            # Clip gradients to stabilize training; log the pre-clip norm.
            norm = nn.utils.clip_grad_norm_(learner_model.parameters(), model_flags.grad_norm_clipping)
            stats["total_norm"] = norm.item()
            optimizer.step()
            if scheduler is not None:
                scheduler.step()
            # Push the new weights to the (shared-memory) actor model.
            actor_model.load_state_dict(learner_model.state_dict())
            return stats
def create_buffer_specs(self, unroll_length, obs_shape, num_actions):
T = unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
action=dict(size=(T + 1,), dtype=torch.int64),
)
return specs
def create_buffers(self, flags, obs_shape, num_actions) -> Buffers:
specs = self.create_buffer_specs(flags.unroll_length, obs_shape, num_actions)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
    def create_learn_threads(self, batch_and_learn, stats_lock, thread_free_queue, thread_full_queue):
        """Start num_learner_threads daemon-style learner threads running `batch_and_learn`.

        Each thread gets its own LearnerThreadState for lifecycle control and
        shares the batch/learn locks so batching and optimization serialize.

        Returns:
            (threads, learner_thread_states) as parallel lists.
        """
        learner_thread_states = [LearnerThreadState() for _ in range(self._model_flags.num_learner_threads)]
        batch_lock = threading.Lock()
        learn_lock = threading.Lock()
        threads = []
        for i in range(self._model_flags.num_learner_threads):
            thread = threading.Thread(
                target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i, stats_lock, learner_thread_states[i], batch_lock, learn_lock, thread_free_queue, thread_full_queue)
            )
            thread.start()
            threads.append(thread)
        return threads, learner_thread_states
    def cleanup(self):
        """Reset per-task progress counters and tear down actor processes / learner threads."""
        # We've finished the task, so reset the appropriate counter
        self.logger.info("Finishing task, setting timestep_returned to 0")
        self.last_timestep_returned = 0
        # Ensure the training loop will end
        self._train_loop_id_running = None
        self._cleanup_parallel_workers()
    def _cleanup_parallel_workers(self):
        """Signal actors to exit (terminating them if needed) and ask learner threads to stop."""
        self.logger.info("Cleaning up actors")
        # Send the signal to the actors to die, and resume them so they can (if they're not already dead)
        # One None sentinel per actor: act() breaks out of its loop on None.
        for actor_index, actor in enumerate(self._actor_processes):
            self.free_queue.put(None)
            try:
                actor_process = psutil.Process(actor.pid)
                actor_process.resume()
            except (psutil.NoSuchProcess, psutil.AccessDenied, ValueError):
                # If it's already dead, just let it go
                pass
        # Try wait for the actors to end cleanly. If they do not, try to force a termination
        for actor_index, actor in enumerate(self._actor_processes):
            try:
                actor.join(30)  # Give up on waiting eventually
                if actor.exitcode is None:
                    actor.terminate()
                actor.close()
                self.logger.info(f"[Actor {actor_index}] Cleanup complete")
            except ValueError:  # if actor already killed
                pass
        # Pause the learner so we don't keep churning out results when we're done (or something died)
        self.logger.info("Cleaning up learners")
        for thread_state in self._learner_thread_states:
            thread_state.state = LearnerThreadState.STOP_REQUESTED
        self.logger.info("Cleaning up parallel workers complete")
    def resume_actor_processes(self, ctx, task_flags, actor_processes, free_queue, full_queue, initial_agent_state_buffers):
        """Resume paused actor processes, recreating (in place) any that are dead or unresponsive.

        Args:
            ctx: multiprocessing context used to spawn replacement actors.
            task_flags: Current task configuration passed through to act().
            actor_processes: List of actor processes; entries are replaced in place.
            free_queue / full_queue: Rollout-buffer index queues for new actors.
            initial_agent_state_buffers: Shared RNN-state buffers for new actors.
        """
        # Copy, so iterator and what's being updated are separate
        actor_processes_copy = actor_processes.copy()
        for actor_index, actor in enumerate(actor_processes_copy):
            allowed_statuses = ["running", "sleeping", "disk-sleep"]
            actor_pid = None  # actor.pid fails with ValueError if the process is already closed
            try:
                actor_pid = actor.pid
                actor_process = psutil.Process(actor_pid)
                actor_process.resume()
                recreate_actor = not actor_process.is_running() or actor_process.status() not in allowed_statuses
            except (psutil.NoSuchProcess, psutil.AccessDenied, ValueError):
                recreate_actor = True
            if recreate_actor:
                # Kill the original ctx.Process object, rather than the one attached to by pid
                # Attempting to fix an issue where the actor processes are hanging, CPU util shows zero
                try:
                    actor_processes[actor_index].kill()
                    actor_processes[actor_index].join()
                    actor_processes[actor_index].close()
                except ValueError:  # if actor already killed
                    pass
                self.logger.warn(
                    f"Actor with pid {actor_pid} in actor index {actor_index} was unable to be restarted. Recreating...")
                new_actor = ctx.Process(
                    target=self.act,
                    args=(
                        self._model_flags,
                        task_flags,
                        actor_index,
                        free_queue,
                        full_queue,
                        self.actor_model,
                        self.buffers,
                        initial_agent_state_buffers,
                    ),
                )
                new_actor.start()
                actor_processes[actor_index] = new_actor
def save(self, output_path):
if self._model_flags.disable_checkpoint:
return
model_file_path = os.path.join(output_path, "model.tar")
# Back up previous model (sometimes they can get corrupted)
if os.path.exists(model_file_path):
shutil.copyfile(model_file_path, os.path.join(output_path, "model_bak.tar"))
# Save the model
self.logger.info(f"Saving model to {output_path}")
checkpoint_data = {
"model_state_dict": self.actor_model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
}
if self._scheduler is not None:
checkpoint_data["scheduler_state_dict"] = self._scheduler.state_dict()
torch.save(checkpoint_data, model_file_path)
# Save metadata
metadata_path = os.path.join(output_path, "impala_metadata.json")
metadata = {"last_timestep_returned": self.last_timestep_returned}
with open(metadata_path, "w+") as metadata_file:
json.dump(metadata, metadata_file)
    def load(self, output_path):
        """Restore model/optimizer (and scheduler state, if expected) plus progress metadata from output_path.

        Missing files are tolerated: with no checkpoint, training starts from
        scratch; with no metadata, last_timestep_returned keeps its current value.
        """
        model_file_path = os.path.join(output_path, "model.tar")
        if os.path.exists(model_file_path):
            self.logger.info(f"Loading model from {output_path}")
            # map_location="cpu" so checkpoints saved on GPU load anywhere.
            checkpoint = torch.load(model_file_path, map_location="cpu")
            # Both copies get the same weights; setup() created them separately.
            self.actor_model.load_state_dict(checkpoint["model_state_dict"])
            self.learner_model.load_state_dict(checkpoint["model_state_dict"])
            self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
            if self._model_flags.use_scheduler:
                # Stored for train() to apply once the task's scheduler exists.
                self._scheduler_state_dict = checkpoint.get("scheduler_state_dict", None)
                if self._scheduler_state_dict is None:
                    # Tracked by issue #109
                    self.logger.warn("No scheduler state dict found to load when one was expected.")
        else:
            self.logger.info("No model to load, starting from scratch")
        # Load metadata
        metadata_path = os.path.join(output_path, "impala_metadata.json")
        if os.path.exists(metadata_path):
            self.logger.info(f"Loading impala metdata from {metadata_path}")
            with open(metadata_path, "r") as metadata_file:
                metadata = json.load(metadata_file)
                self.last_timestep_returned = metadata["last_timestep_returned"]
def train(self, task_flags):  # pylint: disable=too-many-branches, too-many-statements
    """IMPALA training loop.

    Generator: spawns actor processes (experience collection) and learner
    threads (parameter updates), then yields an aggregated stats dict roughly
    every `seconds_between_yields` seconds. While yielded, learner threads are
    stopped and actor processes are optionally suspended, so the caller can
    evaluate or checkpoint without concurrent updates; everything is restarted
    when the generator resumes.
    """
    T = self._model_flags.unroll_length
    B = self._model_flags.batch_size

    def lr_lambda(epoch):
        # Linear decay from 1 to 0 over task_flags.total_steps environment steps.
        return 1 - min(epoch * T * B, task_flags.total_steps) / task_flags.total_steps

    if self._model_flags.use_scheduler:
        self._scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda)
    else:
        self._scheduler = None

    # Apply any scheduler state captured by load(); the scheduler object only
    # exists from this point on.
    if self._scheduler is not None and self._scheduler_state_dict is not None:
        self.logger.info("Loading scheduler state dict")
        self._scheduler.load_state_dict(self._scheduler_state_dict)
        self._scheduler_state_dict = None

    # Add initial RNN state.
    initial_agent_state_buffers = []
    for _ in range(self._model_flags.num_buffers):
        state = self.actor_model.initial_state(batch_size=1)
        for t in state:
            # Shared memory so actor processes can write into these tensors.
            t.share_memory_()
        initial_agent_state_buffers.append(state)

    # Setup actor processes and kick them off
    self._actor_processes = []
    ctx = mp.get_context("fork")

    # See: https://stackoverflow.com/questions/47085458/why-is-multiprocessing-queue-get-so-slow for why Manager
    self.free_queue = py_mp.Manager().Queue()
    self.full_queue = py_mp.Manager().Queue()

    for i in range(self._model_flags.num_actors):
        actor = ctx.Process(
            target=self.act,
            args=(
                self._model_flags,
                task_flags,
                i,
                self.free_queue,
                self.full_queue,
                self.actor_model,
                self.buffers,
                initial_agent_state_buffers,
            ),
        )
        actor.start()
        self._actor_processes.append(actor)

    stat_keys = [
        "total_loss",
        "mean_episode_return",
        "pg_loss",
        "baseline_loss",
        "entropy_loss",
    ]
    self.logger.info("# Step\t%s", "\t".join(stat_keys))
    step, collected_stats = self.last_timestep_returned, {}
    self._stats_lock = threading.Lock()

    def batch_and_learn(i, lock, thread_state, batch_lock, learn_lock, thread_free_queue, thread_full_queue):
        """Thread target for the learning process."""
        try:
            nonlocal step, collected_stats
            timings = prof.Timings()
            while True:
                # If we've requested a stop, indicate it and end the thread
                with thread_state.lock:
                    if thread_state.state == LearnerThreadState.STOP_REQUESTED:
                        thread_state.state = LearnerThreadState.STOPPED
                        return
                    thread_state.state = LearnerThreadState.RUNNING

                timings.reset()
                batch, agent_state = self.get_batch(
                    self._model_flags,
                    thread_free_queue,
                    thread_full_queue,
                    self.buffers,
                    initial_agent_state_buffers,
                    timings,
                    batch_lock,
                )
                stats = self.learn(
                    self._model_flags, task_flags, self.actor_model, self.learner_model, batch, agent_state, self.optimizer, self._scheduler, learn_lock
                )
                timings.time("learn")
                # `lock` guards both the shared `step` counter and `collected_stats`.
                with lock:
                    step += T * B
                    to_log = dict(step=step)
                    to_log.update({k: stats[k] for k in stat_keys})
                    self.plogger.info(to_log)

                    # We might collect stats more often than we return them to the caller, so collect them all
                    for key in stats.keys():
                        if key not in collected_stats:
                            collected_stats[key] = []

                        if isinstance(stats[key], tuple) or isinstance(stats[key], list):
                            collected_stats[key].extend(stats[key])
                        else:
                            collected_stats[key].append(stats[key])
        except Exception as e:
            self.logger.error(f"Learner thread failed with exception {e}")
            raise e

        # NOTE(review): the code below appears unreachable — the loop above only
        # exits via `return` (state already set to STOPPED) or by raising.
        if i == 0:
            self.logger.info("Batch and learn: %s", timings.summary())

        thread_state.state = LearnerThreadState.STOPPED

    # Mark all rollout buffers as free so actors can start filling them.
    for m in range(self._model_flags.num_buffers):
        self.free_queue.put(m)

    threads, self._learner_thread_states = self.create_learn_threads(batch_and_learn, self._stats_lock, self.free_queue, self.full_queue)

    # Create the id for this train loop, and only loop while it is the active id
    assert self._train_loop_id_running is None, "Attempting to start a train loop while another is active."
    train_loop_id = self._train_loop_id_counter
    self._train_loop_id_counter += 1
    self._train_loop_id_running = train_loop_id
    self.logger.info(f"Starting train loop id {train_loop_id}")

    timer = timeit.default_timer
    try:
        while self._train_loop_id_running == train_loop_id:
            start_step = step
            start_time = timer()
            time.sleep(self._model_flags.seconds_between_yields)

            # Copy right away, because there's a race where stats can get re-set and then certain things set below
            # will be missing (eg "step")
            with self._stats_lock:
                stats_to_return = copy.deepcopy(collected_stats)
                collected_stats.clear()

            # Steps-per-second over the sleep interval just elapsed.
            sps = (step - start_step) / (timer() - start_time)

            # Aggregate our collected values. Do it with mean so it's not sensitive to the number of times
            # learning occurred in the interim
            mean_return = np.array(stats_to_return.get("episode_returns", [np.nan])).mean()
            stats_to_return["mean_episode_return"] = mean_return

            # Make a copy of the keys so we're not updating it as we iterate over it
            for key in list(stats_to_return.keys()).copy():
                if key.endswith("loss") or key == "total_norm":
                    # Replace with the number we collected and the mean value, otherwise the logs are very verbose
                    stats_to_return[f"{key}_count"] = len(np.array(stats_to_return.get(key, [])))
                    stats_to_return[key] = np.array(stats_to_return.get(key, [np.nan])).mean()

            self.logger.info(
                "Steps %i @ %.1f SPS. Mean return %f. Stats:\n%s",
                step,
                sps,
                mean_return,
                pprint.pformat(stats_to_return),
            )
            stats_to_return["step"] = step
            stats_to_return["step_delta"] = step - self.last_timestep_returned

            # Attach at most one pending video, if the video queue has one.
            try:
                video = self._videos_to_log.get(block=False)
                stats_to_return["video"] = video
            except queue.Empty:
                pass
            except (FileNotFoundError, ConnectionRefusedError, ConnectionResetError, RuntimeError) as e:
                # Sometimes it seems like the videos_to_log socket fails. Since video logging is not
                # mission-critical, just let it go.
                self.logger.warning(f"Video logging socket seems to have failed with error {e}. Aborting video log.")
                pass

            # This block sets us up to yield our results in batches, pausing everything while yielded.
            if self.last_timestep_returned != step:
                self.last_timestep_returned = step

                # Stop learn threads, they are recreated after yielding.
                # Do this before the actors in case we need to do a last batch
                self.logger.info("Stopping learners")
                for thread_id, thread_state in enumerate(self._learner_thread_states):
                    wait = False
                    with thread_state.lock:
                        if thread_state.state != LearnerThreadState.STOPPED and threads[thread_id].is_alive():
                            thread_state.state = LearnerThreadState.STOP_REQUESTED
                            wait = True

                    # Wait for it to stop, otherwise we have training overlapping with eval, and possibly
                    # the thread creation below
                    if wait:
                        thread_state.wait_for([LearnerThreadState.STOPPED], timeout=30)

                # The actors will keep going unless we pause them, so...do that.
                if self._model_flags.pause_actors_during_yield:
                    for actor in self._actor_processes:
                        psutil.Process(actor.pid).suspend()

                # Make sure the queue is empty (otherwise things can get dropped in the shuffle)
                # (Not 100% sure relevant but:) https://stackoverflow.com/questions/19257375/python-multiprocessing-queue-put-not-working-for-semi-large-data
                while not self.free_queue.empty():
                    try:
                        self.free_queue.get(block=False)
                    except queue.Empty:
                        # Race between empty check and get, I guess
                        break
                while not self.full_queue.empty():
                    try:
                        self.full_queue.get(block=False)
                    except queue.Empty:
                        # Race between empty check and get, I guess
                        break

                yield stats_to_return

                # Ensure everything is set back up to train
                self.actor_model.train()
                self.learner_model.train()

                # Resume the actors. If one is dead, replace it with a new one
                if self._model_flags.pause_actors_during_yield:
                    self.resume_actor_processes(ctx, task_flags, self._actor_processes, self.free_queue, self.full_queue,
                                                initial_agent_state_buffers)

                # Resume the learners by creating new ones
                self.logger.info("Restarting learners")
                threads, self._learner_thread_states = self.create_learn_threads(batch_and_learn, self._stats_lock, self.free_queue, self.full_queue)
                self.logger.info("Restart complete")

                for m in range(self._model_flags.num_buffers):
                    self.free_queue.put(m)
                self.logger.info("Free queue re-populated")

    except KeyboardInterrupt:
        pass
    finally:
        self._cleanup_parallel_workers()
        for thread in threads:
            thread.join()
        self.logger.info("Learning finished after %d steps.", step)
@staticmethod
def _collect_test_episode(pickled_args):
    """Run one evaluation episode; intended as a worker-process target.

    Args:
        pickled_args: cloudpickle'd tuple of (task_flags, logger, model).

    Returns:
        (step, returns): total environment steps taken and the list of real
        episode returns observed (NaN returns from "fake" dones are skipped).
    """
    task_flags, logger, model = cloudpickle.loads(pickled_args)

    gym_env, seed = Utils.make_env(task_flags.env_spec, create_seed=True)
    logger.info(f"Environment and libraries setup with seed {seed}")
    env = environment.Environment(gym_env)
    observation = env.initial()

    done = False
    step = 0
    returns = []

    while not done:
        if task_flags.mode == "test_render":
            env.gym_env.render()
        agent_outputs = model(observation, task_flags.action_space_id)
        policy_outputs, _ = agent_outputs
        observation = env.step(policy_outputs["action"])
        step += 1

        done = observation["done"].item() and not torch.isnan(observation["episode_return"])
        # NaN if the done was "fake" (e.g. Atari). We want real scores here so wait for the real return.
        if done:
            returns.append(observation["episode_return"].item())
            logger.info(
                "Episode ended after %d steps. Return: %.1f",
                observation["episode_step"].item(),
                observation["episode_return"].item(),
            )

    env.close()
    return step, returns
def test(self, task_flags, num_episodes: int = 10):
    """Evaluate the actor model over `num_episodes` episodes and yield stats.

    Episodes run in parallel batches of at most
    `eval_episode_num_parallel` worker processes.

    Yields:
        A single dict with keys "episode_returns", "step", "num_episodes".
    """
    if not self._model_flags.no_eval_mode:
        self.actor_model.eval()
    returns = []
    step = 0

    # Break the number of episodes we need to run up into batches of num_parallel, which get run concurrently
    for batch_start_id in range(0, num_episodes, self._model_flags.eval_episode_num_parallel):
        # If we are in the last batch, only do the necessary number, otherwise do the max num in parallel
        batch_num_episodes = min(num_episodes - batch_start_id, self._model_flags.eval_episode_num_parallel)
        with Pool(processes=batch_num_episodes) as pool:
            async_objs = []
            for episode_id in range(batch_num_episodes):
                # cloudpickle so the model/logger survive the process boundary.
                pickled_args = cloudpickle.dumps((task_flags, self.logger, self.actor_model))
                async_obj = pool.apply_async(self._collect_test_episode, (pickled_args,))
                async_objs.append(async_obj)

            for async_obj in async_objs:
                episode_step, episode_returns = async_obj.get()
                step += episode_step
                returns.extend(episode_returns)

    self.logger.info(
        "Average returns over %i episodes: %.1f", len(returns), sum(returns) / len(returns)
    )
    stats = {"episode_returns": returns, "step": step, "num_episodes": len(returns)}
    yield stats
| StarcoderdataPython |
3239452 | from __future__ import unicode_literals
import sure # noqa
import moto.server as server
from moto import mock_mediastore
"""
Test the different server responses
"""
@mock_mediastore
def test_mediastore_lists_containers():
    """A fresh mediastore backend should report an empty container list."""
    app = server.create_backend_app("mediastore")
    client = app.test_client()

    response = client.get(
        "/", headers={"X-Amz-Target": "MediaStore_20170901.ListContainers"},
    )

    body = response.data.decode("utf-8")
    body.should.contain('"Containers": []')
| StarcoderdataPython |
3413223 | <filename>test/unit/test_cache.py
from unittest import TestCase
from dbt.adapters.cache import RelationsCache
from dbt.adapters.default.relation import DefaultRelation
from multiprocessing.dummy import Pool as ThreadPool
import dbt.exceptions
import random
import time
def make_relation(schema, identifier):
    """Build a minimal DefaultRelation (schema + identifier only) for cache tests."""
    return DefaultRelation.create(schema=schema, identifier=identifier)
def make_mock_relationship(schema, identifier):
    """Build a fully-specified view relation (database, table_name, type set)."""
    return DefaultRelation.create(
        database='test_db', schema=schema, identifier=identifier,
        table_name=identifier, type='view'
    )
class TestCache(TestCase):
    """Basic unit tests for RelationsCache: add, drop, rename, lookup."""

    def setUp(self):
        self.cache = RelationsCache()

    def test_empty(self):
        """A new cache has no relations."""
        self.assertEqual(len(self.cache.relations), 0)
        relations = self.cache.get_relations('test')
        self.assertEqual(len(relations), 0)

    def test_bad_drop(self):
        """Dropping a relation that was never added is silently ignored."""
        self.cache.drop(make_relation('foo', 'bar'))

    def test_bad_link(self):
        """Linking requires both endpoints to already be cached."""
        self.cache.add(make_relation('schema', 'foo'))
        # src does not exist
        with self.assertRaises(dbt.exceptions.InternalException):
            self.cache.add_link(make_relation('schema', 'bar'),
                                make_relation('schema', 'foo'))

        # dst does not exist
        with self.assertRaises(dbt.exceptions.InternalException):
            self.cache.add_link(make_relation('schema', 'foo'),
                                make_relation('schema', 'bar'))

    def test_bad_rename(self):
        """Renaming a missing source is ignored; renaming onto an existing target raises."""
        # foo does not exist - should be ignored
        self.cache.rename(make_relation('schema', 'foo'),
                          make_relation('schema', 'bar'))

        self.cache.add(make_relation('schema', 'foo'))
        self.cache.add(make_relation('schema', 'bar'))
        # bar exists
        with self.assertRaises(dbt.exceptions.InternalException):
            self.cache.rename(make_relation('schema', 'foo'),
                              make_relation('schema', 'bar'))

    def test_get_relations(self):
        """Schema lookup is case-insensitive."""
        relation = make_relation('foo', 'bar')
        self.cache.add(relation)
        self.assertEqual(len(self.cache.relations), 1)

        relations = self.cache.get_relations('foo')
        self.assertEqual(len(relations), 1)
        self.assertIs(relations[0], relation)

        relations = self.cache.get_relations('FOO')
        self.assertEqual(len(relations), 1)
        self.assertIs(relations[0], relation)

    def test_add(self):
        """Re-adding a same-named relation is a no-op; schemas are case-folded."""
        rel = make_relation('foo', 'bar')
        self.cache.add(rel)

        relations = self.cache.get_relations('foo')
        self.assertEqual(len(relations), 1)
        self.assertIs(relations[0], rel)

        # add a new relation with same name
        self.cache.add(make_relation('foo', 'bar'))
        self.assertEqual(len(self.cache.relations), 1)
        self.assertEqual(self.cache.schemas, {'foo'})

        relations = self.cache.get_relations('foo')
        self.assertEqual(len(relations), 1)
        self.assertIs(relations[0], rel)

        self.cache.add(make_relation('FOO', 'baz'))
        self.assertEqual(len(self.cache.relations), 2)
        relations = self.cache.get_relations('foo')
        self.assertEqual(len(relations), 2)
        self.assertEqual(self.cache.schemas, {'foo'})

        self.assertIsNot(self.cache.relations[('foo', 'bar')].inner, None)
        self.assertIsNot(self.cache.relations[('foo', 'baz')].inner, None)

    def test_rename(self):
        """Renaming updates both the cache key and the inner relation."""
        self.cache.add(make_relation('foo', 'bar'))
        self.assertIsNot(self.cache.relations[('foo', 'bar')].inner, None)
        self.cache.rename(make_relation('foo', 'bar'),
                          make_relation('foo', 'baz'))

        relations = self.cache.get_relations('foo')
        self.assertEqual(len(relations), 1)
        self.assertEqual(relations[0].schema, 'foo')
        self.assertEqual(relations[0].identifier, 'baz')

        relation = self.cache.relations[('foo', 'baz')]
        self.assertEqual(relation.inner.schema, 'foo')
        self.assertEqual(relation.inner.identifier, 'baz')
        self.assertEqual(relation.schema, 'foo')
        self.assertEqual(relation.identifier, 'baz')

        with self.assertRaises(KeyError):
            self.cache.relations[('foo', 'bar')]
class TestLikeDbt(TestCase):
    """Exercises RelationsCache with a dependency graph similar to a dbt run,
    including concurrent rename/drop cycles from worker threads."""

    def setUp(self):
        self.cache = RelationsCache()
        self._sleep = True

        # add a bunch of cache entries
        for ident in 'abcdef':
            self.cache.add(make_relation('schema', ident))

        # 'b' references 'a'
        self.cache.add_link(make_relation('schema', 'a'),
                            make_relation('schema', 'b'))
        # and 'c' references 'b'
        self.cache.add_link(make_relation('schema', 'b'),
                            make_relation('schema', 'c'))
        # and 'd' references 'b'
        self.cache.add_link(make_relation('schema', 'b'),
                            make_relation('schema', 'd'))
        # and 'e' references 'a'
        self.cache.add_link(make_relation('schema', 'a'),
                            make_relation('schema', 'e'))
        # and 'f' references 'd'
        self.cache.add_link(make_relation('schema', 'd'),
                            make_relation('schema', 'f'))
        # so drop propagation goes (a -> (b -> (c (d -> f))) e)

    def assert_has_relations(self, expected):
        """Assert the cached 'schema' identifiers are exactly `expected`."""
        current = set(r.identifier for r in self.cache.get_relations('schema'))
        self.assertEqual(current, expected)

    def test_drop_inner(self):
        """Dropping 'b' cascades to everything that depends on it."""
        self.assert_has_relations(set('abcdef'))
        self.cache.drop(make_relation('schema', 'b'))
        self.assert_has_relations({'a', 'e'})

    def test_rename_and_drop(self):
        """A dbt-style tmp/backup swap keeps the renamed table but drops its old dependents."""
        self.assert_has_relations(set('abcdef'))
        # drop the backup/tmp
        self.cache.drop(make_relation('schema', 'b__backup'))
        self.cache.drop(make_relation('schema', 'b__tmp'))
        self.assert_has_relations(set('abcdef'))
        # create a new b__tmp
        self.cache.add(make_relation('schema', 'b__tmp',))
        self.assert_has_relations(set('abcdef') | {'b__tmp'})
        # rename b -> b__backup
        self.cache.rename(make_relation('schema', 'b'),
                          make_relation('schema', 'b__backup'))
        self.assert_has_relations(set('acdef') | {'b__tmp', 'b__backup'})
        # rename temp to b
        self.cache.rename(make_relation('schema', 'b__tmp'),
                          make_relation('schema', 'b'))
        self.assert_has_relations(set('abcdef') | {'b__backup'})

        # drop backup, everything that used to depend on b should be gone, but
        # b itself should still exist
        self.cache.drop(make_relation('schema', 'b__backup'))
        self.assert_has_relations(set('abe'))
        relation = self.cache.relations[('schema', 'a')]
        self.assertEqual(len(relation.referenced_by), 1)

    def _rand_sleep(self):
        # Jitter to shake out races; disabled for the repeated stress run.
        if not self._sleep:
            return
        time.sleep(random.random() * 0.1)

    def _target(self, ident):
        """Thread worker: run a full tmp/backup swap cycle on `ident`."""
        self._rand_sleep()
        self.cache.rename(make_relation('schema', ident),
                          make_relation('schema', ident + '__backup'))
        self._rand_sleep()
        self.cache.add(make_relation('schema', ident + '__tmp'))
        self._rand_sleep()
        self.cache.rename(make_relation('schema', ident + '__tmp'),
                          make_relation('schema', ident))
        self._rand_sleep()
        self.cache.drop(make_relation('schema', ident + '__backup'))
        return ident, self.cache.get_relations('schema')

    def test_threaded(self):
        # add three more short subchains for threads to test on
        # (the original also built an unused make_mock_relationship() per
        # iteration; that dead allocation has been removed)
        for ident in 'ghijklmno':
            self.cache.add(make_relation('schema', ident))

        self.cache.add_link(make_relation('schema', 'a'),
                            make_relation('schema', 'g'))
        self.cache.add_link(make_relation('schema', 'g'),
                            make_relation('schema', 'h'))
        self.cache.add_link(make_relation('schema', 'h'),
                            make_relation('schema', 'i'))

        self.cache.add_link(make_relation('schema', 'a'),
                            make_relation('schema', 'j'))
        self.cache.add_link(make_relation('schema', 'j'),
                            make_relation('schema', 'k'))
        self.cache.add_link(make_relation('schema', 'k'),
                            make_relation('schema', 'l'))

        self.cache.add_link(make_relation('schema', 'a'),
                            make_relation('schema', 'm'))
        self.cache.add_link(make_relation('schema', 'm'),
                            make_relation('schema', 'n'))
        self.cache.add_link(make_relation('schema', 'n'),
                            make_relation('schema', 'o'))

        pool = ThreadPool(4)
        results = list(pool.imap_unordered(self._target, ('b', 'g', 'j', 'm')))
        pool.close()
        pool.join()

        # at a minimum, we expect each table to "see" itself, its parent ('a'),
        # and the unrelated table ('e')
        min_expect = {
            'b': {'a', 'b', 'e'},
            'g': {'a', 'g', 'e'},
            'j': {'a', 'j', 'e'},
            'm': {'a', 'm', 'e'},
        }

        for ident, relations in results:
            seen = set(r.identifier for r in relations)
            self.assertTrue(min_expect[ident].issubset(seen))

        self.assert_has_relations(set('abgjme'))

    def test_threaded_repeated(self):
        """Run the threaded scenario repeatedly (without jitter) to stress races."""
        for _ in range(10):
            self.setUp()
            self._sleep = False
            self.test_threaded()
class TestComplexCache(TestCase):
    """Multi-schema dependency graph: drop cascades and renames across schemas."""

    def setUp(self):
        self.cache = RelationsCache()
        inputs = [
            ('foo', 'table1'),
            ('bar', 'table2'),
            ('foo', 'table3'),
            ('foo', 'table4'),
            ('bar', 'table3'),
        ]
        self.inputs = [make_relation(s, i) for s, i in inputs]
        for relation in self.inputs:
            self.cache.add(relation)

        # foo.table3 references foo.table1
        # (create view table3 as (select * from table1...))
        self.cache.add_link(
            make_relation('foo', 'table1'),
            make_relation('foo', 'table3')
        )
        # bar.table3 references foo.table3
        # (create view bar.table3 as (select * from foo.table3...))
        self.cache.add_link(
            make_relation('foo', 'table3'),
            make_relation('bar', 'table3')
        )

        # foo.table4 also references foo.table1
        self.cache.add_link(
            make_relation('foo', 'table1'),
            make_relation('foo', 'table4')
        )

    def test_get_relations(self):
        """Counts per schema and overall after setUp."""
        self.assertEqual(len(self.cache.get_relations('foo')), 3)
        self.assertEqual(len(self.cache.get_relations('bar')), 2)
        self.assertEqual(len(self.cache.relations), 5)

    def test_drop_one(self):
        # dropping bar.table2 should only drop itself
        self.cache.drop(make_relation('bar', 'table2'))
        self.assertEqual(len(self.cache.get_relations('foo')), 3)
        self.assertEqual(len(self.cache.get_relations('bar')), 1)
        self.assertEqual(len(self.cache.relations), 4)

    def test_drop_many(self):
        # dropping foo.table1 should drop everything but bar.table2.
        self.cache.drop(make_relation('foo', 'table1'))
        self.assertEqual(len(self.cache.get_relations('foo')), 0)
        self.assertEqual(len(self.cache.get_relations('bar')), 1)
        self.assertEqual(len(self.cache.relations), 1)

    def test_rename_root(self):
        """Renaming the root across schemas moves it; drops still cascade from it."""
        self.cache.rename(make_relation('foo', 'table1'),
                          make_relation('bar', 'table1'))
        retrieved = self.cache.relations[('bar', 'table1')].inner
        self.assertEqual(retrieved.schema, 'bar')
        self.assertEqual(retrieved.identifier, 'table1')
        self.assertEqual(len(self.cache.get_relations('foo')), 2)
        self.assertEqual(len(self.cache.get_relations('bar')), 3)

        # make sure drops still cascade from the renamed table
        self.cache.drop(make_relation('bar', 'table1'))
        self.assertEqual(len(self.cache.get_relations('foo')), 0)
        self.assertEqual(len(self.cache.get_relations('bar')), 1)
        self.assertEqual(len(self.cache.relations), 1)

    def test_rename_branch(self):
        """Renaming a mid-graph node keeps the dependency chain intact."""
        self.cache.rename(make_relation('foo', 'table3'),
                          make_relation('foo', 'table2'))
        self.assertEqual(len(self.cache.get_relations('foo')), 3)
        self.assertEqual(len(self.cache.get_relations('bar')), 2)

        # make sure drops still cascade through the renamed table
        self.cache.drop(make_relation('foo', 'table1'))
        self.assertEqual(len(self.cache.get_relations('foo')), 0)
        self.assertEqual(len(self.cache.get_relations('bar')), 1)
        self.assertEqual(len(self.cache.relations), 1)
| StarcoderdataPython |
8164980 | <reponame>worko1/python_learn
import yaml
from pprint import pprint
y_filename = raw_input('Enter filaname:')
# y_filename='yaml_file.yml'
def read_yaml(input_file):
    """Parse a YAML file and return the deserialized document.

    Args:
        input_file: path of the YAML file to read.

    Returns:
        The parsed document (dict/list/scalar), or None for an empty file.
    """
    # safe_load instead of the original bare yaml.load: plain load() can
    # construct arbitrary Python objects and is unsafe on untrusted input
    # (and deprecated without an explicit Loader in modern PyYAML).
    with open(input_file, "r") as f:
        return yaml.safe_load(f)
pprint(read_yaml(y_filename)) | StarcoderdataPython |
3513864 | from enum import Enum
from typing import List, Dict
from kbc_pul.experiments_utils.color_utils import matplotlib_color_name_to_hex
class GroupNameEnum(Enum):
    """LaTeX display labels for the three prediction groups used in plots."""

    # All predictions, predictions selected by the filter q, and the rest.
    total = "$\left|\mathbf{R}\\right|$"
    filter = '$S_{q}$'
    other = '$S_{\\neg q}$'

    @staticmethod
    def get_groups_as_ordered_strings() -> List[str]:
        """Return the three labels in canonical plotting order: total, filter, other."""
        return [
            GroupNameEnum.total.value,
            GroupNameEnum.filter.value,
            GroupNameEnum.other.value
        ]

    @ staticmethod
    def get_color_palette() -> Dict[str, str]:
        """Map each group label to a fixed hex color (blue/green/red)."""
        return {
            GroupNameEnum.total.value: matplotlib_color_name_to_hex("blue"),
            GroupNameEnum.filter.value: matplotlib_color_name_to_hex("green"),
            GroupNameEnum.other.value: matplotlib_color_name_to_hex("red")
        }
class CNameEnum(Enum):
    """Column names used in the confidence/prediction-count result tables."""

    cname_true_conf = 'true_conf'
    cname_true_conf_in_filter = 'true_conf_on_predictions_in_filter'
    cname_true_conf_not_in_filter = 'true_conf_on_predictions_not_in_filter'

    cname_true_pos_pair_conf_s_to_o = 'true_pos_pair_conf_s_to_o'
    cname_true_pos_pair_conf_s_to_o_in_filter = 'true_pos_pair_s_to_o_conf_on_predictions_in_filter'
    cname_true_pos_pair_conf_s_to_o_not_in_filter = 'true_pos_pair_s_to_o_conf_on_predictions_not_in_filter'

    cname_true_pos_pair_conf_o_to_s = 'true_pos_pair_conf_o_to_s'
    cname_true_pos_pair_conf_o_to_s_in_filter = 'true_pos_pair_o_to_s_conf_on_predictions_in_filter'
    cname_true_pos_pair_conf_o_to_s_not_in_filter = 'true_pos_pair_o_to_s_conf_on_predictions_not_in_filter'

    cname_rel_true_conf_in_filter = 'rel_conf_in_filter'
    cname_rel_true_conf_not_in_filter = 'rel_conf_not_in_filter'

    cname_n_preds = 'n_predictions'
    cname_n_preds_in_filter = 'n_predictions_in_filter'
    cname_n_preds_not_in_filter = 'n_predictions_not_in_filter'

    cname_rel_n_preds_in_filter = 'rel_n_predictions_in_filter'
    cname_rel_n_preds_not_in_filter = 'rel_n_predictions_not_in_filter'

    cname_percentage_preds_in_filter = 'percentage_n_predictions_in_filter'
    cname_percentage_preds_not_in_filter = 'percentage_n_predictions_not_in_filter'

    @staticmethod
    def get_absolute_confidence_cnames() -> List['CNameEnum']:
        """Columns holding absolute confidence values (total / filter / other)."""
        members = (
            CNameEnum.cname_true_conf,
            CNameEnum.cname_true_conf_in_filter,
            CNameEnum.cname_true_conf_not_in_filter,
        )
        return list(members)

    @staticmethod
    def get_relative_confidence_cnames() -> List['CNameEnum']:
        """Columns holding confidences relative to the overall confidence."""
        members = (
            CNameEnum.cname_rel_true_conf_in_filter,
            CNameEnum.cname_rel_true_conf_not_in_filter,
        )
        return list(members)

    @staticmethod
    def get_absolute_n_prediction_cnames() -> List['CNameEnum']:
        """Columns holding absolute prediction counts."""
        members = (
            CNameEnum.cname_n_preds,
            CNameEnum.cname_n_preds_in_filter,
            CNameEnum.cname_n_preds_not_in_filter,
        )
        return list(members)

    @staticmethod
    def get_relative_n_prediction_cnames() -> List['CNameEnum']:
        """Columns holding prediction counts relative to the total."""
        members = (
            CNameEnum.cname_rel_n_preds_in_filter,
            CNameEnum.cname_rel_n_preds_not_in_filter,
        )
        return list(members)
def absolute_confidence_column_to_pretty_name(old_column_name: str) -> str:
    """Map an absolute-confidence column name to its pretty group label.

    Args:
        old_column_name: value of an absolute-confidence CNameEnum member.

    Returns:
        The corresponding GroupNameEnum label.

    Raises:
        ValueError: if the name is not an absolute-confidence column
            (informative message instead of the original bare ``Exception``;
            still caught by any caller handling ``Exception``).
    """
    pretty_names = {
        CNameEnum.cname_true_conf.value: GroupNameEnum.total.value,
        CNameEnum.cname_true_conf_in_filter.value: GroupNameEnum.filter.value,
        CNameEnum.cname_true_conf_not_in_filter.value: GroupNameEnum.other.value,
    }
    try:
        return pretty_names[old_column_name]
    except KeyError:
        raise ValueError(f"Unexpected absolute-confidence column name: {old_column_name!r}")
def relative_conf_column_to_pretty_name(old_column_name: str) -> str:
    """Map a relative-confidence column name to its pretty group label.

    Args:
        old_column_name: value of a relative-confidence CNameEnum member.

    Returns:
        The corresponding GroupNameEnum label (filter / other).

    Raises:
        ValueError: if the name is not a relative-confidence column
            (informative message instead of the original bare ``Exception``).
    """
    pretty_names = {
        CNameEnum.cname_rel_true_conf_in_filter.value: GroupNameEnum.filter.value,
        CNameEnum.cname_rel_true_conf_not_in_filter.value: GroupNameEnum.other.value,
    }
    try:
        return pretty_names[old_column_name]
    except KeyError:
        raise ValueError(f"Unexpected relative-confidence column name: {old_column_name!r}")
def absolute_pair_positive_conf_s_to_o_column_to_pretty_name(old_column_name: str) -> str:
    """Map an absolute s-to-o pair-positive confidence column name to its group label.

    Args:
        old_column_name: value of a true_pos_pair s-to-o CNameEnum member.

    Returns:
        The corresponding GroupNameEnum label.

    Raises:
        ValueError: if the name is not an s-to-o pair-positive confidence
            column (informative message instead of the original bare
            ``Exception``).
    """
    pretty_names = {
        CNameEnum.cname_true_pos_pair_conf_s_to_o.value: GroupNameEnum.total.value,
        CNameEnum.cname_true_pos_pair_conf_s_to_o_in_filter.value: GroupNameEnum.filter.value,
        CNameEnum.cname_true_pos_pair_conf_s_to_o_not_in_filter.value: GroupNameEnum.other.value,
    }
    try:
        return pretty_names[old_column_name]
    except KeyError:
        raise ValueError(f"Unexpected s-to-o pair-positive confidence column name: {old_column_name!r}")
def absolute_n_predictions_column_to_pretty_name(old_column_name: str) -> str:
    """Map an absolute prediction-count column name to its pretty group label.

    Args:
        old_column_name: value of an absolute prediction-count CNameEnum member.

    Returns:
        The corresponding GroupNameEnum label.

    Raises:
        ValueError: if the name is not an absolute prediction-count column
            (informative message instead of the original bare ``Exception``).
    """
    pretty_names = {
        CNameEnum.cname_n_preds.value: GroupNameEnum.total.value,
        CNameEnum.cname_n_preds_in_filter.value: GroupNameEnum.filter.value,
        CNameEnum.cname_n_preds_not_in_filter.value: GroupNameEnum.other.value,
    }
    try:
        return pretty_names[old_column_name]
    except KeyError:
        raise ValueError(f"Unexpected prediction-count column name: {old_column_name!r}")
def relative_n_predictions_column_to_pretty_name(old_column_name: str) -> str:
    """Map a relative prediction-count column name to its pretty group label.

    Args:
        old_column_name: value of a relative prediction-count CNameEnum member.

    Returns:
        The corresponding GroupNameEnum label (filter / other).

    Raises:
        ValueError: if the name is not a relative prediction-count column
            (informative message instead of the original bare ``Exception``).
    """
    pretty_names = {
        CNameEnum.cname_rel_n_preds_in_filter.value: GroupNameEnum.filter.value,
        CNameEnum.cname_rel_n_preds_not_in_filter.value: GroupNameEnum.other.value,
    }
    try:
        return pretty_names[old_column_name]
    except KeyError:
        raise ValueError(f"Unexpected relative prediction-count column name: {old_column_name!r}")
| StarcoderdataPython |
6556711 | import json
import re
from tornado.httputil import format_timestamp
from tornado.web import RequestHandler
from engine.common.settings import load_settings
from engine.tornado_messages import FlashMessageMixin
from engine.user.user_manager import UserManager
user_manager = UserManager(load_settings('settings.yaml'))
_user_manager = None
def get_user_manager():
global _user_manager
if _user_manager is None:
_user_manager = UserManager(load_settings('settings.yaml'))
return _user_manager
class BackdoorGateway(RequestHandler, FlashMessageMixin):
    """Landing page of the backdoor UI; renders any pending flash message."""

    def get(self, *args, **kwargs):
        message = None
        level = None
        # 'danger' is checked first, but a pending 'success' message overrides
        # it if both are present.
        if self.has_message('danger'):
            level = 'danger'
            message = self.get_message(level)
        if self.has_message('success'):
            level = 'success'
            message = self.get_message(level)
        self.render('gateway.html', message=message, level=level, active_url=lambda x: x, user_id=None)
class AbstractBackdoorHandler(RequestHandler, FlashMessageMixin):
    """Base handler for per-user backdoor pages; expects a `user_id` query argument."""

    def get_user(self):
        """Fetch the user for the `user_id` request argument.

        Redirects to '/' with a danger flash message and returns None when the
        user does not exist.
        """
        self.user_id = self.get_argument('user_id')
        user_manager = get_user_manager()
        user = user_manager.get(self.user_id, auto_create=False)
        if user is None:
            message = {'message': 'User does not exists', 'data': {'user_id': self.user_id}}
            self.set_message(message, 'danger')
            self.redirect('/')
            return
        return user

    def render(self, template_name, **kwargs):
        """Render `template_name` with the common per-user template variables injected."""
        user_id = self.get_argument('user_id')
        # NOTE(review): if get_user() redirected (user is None), the attribute
        # access below raises AttributeError — confirm callers guard this.
        user = self.get_user()
        kwargs['user_id'] = user_id
        kwargs['message'] = kwargs.get('message', None)
        kwargs['active_processes_count'] = len(user.active_processes)
        kwargs['active_url'] = self._check_active_url
        kwargs['format_timestamp'] = format_timestamp
        super(AbstractBackdoorHandler, self).render(template_name, **kwargs)

    def _check_active_url(self, url):
        """Return 'active' when `url` appears in the current controller path segment."""
        request_uri = self.request.uri
        # NOTE(review): re.match returns None if the URI lacks a trailing '/',
        # which would raise AttributeError here — confirm URIs always match.
        controller = re.match('/(.*)/', request_uri).group()
        return 'active' if url in controller else ''
class BackdoorUserHandler(AbstractBackdoorHandler):
    """Per-user page: shows the admin panel or redirects into the game client."""

    def get(self, *args, **kwargs):
        self.user_id = self.get_argument('user_id')
        arguments = self.request.arguments
        if 'panel' in arguments:
            user = self.get_user()
            self.render(
                'user.html',
                user=user,
            )
        if 'play' in arguments:
            # NOTE(review): hard-coded host/port — presumably a dev-only
            # environment; confirm before any other deployment.
            self.redirect('http://local.wysegames.com:8081/?action=authenticate&user_id={}'.format(self.user_id))
class BackdoorActiveProcessesHandler(AbstractBackdoorHandler):
    """Renders the list of the user's active processes."""

    def get(self, *args, **kwargs):
        user = self.get_user()
        self.render(
            'active_processes.html',
            user=user,
            active_processes=user.active_processes)
class BackdoorWipeHandler(RequestHandler, FlashMessageMixin):
    """Deletes a user's data entirely, then redirects to the gateway with a flash message."""

    def get(self, *args, **kwargs):
        self.user_id = self.get_argument('user_id')
        user_manager = get_user_manager()
        user = user_manager.get(self.user_id, auto_create=False)
        if user is None:
            message = {'message': 'User does not exists', 'data': {'user_id': self.user_id}}
            self.set_message(message, 'danger')
            self.redirect('/')
        else:
            user_manager.delete(self.user_id)
            message = {'message': 'User successfully wiped', 'data': {'user_id': self.user_id}}
            self.set_message(message, 'success')
            self.redirect('/')
class MapHandler(AbstractBackdoorHandler, FlashMessageMixin):
    """Renders the user's map, serialized as JSON, into the map template."""

    def get(self, *args, **kwargs):
        user = self.get_user()
        self.render(
            'map.html',
            map_data=json.dumps(user.map),
        )
class CommandsLogHandler(AbstractBackdoorHandler, FlashMessageMixin):
    """Returns a user's raw commands log as pretty-printed JSON (user_id from the URL path)."""

    def get(self, user_id, *args, **kwargs):
        user_manager = get_user_manager()
        log = user_manager.get_commands_log(user_id)
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps(log, indent=4))
| StarcoderdataPython |
8057313 | <filename>tensortrade/data/stream/transform.py
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Any, Callable, Union

from .node import Node, Module
class BinOp(Node):
    """Stream node that applies a binary operator to its first two input nodes."""

    def __init__(self, name: str, op):
        super().__init__(name)
        self.op = op

    def forward(self):
        lhs = self.inputs[0].value
        rhs = self.inputs[1].value
        return self.op(lhs, rhs)

    def has_next(self):
        return True

    def reset(self):
        pass
class Reduce(Node):
    """Stream node that folds the values of all its inputs with a binary function."""

    def __init__(self,
                 name: str,
                 func: Callable[[float, float], float]):
        super().__init__(name)
        self.func = func

    def forward(self):
        values = (node.value for node in self.inputs)
        return functools.reduce(self.func, values)

    def has_next(self):
        return True

    def reset(self):
        pass
class Select(Node):
    """Picks a single input node, either by exact name or by a predicate.

    The matching node is resolved lazily on the first forward() call, and this
    node then takes on the selected node's name.
    """

    def __init__(self, selector: Union[Callable[[str], bool], str]):
        # A string selector matches by exact node name; otherwise `selector`
        # is used as a predicate.
        # NOTE(review): the annotation says Callable[[str], bool], but the
        # predicate is applied to nodes (x.name is accessed) — confirm intent.
        if isinstance(selector, str):
            self.key = selector
            self.selector = lambda x: x.name == selector
        else:
            self.key = None
            self.selector = selector

        super().__init__(self.key or "select")
        self._node = None  # resolved lazily in forward()

    def forward(self):
        if not self._node:
            # First matching input wins; raises IndexError if nothing matches.
            self._node = list(filter(self.selector, self.inputs))[0]
            self.name = self._node.name
        return self._node.value

    def has_next(self):
        return True

    def reset(self):
        pass
class Lambda(Node):
    """Stream node whose value is produced by applying `extract` to a fixed object.

    Args:
        name: node name.
        extract: function mapping `obj` to this node's current value.
        obj: the object to extract the value from on every forward() call.
    """

    # Fix: the original annotated with the builtin function `any` instead of
    # `typing.Any`.
    def __init__(self, name: str, extract: Callable[[Any], float], obj: Any):
        super().__init__(name)
        self.extract = extract
        self.obj = obj

    def forward(self):
        return self.extract(self.obj)

    def has_next(self):
        return True

    def reset(self):
        pass
class Forward(Lambda):
    """Pass-through node mirroring another node's name and current value."""

    def __init__(self, node: 'Node'):
        super().__init__(
            name=node.name,
            extract=lambda x: x.value,
            obj=node
        )
        # Immediately wire `node` as this node's input.
        self(node)
class Condition(Module):
    """Module exposing, as its variables, the subset of inputs matching a predicate."""

    def __init__(self, name: str, condition: Callable[['Node'], bool]):
        super().__init__(name)
        self.condition = condition

    def build(self):
        # Selected once at build time from the currently-connected inputs.
        self.variables = list(filter(self.condition, self.inputs))

    def has_next(self):
        return True
| StarcoderdataPython |
9663859 | <reponame>jakev/dtf<gh_stars>10-100
# Android Device Testing Framework ("dtf")
# Copyright 2013-2017 <NAME> (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for the "pm repo" utility"""
from __future__ import absolute_import
import dtf.testutils as testutils
class PmRepoTests(testutils.BasicIntegrationTest):
    """Integration tests for the "pm repo" command.

    Return-code conventions exercised here: 0 success, 255 bad arguments,
    254 invalid URL, 253 repo-name conflict / not found.
    """

    def test_no_args(self):
        """`pm repo` with no subcommand prints usage and succeeds."""
        result = self.run_cmd("pm repo")
        assert result.return_code == 0

    def test_invalid_cmd(self):
        """An unknown subcommand is rejected."""
        result = self.run_cmd("pm repo NOTHING")
        assert result.return_code == 255

    def test_repo_add_valid(self):
        """Adding a repo with a well-formed URL succeeds."""
        result = self.run_cmd("pm repo add core-mods https://somethingsilly.com")
        assert result.return_code == 0

    def test_repo_add_wrong_args(self):
        """`add` without a name/URL is rejected."""
        result = self.run_cmd("pm repo add")
        assert result.return_code == 255

    def test_repo_add_invalid_url(self):
        """A URL without a scheme is rejected."""
        result = self.run_cmd("pm repo add core-mods somethingsilly.com")
        assert result.return_code == 254

    def test_repo_add_already_exists(self):
        """Re-adding a repo under the same name fails with a conflict."""
        result = self.run_cmd("pm repo add core-mods https://somethingsilly.com")
        assert result.return_code == 0
        result = self.run_cmd("pm repo add core-mods https://somethingsilly.com")
        assert result.return_code == 253

    def test_repo_remove_valid(self):
        """A repo that was added can be removed."""
        result = self.run_cmd("pm repo add core-mods https://somethingsilly.com")
        assert result.return_code == 0
        result = self.run_cmd("pm repo remove core-mods")
        assert result.return_code == 0

    def test_repo_remove_wrong_args(self):
        """`remove` without a name is rejected."""
        result = self.run_cmd("pm repo remove")
        assert result.return_code == 255

    def test_repo_remove_nonexist(self):
        """Removing a repo that was never added fails."""
        result = self.run_cmd("pm repo remove silly")
        assert result.return_code == 253

    def test_repo_list_empty(self):
        """Listing succeeds even with no repos configured."""
        result = self.run_cmd("pm repo list")
        assert result.return_code == 0

    def test_repo_list_valid(self):
        """Listing succeeds after a repo has been added."""
        result = self.run_cmd("pm repo add mods-core https://silly.com")
        assert result.return_code == 0
        result = self.run_cmd("pm repo list")
        assert result.return_code == 0
| StarcoderdataPython |
9706251 | <gh_stars>1-10
def fizzbuzz(n):
    """Return the FizzBuzz sequence for 1..n as a list.

    Multiples of both 3 and 5 map to "FizzBuzz", multiples of 3 to "Fizz",
    multiples of 5 to "Buzz"; every other number is kept as an int.
    """
    out = []
    for i in range(1, n + 1):
        if i % 15 == 0:  # divisible by both 3 and 5
            out.append("FizzBuzz")
        elif i % 3 == 0:
            out.append("Fizz")
        elif i % 5 == 0:
            out.append("Buzz")
        else:
            out.append(i)
    return out
| StarcoderdataPython |
6602900 | <filename>pactman/mock/mock_urlopen.py
import io
import logging
import urllib3.connectionpool
import urllib3.poolmanager
from urllib3.response import HTTPResponse
from .pact_request_handler import PactRequestHandler
# NOTE(review): neither name is referenced elsewhere in this module;
# `_providers` looks like a leftover registry — confirm before removing.
_providers = {}
log = logging.getLogger(__name__)
class MockConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """HTTPConnectionPool replacement that routes requests for registered
    ports to their mock handler instead of hitting the network."""

    # port -> mock handler, shared across every pool instance.
    mocks = {}

    @classmethod
    def add_mock(cls, mock):
        cls.mocks[mock.pact.port] = mock

    @classmethod
    def remove_mock(cls, mock):
        del cls.mocks[mock.pact.port]

    def urlopen(self, method, url, body=None, headers=None, *args, **kwargs):
        mock = self.mocks.get(self.port)
        if mock is None:
            # Port not mocked: fall through to the real connection pool.
            return super().urlopen(method, url, body, headers, *args, **kwargs)
        return mock(method, url, body=body, headers=headers)
class MonkeyPatcher:
    """Swaps urllib3's "http" pool class for MockConnectionPool while at
    least one mock service is registered, restoring the original class when
    the last one is removed."""

    def __init__(self):
        self.patched = False

    def patch(self):
        urllib3.poolmanager.pool_classes_by_scheme["http"] = MockConnectionPool
        self.patched = True

    def clear(self):
        urllib3.poolmanager.pool_classes_by_scheme["http"] = urllib3.connectionpool.HTTPConnectionPool
        self.patched = False

    def add_service(self, handler):
        MockConnectionPool.add_mock(handler)
        if not self.patched:
            self.patch()

    def remove_service(self, handler):
        MockConnectionPool.remove_mock(handler)
        if not MockConnectionPool.mocks:
            self.clear()
# Module-level singleton shared by all MockURLOpenHandler instances.
patcher = MonkeyPatcher()
class MockURLOpenHandler(PactRequestHandler):
    """Pact request handler that stands in for urllib3's urlopen.

    Registers itself with the module-level monkey patcher on construction
    and serves queued interactions back as urllib3 HTTPResponse objects.
    """

    def __init__(self, config):
        self.interactions = []
        super().__init__(config)
        patcher.add_service(self)

    def terminate(self):
        patcher.remove_service(self)

    def setup(self, interactions):
        self.interactions = interactions

    def verify(self):
        pass

    def __call__(self, method, url, redirect=True, headers=None, body=None, **kw):
        # Record the request pieces, then run Pact validation on it.
        self.path = url
        self.headers = headers if headers else {}
        self.body = body
        return self.validate_request(method)

    def get_interaction(self, path):
        # Interactions are consumed from the tail of the registered list.
        if not self.interactions:
            raise AssertionError(f'Request at {path} received but no interaction registered')
        return self.interactions.pop()

    def handle_failure(self, reason):
        raise AssertionError(reason)

    def handle_success(self, interaction):
        pass

    def respond_for_interaction(self, interaction):
        # Build a urllib3-compatible response from the interaction spec.
        response_spec = interaction['response']
        response_headers = {}
        if 'headers' in response_spec:
            response_headers.update(response_spec['headers'])
        if 'body' in response_spec:
            payload = self.handle_response_encoding(response_spec, response_headers)
        else:
            payload = b''
        return HTTPResponse(
            body=io.BytesIO(payload),
            status=response_spec['status'],
            preload_content=False,
            headers=response_headers,
        )
5016219 | from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
from direct.fsm.FSM import FSM
from panda3d.core import *
import sys, os, time, random
from Level import Level
from Camera import CameraControls
from Timer import Timer
class FNAFBase(FSM):
    """Core game FSM for "Five Nights at the Factory" (Python 2 / Panda3D).

    States follow panda3d's FSM enter*/exit* naming convention: Menu, Game,
    RestartNight, GoToNextNight.  When ``withinTTH`` is True the game runs
    embedded in Toontown House, which supplies fonts and handles menu/exit.
    """
    def __init__(self, withinTTH=False):
        FSM.__init__(self, "FNAFBase")
        self.withinTTH = withinTTH
        if not self.withinTTH:
            # Standalone build: load fonts from disk and own the hotkeys.
            base.cogFont = loader.loadFont('data/vtRemingtonPortable.ttf')
            base.pixelFont = loader.loadFont('data/LCD_Solid.ttf')
            base.accept("escape", self.handleEsc)
            base.accept("f9", self.screenshot)
        else:
            from toontown.toonbase import ToontownGlobals
            base.cogFont = ToontownGlobals.getSuitFont()
        base.level = Level()
        base.timer = Timer()
        base.camControls = CameraControls()
        if not self.withinTTH:
            self.handleGotPhases()
        # Mouse picking: a collision ray fired from the camera through the
        # cursor; only nodes with collide-mask bit 8 are clickable.
        self.ray = CollisionRay()
        cNode = CollisionNode('mousePicker')
        cNode.addSolid(self.ray)
        cNode.setCollideMask(BitMask32(8))
        self.cnp = base.cam.attachNewNode(cNode)
        self.handler = CollisionHandlerQueue()
        self.clickTrav = CollisionTraverser('cTrav')
        self.clickTrav.addCollider(self.cnp, self.handler)
        base.accept('mouse1', self.__handleClick)
        self.night = 1
    def handleGotPhases(self):
        # Assets are available: build the level and the camera-browser UI.
        base.level.load()
        base.camControls.browser.load()
    def __handleClick(self):
        # Fires a 'click-<nodeName>' message for picked collision entries.
        m = base.mouseWatcherNode
        if m.hasMouse():
            mpos = m.getMouse()
            self.ray.setFromLens(base.camNode, mpos.getX(), mpos.getY())
            self.clickTrav.traverse(render)
            numEntries = self.handler.getNumEntries()
            if numEntries > 0:
                self.handler.sortEntries()
                # NOTE(review): the loop index `i` is unused and getEntry(0)
                # is read on every iteration, so the same (nearest) entry is
                # broadcast numEntries times — getEntry(i) was probably
                # intended; confirm before changing.
                for i in xrange(numEntries):
                    np = self.handler.getEntry(0).getIntoNodePath()
                    messenger.send('click-%s' % np.getName())
    def enterGame(self, night=1):
        # FSM state: a night in progress.
        self.night = night
        self.__saveProgess()
        base.transitions.irisIn()
        base.level.enter(night)
        base.timer.enter(night)
        base.camControls.enter()
        base.accept("ranOutOfEnergy", self.__handleRanOutOfEnergy)
        base.accept("gameFailed", self.__doFail)
        base.accept("dayComplete", self.__doSuccess)
    def exitGame(self):
        base.level.exit()
        base.timer.exit()
        base.camControls.exit()
        base.ignore("ranOutOfEnergy")
        base.ignore("gameFailed")
    def handleEsc(self):
        sys.exit()
    def __handleRanOutOfEnergy(self):
        # Power is gone: freeze every cog, place one at the player and play
        # the game-over jumpscare with cameras forced off.
        base.level.stopAllCogs()
        cog = random.choice(list(base.level.cogs))
        cog.setPos(0)
        cog.setHpr(180, 0, 0)
        base.camControls.demand('Flashlight')
        base.camControls.demand('Off')
        cog.danceAndGameOver()
    def __doFail(self):
        # Iris out, then restart the same night.
        def restartNight():
            self.demand('RestartNight')
        Sequence(Func(base.transitions.irisOut), Wait(.5),
            Func(restartNight)).start()
    def enterRestartNight(self):
        base.transitions.noTransitions()
        # Defer one frame so the FSM transition happens outside the interval.
        def __doEnterGame(task):
            self.request('Game', self.night)
            return task.done
        taskMgr.doMethodLater(0, __doEnterGame, 'fnafbase-doEnterGame')
    def __doSuccess(self):
        # Iris out, then advance (night 5 completes the game).
        def advance():
            if self.night == 5:
                self.gameComplete()
            else:
                self.demand('GoToNextNight')
        Sequence(Func(base.transitions.irisOut), Wait(.5),
            Func(advance)).start()
    def enterGoToNextNight(self):
        def __doEnterGame(task):
            self.request('Game', self.night + 1)
            return task.done
        taskMgr.doMethodLater(0, __doEnterGame, 'fnafbase-doEnterGame')
    def screenshot(self):
        if not os.path.isdir("screenshots"):
            os.mkdir("screenshots")
        base.win.saveScreenshot("screenshots/five-nights-at-the-factory-%s.jpg" % time.time())
    def gameComplete(self):
        if self.withinTTH:
            messenger.send("FNAF-gameComplete")
            # Let TTH handle it.
            return
        base.transitions.noTransitions()
        self.request('Menu')
    def enterMenu(self):
        self.bgFrame = DirectFrame(parent=render2d, frameSize=(-1, 1, -1, 1), frameColor=(0, 0, 0, 1))
        self.title = OnscreenText(text="Five Nights at the Factory", pos=(0, .8), font=base.cogFont,
            fg=(1, 1, 1, 1), scale=.15, wordwrap=1.6 / .15)
        self.newGameButton = DirectButton(text="NEW GAME", pos=(0, 0, -.2), text_font=base.pixelFont, relief=None,
            scale=.1, text_fg=(1, 1, 1, 1), command=self.demand, extraArgs=['Game'])
        self.continueButton = DirectButton(text="CONTINUE", pos=(0, 0, -.6), text_font=base.pixelFont, relief=None,
            scale=.1, text_fg=(1, 1, 1, 1), command=self.__continue)
    def exitMenu(self):
        self.bgFrame.removeNode()
        self.title.removeNode()
        self.newGameButton.removeNode()
        self.continueButton.removeNode()
    def __continue(self):
        # Resume from the single-byte save file; fall back to night 1.
        lastNight = 1
        if os.path.isfile('save.dat'):
            with open('save.dat', 'rb') as f:
                lastNight = ord(f.read(1))
        if not 1 <= lastNight <= 5:
            lastNight = 1
        self.demand('Game', lastNight)
    def __saveProgess(self):
        # Best-effort save of the current night as a single byte.
        # NOTE(review): writes a str via chr() to a file opened in 'wb' —
        # fine on Python 2 (xrange above implies py2), would TypeError (and
        # be silently swallowed) on Python 3.
        try:
            with open('save.dat', 'wb') as f:
                f.write(chr(self.night))
        except:
            pass
    def startGame(self):
        base.camControls.load()
        nextState = "Menu" if not self.withinTTH else "Game"
        self.demand(nextState)
    def leaveGame(self):
        self.demand('Off')
        base.camControls.unload()
| StarcoderdataPython |
9610008 | <gh_stars>1-10
from urllib.request import urlopen
from urllib.error import HTTPError
from io import BytesIO
from subprocess import call
import threading
import multiprocessing
import signal
import sys
from uuid import uuid4
from pickle import loads, dumps
import logging
import logging.config
from redis import Redis
from PyPDF2 import PdfFileMerger, PdfFileReader
from subprocess import check_output, CalledProcessError
import boto3
from config import REDIS_QUEUE_KEY, LOGGING, S3_BUCKET
redis = Redis()
logging.config.dictConfig(LOGGING)
logger = logging.getLogger(__name__)
class DelayedResult(object):
def __init__(self, key):
self.key = key
self._rv = None
@property
def return_value(self):
if self._rv is None:
rv = redis.get(self.key) # Return the value at the given key
if rv is not None:
self._rv = loads(rv) # Reads the pickled object
return self._rv
def queuefunc(f):
def delay(*args, **kwargs):
qkey = REDIS_QUEUE_KEY
key = '%s:result:%s' % (qkey, str(uuid4())) # Creates a key with the REDIS_QUEUE_KEY and a randomly generated UUID.
s = dumps((f, key, args, kwargs)) # Pickles together the function and parameters; returns the pickled representation as a string.
redis.rpush(REDIS_QUEUE_KEY, s) # Push (append) values to the tail of the stored list.
return DelayedResult(key)
f.delay = delay
return f
@queuefunc
def makePacket(merged_id, filenames_collection):
merger = PdfFileMerger(strict=False)
for filename in filenames_collection:
# Run this up to two times, in the event of a timeout, libreoffice RunTimeError ('Office probably died'), or other exception.
attempts = 0
while attempts < 2:
try:
if filename.lower().endswith(('.xlsx', '.doc', '.docx', '.ppt', '.pptx', '.rtf')) or filename in ['http://metro.legistar1.com/metro/attachments/6aaadb7d-4c9a-429b-a499-2107bc9d031e.pdf', 'http://metro.legistar1.com/metro/attachments/2146cf74-8a70-4d48-8a73-94f21a40106d.pdf', 'http://metro.legistar1.com/metro/attachments/c1fae640-108f-411d-9790-204eb7b9efbb.pdf']:
try:
logger.info('Unoconv conversion underway...')
check_output(['unoconv', '-f', 'pdf', filename])
logger.info('Successful conversion!')
except CalledProcessError as call_err:
logger.warning('Unsuccessful conversion. We had some difficulty with {}'.format(filename))
logger.warning(call_err)
error_logging(attempts, filename)
path, keyword, exact_file = filename.partition('attachments/')
new_file = exact_file.split('.')[0] + '.pdf'
f = open(new_file, 'rb')
merger.append(PdfFileReader(f))
call(['rm', new_file])
else:
opened_url = urlopen(filename).read()
try:
merger.append(BytesIO(opened_url), import_bookmarks=False)
except:
# For PDFs with a little extra garbage, we need to open, save, and re-convert them.
call(['unoconv', '-f', 'pdf', filename])
path, keyword, exact_file = filename.partition('attachments/')
new_file = exact_file.split('.')[0] + '.pdf'
f = open(new_file, 'rb')
merger.append(PdfFileReader(f))
call(['rm', new_file])
if attempts >= 1:
logger.info('Phew! It worked on the second try.')
logger.info('\n')
break
except HTTPError as err:
attempts += 1
logger.warning("\n {0} caused the following error: \n {1}".format(filename, err))
error_logging(attempts, filename)
except FileNotFoundError as err:
attempts += 1
logger.warning("\n {0} caused the following error: \n {1}".format(filename, err))
error_logging(attempts, filename)
except:
logger.warning('Encountered unexpected error while converting {}'.format(filename))
raise
try:
merged = BytesIO()
merger.write(merged)
merged.seek(0)
except SystemExit:
logger.exception('System exited while writing merged files {} as bytes'.format(filenames_collection))
raise SystemExit(1)
except Exception:
logger.exception(("{0} caused the failure of writing {1} as a PDF, and we could not merge this file collection: \n {2}").format(sys.exc_info()[0], merged_id, filenames_collection))
else:
s3 = boto3.resource('s3')
bucket = s3.Bucket(S3_BUCKET)
s3_key = bucket.Object('{id}.pdf'.format(id=merged_id))
s3_key.upload_fileobj(merged)
s3_key.Acl().put(ACL='public-read')
logger.info(("Successful merge! {}").format(merged_id))
return merger
class ChildProcessor(multiprocessing.Process):
def __init__(self, msg, **kwargs):
super().__init__(**kwargs)
self.msg = msg
def run(self):
func, key, args, kwargs = loads(self.msg)
func(*args, **kwargs)
class ParentProcessor(threading.Thread):
def __init__(self, stopper, **kwargs):
super().__init__(**kwargs)
self.stopper = stopper
def run(self):
logger.info('Listening for messages...')
while not self.stopper.is_set():
self.doWork()
def doWork(self):
msg = redis.blpop(REDIS_QUEUE_KEY)
child = ChildProcessor(msg[1])
child.start()
exited = child.join(timeout=120)
if exited is None:
child.terminate()
if redis.llen(REDIS_QUEUE_KEY) == 0:
logger.info("Hurrah! Done merging Metro PDFs.")
def queue_daemon():
try:
# This is really only needed for deployments
# There might be a better way of doing this
from deployment import DEPLOYMENT_ID
with open('/tmp/worker_running.txt', 'w') as f:
f.write(DEPLOYMENT_ID)
except ImportError:
pass
stopper = threading.Event()
worker = ParentProcessor(stopper)
def signalHandler(signum, frame):
stopper.set()
sys.exit(1)
signal.signal(signal.SIGINT, signalHandler)
signal.signal(signal.SIGTERM, signalHandler)
logger.info('Starting worker')
worker.start()
def error_logging(attempts, filename):
if attempts < 2:
logger.info('Trying again...')
else:
logger.exception("Something went wrong. Please look at {}. \n".format(filename))
| StarcoderdataPython |
3454779 | <reponame>vzhong/silg<filename>silg/envs/touchdown/__init__.py
from silg.envs.touchdown import gym_wrapper | StarcoderdataPython |
6423001 | <filename>dirigible/featured_sheet/models.py<gh_stars>100-1000
# Copyright (c) 2011 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from django.db import models
from sheet.models import Sheet
class FeaturedSheet(models.Model):
    # The sheet being promoted on the site.
    sheet = models.ForeignKey(Sheet)
    # Blurb shown alongside the featured sheet.
    description = models.TextField()
    # Optional external link with more background; empty string when absent.
    more_info_url = models.CharField(max_length=1024, default='', blank=True)
    def __unicode__(self):
        # Python 2-style string representation (pre-Django 2 codebase).
        return 'Feature: %s' % (self.sheet.name,)
| StarcoderdataPython |
6562656 | import numpy as np
import torch
def compute_histogram(gamma, m3, b_y):
    """Per-class activation histogram.

    For each class label 0..9, selects the rows of ``gamma`` whose label in
    ``b_y`` equals that class and counts, per channel, how many entries are
    "active" (>= 1e-4).

    :param gamma: tensor of shape (N, m3, H, W); rows are per-sample maps
    :param m3: number of channels (second dimension of ``gamma``)
    :param b_y: 1-D integer label tensor of length N with values in [0, 10)
    :return: numpy array of shape (10, m3); rows for absent classes stay 0
    """
    activations = np.zeros((10, m3))
    for label in range(10):
        mask = b_y == label
        if int(mask.sum()) > 0:
            # BUG FIX: the original indexed gamma with b_y[b_y==i] — the
            # label *values* (i.e. row `i` repeated), not the boolean mask —
            # so it never selected the samples belonging to the class.
            activations[label, :] = count_activations(gamma[mask], 1e-4)
    return activations
def count_activations(gamma_i, tol=1e-4):
    """Count active entries per channel without mutating the input.

    An entry is active when it is >= ``tol``.  For a 4-D input (N, C, H, W)
    counts are summed over N, H and W, giving one value per channel C; for
    a 3-D input (C, H, W) they are summed over H and W.

    BUG FIX: the original thresholded ``gamma_i`` in place, silently
    clobbering the caller's tensor.

    :param gamma_i: 3-D or 4-D tensor/array (assumed to not require grad —
        np.asarray on a grad-tracking tensor raises; confirm with callers)
    :param tol: activation threshold
    :return: 1-D numpy float array of per-channel counts
    """
    active = np.asarray(gamma_i) >= tol
    if active.ndim > 3:
        return active.sum(axis=(0, 2, 3)).astype(float)
    return active.sum(axis=(1, 2)).astype(float)
245120 |
import proton.graphicscomponent
from proton.scenemanager import SceneManager
from proton.scene import *
from proton.protonsingleton import *
import proton.motioncomponent as mc
class GameObject(object):
    """Scene-graph node: owns a component dict, a parent/children tree, and
    always carries a MotionComponent and a GraphicsComponent.

    Most methods are wrapped by the __nullcheck decorator, which guards
    calls made after the object has been destroyed.
    """
    def __init__(self, name):
        """
        Initializes the gameobject.
        :param name: gameobject name.
        """
        self.components = {}
        self.name = name
        self.__parent = None
        self.children = []
        # Every object gets motion + graphics components by default; they
        # are also registered in the component dict by class name.
        self.motion = mc.MotionComponent(self)
        self.graphics = proton.graphicscomponent.GraphicsComponent(self)
        self.components[mc.MotionComponent.__name__] = self.motion
        self.components[proton.graphicscomponent.GraphicsComponent.__name__] = self.graphics
        self.__alive = True
        self.__active = True
    def is_alive(self):
        # True until GameObject.destroy() is called on this object.
        return self.__alive
    def __nullcheck(func):
        # Decorator guarding methods of a destroyed object.
        # NOTE(review): it raises a bare Exception when the object is dead
        # but only *catches* RuntimeError, so calling a method on a
        # destroyed object propagates Exception to the caller — confirm
        # whether silent no-op or a loud failure was intended.
        def wf(*args):
            try:
                if not args[0].__alive:
                    raise Exception
                else:
                    return func(*args)
            except RuntimeError as e:
                pass
        return wf
    @__nullcheck
    def set_active(self, b):
        # Activation cascades to all descendants.
        self.__active = b
        for child in self.children:
            child.set_active(b)
    @__nullcheck
    def is_active(self):
        return self.__active
    @__nullcheck
    def set_parent(self, parent):
        # Reparent, keeping the tree acyclic; None means "attach to the
        # current scene's root".
        if parent is None:
            # NOTE(review): `scenemanager` is not a name imported by this
            # module (only SceneManager is) — this branch looks like it
            # would raise NameError at runtime; confirm.
            s = ProtonSingleton(scenemanager.SceneManager)
            self.set_parent(s.currentscene.root)
        elif GameObject.is_acyclic(parent, self):
            if self.parent() is not None:
                self.parent().children.remove(self)
            self.__parent = parent
            # Re-apply the position so it is interpreted under the new parent.
            p = self.motion.position()
            self.motion.set_position(p.x, p.y)
            parent.children.append(self)
    @__nullcheck
    def parent(self):
        return self.__parent
    @__nullcheck
    def add_component(self, typez):
        # Instantiate and register a component by class; returns the
        # existing instance when one of that type is already attached.
        comp = typez(self)
        if typez.__name__ in self.components:
            return self.components[type(comp).__name__]
        else:
            self.components[type(comp).__name__] = comp
            comp.start()
            return comp
    @__nullcheck
    def get_component(self, typez):
        # NOTE(review): constructs a throwaway instance just to look up the
        # name — typez.__name__ alone would avoid the side effects of
        # component construction; confirm before changing.
        comp = typez(self)
        if typez.__name__ in self.components:
            return self.components[type(comp).__name__]
        else:
            return None
    @__nullcheck
    def transform(self):
        return self.get_component(mc.MotionComponent)
    @__nullcheck
    def start(self):
        pass
    @__nullcheck
    def update(self):
        for k,v in self.components.items():
            v.update()
    @__nullcheck
    def draw(self, screen):
        # Inactive objects are skipped entirely.
        if self.__active:
            for k,v in self.components.items():
                v.draw(screen)
    @__nullcheck
    def on_destroy(self):
        for k,v in self.components.items():
            v.ondestroy()
    @__nullcheck
    def on_collision(self, other):
        for k,v in self.components.items():
            v.oncollision(other)
    @staticmethod
    def is_acyclic(parent, nextchild):
        # True when `parent` is not a descendant of `nextchild` (so the
        # reparenting would not create a cycle).
        for child in nextchild.children:
            if child == parent:
                return False
            else:
                nextok = GameObject.is_acyclic(parent, child)
                if nextok is False:
                    return False
        return True
    @staticmethod
    def destroy(gameobj):
        # Remove the object (and its subtree) from the scene and mark dead.
        if not gameobj.is_alive():
            raise Exception # dont try to destroy dead object, please
        else:
            s = ProtonSingleton(SceneManager)
            s.currentscene.allgameobjects.remove(gameobj)
            # NOTE(review): `gameobj.parent` is a bound method object and is
            # therefore never None — `gameobj.parent() is not None` was
            # probably intended; confirm.
            if gameobj.parent is not None:
                gameobj.parent().children.remove(gameobj)
            for child in gameobj.children:
                GameObject.destroy(child)
            gameobj.__alive = False
| StarcoderdataPython |
9761334 | <filename>store2mongo.py<gh_stars>1-10
"""
Store json data to mongodb
"""
from pymongo import MongoClient
import json
import os
import argparse
def connect(args):
    """Open a MongoDB connection described by the parsed CLI arguments.

    :param args: argparse namespace with host, port, database, collection
    :return: the target collection
    :rtype: pymongo.collection.Collection
    """
    client = MongoClient(host=args.host, port=args.port)
    database = client.get_database(args.database)
    return database.get_collection(args.collection)
def insert_data(file, conn, replace=False):
"""
:param file:
:param pymongo.collection.Collection conn:
:param replace
:return:
"""
n = 0
with open(file) as fh:
data = json.load(fh)
for d in data:
mid = d['id']
d['_id'] = mid
if replace:
res = conn.replace_one({'_id': mid}, d, upsert=True)
if res.modified_count > 0 or res.upserted_id:
n += 1
else:
res = conn.insert_one(d)
n += 1
print("Insert file %s, success %d" % (file, n))
return n
def insert_directory(directory, conn, replace=False):
"""
:param directory:
:param conn:
:param replace:
:return:
"""
for rt, dirs, files in os.walk(directory):
for fs in files:
st = os.path.splitext(fs)
if len(st) > 1 and st[1] == '.json':
insert_data(os.path.realpath(os.path.join(rt, fs)), conn, replace)
def parse(args):
"""
:param args:
:return:
"""
files = args.files
conn = connect(args)
replace = args.replace
for f in files:
if os.path.isdir(f):
insert_directory(f, conn, replace)
else:
insert_data(f, conn, replace)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('files', help='File or directory to store in mongodb.', nargs='+')
parser.add_argument('-t', '--host', help='Mongodb host (default localhost)', default='localhost')
parser.add_argument('-p', '--port', help='Mongodb port (default 27017)', default=27017, type=int)
parser.add_argument('-d', '--database', help='Mongodb database (default test)', default='test')
parser.add_argument('-c', '--collection', help='Mongodb collection (default deafv)', default='deafv')
parser.add_argument('-r', '--replace', help='Whether to replace old data (default false)', action='store_true')
args = parser.parse_args()
parse(args)
| StarcoderdataPython |
5096335 | from logging import getLogger
# Module logger for the jobs utilities.
logger = getLogger("jobs.utils")
# NOTE(review): not referenced in this module — presumably a default call
# timeout (seconds) used elsewhere; confirm before removing.
TIMEOUT_CALLS = 60
class Subway():
    """Simple code that allows extending things by .attr.ing them.

    Each attribute access returns a child Subway whose URL has the
    attribute name appended; calling a node performs an HTTP request.
    """

    def __init__(self, _url, _session, version):
        """
        :param _url: base URL for this node (trailing slash stripped)
        :param _session: requests-like session exposing get/post/... methods
        :param version: API version string, propagated to child nodes
        """
        self._url = _url.rstrip('/')
        self._session = _session
        # BUG FIX: the original discarded `version`; it is now kept so
        # child nodes can be constructed with it (see __getattr__).
        self._version = version

    def __repr__(self):
        return f"<Subway ({self._url})>"

    def __getattr__(self, attr):
        # BUG FIX: __init__ requires three arguments; the original call
        # here omitted the version, raising TypeError on every attribute
        # access.
        return Subway(f"{self._url}/{attr}", self._session, self._version)

    def __call__(self, method="get", trailing="", data=None, _verbose=False):
        """Perform an HTTP request against this node's URL.

        :param method: session method name ("get", "post", ...)
        :param trailing: string appended verbatim to the URL
        :param data: JSON-serialisable request body
        :param _verbose: print the URL and raw response when True
        :return: decoded JSON response
        :raises requests.HTTPError: on non-2xx responses
        """
        fn = getattr(self._session, method)
        url = f"{self._url}{trailing}"
        if _verbose:
            print(f"Calling {url}")
        r = fn(url, json=data)
        if _verbose:
            print(r.content.decode())
        r.raise_for_status()  # good when server is good
        return r.json()
class SpecSubway():
    """Subway is simple when starting, but what if we had a spec that could
    perform sanity checks and parse data when __call__ ed.  This version
    reads an OpenAPI spec and performs lazy checking of attribute access
    and call arguments.
    """

    def __init__(self, _url, _session, _spec, _version=None, _name=None):
        """Wrap one node of the parsed OpenAPI path tree.

        :param _url: base URL of this node (trailing slash stripped)
        :param _session: requests-like session with get/post/... methods
        :param _spec: subtree of the parsed spec owned by this node
        :param _version: OpenAPI version string, propagated to children
        :param _name: attribute name used to reach this node (for errors)
        """
        self._url = _url.rstrip('/')
        self._session = _session
        self._spec = _spec
        self._version = _version
        self._name = _name
        # A node is callable (an "endpoint") when its spec is a leaf of the
        # form {method, meta, src[, response_kwargs_dict]} or when it has a
        # "/" child (i.e. the path itself is also an endpoint).
        self._caller = (
            (len(_spec) == 3 and set(_spec) == set(["method", "meta", "src"])) or
            (len(_spec) == 4 and set(_spec) == set(["method", "meta", "src", "response_kwargs_dict"])) or
            "/" in self._spec
        )

    @staticmethod
    def _parse_openapi_tree(openapi):
        """Convert an OpenAPI document into the nested dict tree used by
        SpecSubway: inner nodes are path components, leaves describe the
        HTTP method plus optional request/response schema metadata."""
        paths = openapi["paths"]
        schemas = openapi["components"]["schemas"]
        tree = {}
        for p in paths:
            node = tree
            for part in p.split('/')[1:]:
                # An empty component means the path ended with "/".
                node = node.setdefault(part if part else "/", {})

        def _fill(subtree, trail):
            for key, child in list(subtree.items()):
                if child:
                    _fill(child, trail + [key])
                    continue
                # Rebuild the original path string for this leaf.
                src = "/" + "/".join(trail)
                if key != "/":
                    src = src + "/" if src != "/" else src
                    src = src + key
                if src not in paths:
                    # The spec stored this path with a trailing slash.
                    src = src + "/"
                data = paths[src]
                method = tuple(data.keys())[0]
                body = data[method]
                leaf = {"method": method, "meta": None, "src": src}
                if "requestBody" in body:
                    ref = body["requestBody"]["content"]["application/json"]["schema"]["$ref"].split("/")[-1]
                    req_schema = schemas[ref]
                    leaf["meta"] = {
                        "kwargs_dict": list(req_schema["properties"]),
                        "required": req_schema.get("required", None),
                    }
                if "responses" in body:
                    schema = body["responses"]["200"]["content"]["application/json"]["schema"]
                    if "$ref" in schema:
                        ref = schema["$ref"].split("/")[-1]
                        response_keys = list(schemas[ref]["properties"])
                        if leaf["meta"] is None:
                            leaf["meta"] = {"response_kwargs_dict": response_keys}
                        else:
                            leaf["meta"]["response_kwargs_dict"] = response_keys
                subtree[key] = leaf

        _fill(tree, [])
        return tree

    @classmethod
    def _version_latest(cls, openapi, _url, _session):
        # "latest" documents share the layout 3.0.0 has for the parts this
        # client reads, so both builders delegate to the same parser.
        return cls(_url, _session, cls._parse_openapi_tree(openapi), 'latest')

    @classmethod
    def _version_3_0_0(cls, openapi, _url, _session):
        return cls(_url, _session, cls._parse_openapi_tree(openapi), '3.0.0')

    @classmethod
    def from_openapi(cls, openapi, _url, _session, _version):
        """Build a SpecSubway from an OpenAPI document.

        BUG FIX: the original eagerly invoked every version builder inside
        a dict literal (with mismatched signatures — the builders took
        different argument lists than they were called with) and never
        returned the result, so from_openapi always raised or returned
        None.  Builders are now dispatched lazily and the instance is
        returned.

        :raises ValueError: when ``_version`` is not a supported version
        """
        builders = {
            '3.0.0': cls._version_3_0_0,
            'latest': cls._version_latest,
        }
        try:
            builder = builders[_version]
        except KeyError:
            raise ValueError(f"{_version} is not a valid version") from None
        return builder(openapi, _url, _session)

    def __repr__(self):
        return f"<SpecSubway ({self._url})>"

    def __getattr__(self, attr):
        # https://stackoverflow.com/questions/3278077/difference-between-getattr-vs-getattribute
        if self._caller and len(self._spec) == 1:
            raise AttributeError(f"'.{self._name}' does not have children")
        if attr not in self._spec:
            raise AttributeError(f"'.{attr}' is not a valid function")
        # BUG FIX: the original passed `attr` into the version slot of
        # __init__, leaving the child's _name unset (error messages showed
        # 'None'); the version is now propagated and attr becomes the name.
        return SpecSubway(f"{self._url}/{attr}", self._session, self._spec[attr], self._version, attr)

    def __call__(self, *args, _verbose=False, _parse=False, **kwargs):
        """Invoke the endpoint this node represents.

        Positional args fill required keys (or declared kwargs, in order);
        keyword args are validated against the request schema.

        :param _verbose: print the method, URL and payload
        :param _parse: unpack the response dict into the schema's key order
        :raises AttributeError: on non-endpoints or invalid/missing args
        :raises ValueError: on non-200 responses (body as the message)
        """
        if not self._caller:
            raise AttributeError(f"'.{self._name}' is not an endpoint")
        # When the node has a "/" child, the node itself is the endpoint
        # and requests must carry a trailing slash.
        trailing_slash = "/" in self._spec
        spec = self._spec["/"] if trailing_slash else self._spec
        meta = spec["meta"]
        data = None
        if meta is None or "kwargs_dict" not in meta:
            assert len(args) == len(kwargs) == 0, "This method does not accept any arguments"
        else:
            kwargs_dict = meta["kwargs_dict"]
            required = meta["required"]
            data = {}
            for i, value in enumerate(args):
                # Positional args map onto required keys first when known.
                data[required[i] if required is not None else kwargs_dict[i]] = value
            for key, value in kwargs.items():
                if key not in kwargs_dict:
                    raise AttributeError(f"{key} is not a valid argument")
                data[key] = value
            if required is not None:
                for key in required:
                    if key not in data:
                        raise AttributeError(f"{key} is a required argument")
        fn = getattr(self._session, spec["method"])
        url = self._url + ("/" if trailing_slash else "")
        if _verbose:
            print(f"{spec['method'].upper()} {url}")
            print("-->>", data)
        r = fn(url, json=data)
        if not r.status_code == 200:
            raise ValueError(r.content.decode())
        out = r.json()
        # BUG FIX: the original inspected self._spec["meta"], which raises
        # KeyError for endpoints reached through a "/" child; the resolved
        # leaf's meta is used instead.
        if _parse and meta is not None and "response_kwargs_dict" in meta:
            out = [out[k] for k in meta["response_kwargs_dict"]]
            if len(out) == 1:
                return out[0]
        return out
# -------- script
def get_sub(port=5000):
    """Fetch the local server's OpenAPI document and wrap it in a
    SpecSubway client.

    :param port: port of the server on 127.0.0.1
    :return: a SpecSubway rooted at /api/v1/
    """
    from requests import Session
    session = Session()
    base_url = f"http://127.0.0.1:{port}/api/v1/"
    response = session.get(f"{base_url}openapi.json")
    try:
        response.raise_for_status()
    except Exception as e:
        # Dump the body for debugging before letting execution continue.
        print(response.content)
        print("--------------------")
        print(e)
    spec = response.json()
    print("001", spec.keys())
    print("002", spec["openapi"])
    return SpecSubway.from_openapi(
        openapi=spec,
        _url=base_url,
        _session=session,
        _version=spec["openapi"]
    )
# Import-time smoke test: builds the client against a locally running
# server and dumps the parsed spec tree.  Executes on module import.
sub = get_sub()
print(sub._spec)
| StarcoderdataPython |
9730488 | # -*- coding: utf-8 -*-
from django import conf
from django.db import models, IntegrityError
import schools.models
class Session(models.Model):
    """SIS session from poldnev.ru"""
    # Primary key mirrors the id used on poldnev.ru itself.
    poldnev_id = models.CharField(
        primary_key=True,
        max_length=50,
        help_text='Id смены на poldnev.ru. Заполняется автоматически командой '
                  'manage.py update_poldnev по информации с сайта.')
    name = models.CharField(
        max_length=50,
        help_text='Имя смены на poldnev.ru. Заполняется автоматически командой '
                  'manage.py update_poldnev по информации с сайта.')
    # Link to the internal schools app; cleared (not cascaded) on deletion.
    schools_session = models.OneToOneField(
        schools.models.Session,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='poldnev_session',
        help_text='Смена из модуля schools, соответствующая этой смене на '
                  'poldnev.ru')
    verified = models.BooleanField(
        default=False,
        help_text='True, если корректность значения schools_session была '
                  'проверена человеком')
    url = models.URLField(null=True, blank=True)
    class Meta:
        # Newest sessions first (ids sort lexicographically here).
        ordering = ('-poldnev_id',)
    def __str__(self):
        return self.name
class Parallel(models.Model):
    """SIS parallel from poldnev.ru"""
    session = models.ForeignKey(
        Session,
        on_delete=models.CASCADE,
        help_text='Смена',
        related_name='parallels',
    )
    name = models.CharField(
        max_length=100,
        help_text='Название'
    )
    schools_parallel = models.ForeignKey(
        schools.models.Parallel,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='poldnev_parallels',
        help_text='Параллель из модуля schools, соответствующая этой параллели '
                  'на poldnev.ru'
    )
    def __str__(self):
        return '{} {}'.format(self.session, self.name)
    @property
    def unique_key(self):
        # Stable identifier combining the session id and the parallel name.
        return '{}:{}'.format(self.session.poldnev_id, self.name)
    @property
    def url(self):
        # NOTE(review): relies on session.url being set — it is nullable,
        # so this can render "None/<name>"; confirm callers guard for it.
        return '%s/%s' % (self.session.url, self.name)
    class Meta:
        unique_together = ('session', 'name')
        ordering = ('session', 'name')
class StudyGroup(models.Model):
    """SIS study group from poldnev.ru"""
    parallel = models.ForeignKey(
        Parallel,
        on_delete=models.CASCADE,
        help_text='Параллель',
        related_name='study_groups',
    )
    name = models.CharField(
        max_length=100,
        help_text='Название'
    )
    schools_group = models.OneToOneField(
        schools.models.Group,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='poldnev_group',
        help_text='Группа из модуля schools, соответствующая этой группе на '
                  'poldnev.ru'
    )
    def __str__(self):
        return '{} {}'.format(self.parallel, self.name)
    @property
    def unique_key(self):
        # Extends the parallel's key with this group's name.
        return '{}:{}'.format(self.parallel.unique_key, self.name)
    class Meta:
        unique_together = ('parallel', 'name')
        ordering = ('parallel', 'name')
class Person(models.Model):
    """SIS person from poldnev.ru"""
    poldnev_id = models.IntegerField(
        primary_key=True,
        help_text='Id человека на poldnev.ru. Заполняется автоматически '
                  'командой manage.py update_poldnev по информации с сайта.')
    first_name = models.CharField(
        max_length=100,
        help_text='Имя человека на poldnev.ru. Заполняется автоматически '
                  'командой manage.py update_poldnev по информации с сайта.')
    middle_name = models.CharField(
        max_length=100,
        help_text='Отчество человека на poldnev.ru. Заполняется автоматически '
                  'командой manage.py update_poldnev по информации с сайта.')
    last_name = models.CharField(
        max_length=200,
        # BUG FIX: the original help_text was corrupted ("Фамили<NAME>еловека");
        # restored to match the wording of the sibling name fields.
        help_text='Фамилия человека на poldnev.ru. Заполняется автоматически '
                  'командой manage.py update_poldnev по информации с сайта.')
    user = models.OneToOneField(
        conf.settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='poldnev_person',
        help_text='Пользователь, соответствующий этому человеку на poldnev.ru')
    verified = models.BooleanField(
        default=False,
        help_text='True, если корректность значения user была проверена '
                  'человеком')

    class Meta:
        ordering = ('last_name', 'first_name', 'middle_name')

    def __str__(self):
        return '{} ({})'.format(self.full_name, self.poldnev_id)

    @property
    def full_name(self):
        # "Last First Middle", matching the site's display order.
        return ' '.join([self.last_name, self.first_name, self.middle_name])

    @property
    def url(self):
        # Public profile URL on poldnev.ru.
        return 'https://poldnev.ru/lksh/id' + str(self.poldnev_id)

    @property
    def last_history_entry(self):
        """Latest HistoryEntry for this person, cached per instance.

        BUG FIX: the original guarded the cache with
        ``hasattr(self, '_last_role')`` while storing
        ``_last_history_entry``, so the cached value was never reused and
        the query ran on every access.
        """
        if not hasattr(self, '_last_history_entry'):
            self._last_history_entry = self.history_entries.order_by('-session__poldnev_id').first()
        return self._last_history_entry
class HistoryEntry(models.Model):
    """The history entry from poldnev.ru

    Means that person participated in session in some role."""
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        help_text='Человек',
        related_name='history_entries',
    )
    session = models.ForeignKey(
        Session,
        on_delete=models.CASCADE,
        help_text='Смена',
        related_name='history_entries',
    )
    study_group = models.ForeignKey(
        StudyGroup,
        on_delete=models.CASCADE,
        related_name='history_entries',
        help_text='Учебная группа. Может отсутствовать, например, для врача',
        null=True,
        blank=True,
    )
    role = models.CharField(
        max_length=150,
        help_text='Роль. Пустая для школьников',
        blank=True,
    )

    def save(self, *args, **kwargs):
        """Refuse to save when the study group belongs to another session."""
        if (self.session is not None
                and self.study_group is not None
                and self.study_group.parallel is not None
                and self.session != self.study_group.parallel.session):
            raise IntegrityError(
                'poldnev.models.HistoryEntry: '
                'study_group should belong to entry\'s session'
            )
        super().save(*args, **kwargs)

    def __str__(self):
        return '{} ({}: {})'.format(
            self.person.full_name, self.session, self.full_role)

    @property
    def full_role(self):
        """Display role: group name, bare role, or "group.role".

        Bug fix: the empty-role branch ran first and dereferenced
        ``self.study_group.name`` even when ``study_group`` is None
        (allowed by the field), raising AttributeError for entries with
        neither a role nor a group. The None check now comes first; an
        entry with neither returns the empty role string.
        """
        if not self.study_group:
            return self.role
        if not self.role:
            return self.study_group.name
        return '{}.{}'.format(self.study_group.name, self.role)

    @property
    def unique_key(self):
        """Stable "person:session:role" identifier."""
        return '{}:{}:{}'.format(self.person.poldnev_id, self.session.poldnev_id, self.full_role)

    @property
    def url(self):
        """Anchor link to this session on the person's poldnev.ru page."""
        return 'https://poldnev.ru/lksh/id{}#{}'.format(
            self.person.poldnev_id, self.session.poldnev_id)

    class Meta:
        unique_together = ('session', 'study_group', 'role', 'person')
| StarcoderdataPython |
113088 | """
After an apology from the opponent (they play C while it plays D, only happens when they play D first), if the opponent
immediately plays D, it plays another C instead of punishing in order to encourage the opponent to get back to CC-chain
(i.e. olive branch). If it plays D again, go full D.
Difference between tftGrudgeOliveCM is that it does not have an increasing threshold of required good relations each
time it is betrayed. Makes it vulnerable to strategies like alternating between D and C.
"""
# memory[0] is True if it is currently doing the olive branch procedure, memory[1] is true if it is playing all D.
def strategy(history, memory):
    """Tit-for-tat variant with a single olive branch after an apology.

    memory[0]: True while the post-apology 'good relations' window is open.
    memory[1]: True once the strategy has switched to permanent defection.
    """
    rounds_played = history.shape[1]
    if rounds_played == 0:
        return 1, [False, False]
    my_last = history[0][-1]
    their_last = history[1][-1]
    if their_last == 0 and my_last == 1:  # opponent just betrayed us
        if rounds_played > 2 and history[0][-2] == 0:
            # We just came out of an apology cycle: offer the olive branch.
            return 1, [True, False]
        if memory[0]:
            # Betrayed during the goodwill window: defect forever.
            memory[1] = True
    elif their_last == 1 and memory[0]:
        # Opponent cooperated again, so the goodwill window closes normally.
        memory[0] = False
    if memory[1]:
        return 0, memory
    return their_last, memory
| StarcoderdataPython |
9699136 | <reponame>valentingol/PANNsTensorflow
import argparse
import os
import sys
from lib.options import Options
# Running 'pytest <this file> <options>' causes the parser
# to get <this file> and <options> as arguments (unexpected).
# The following line solves the problem.
sys.argv = ['pytest']
def test_init():
    """A freshly constructed Options object must not be initialized yet."""
    options = Options()
    assert options.initialized is False
def test_add_arguments_parser():
    """add_arguments_parser registers the expected arguments and groups."""
    options = Options()
    base_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser = options.add_arguments_parser(base_parser)
    parsed = vars(parser.parse_args())
    assert 'batch_size' in parsed
    assert type(parsed['batch_size']) is int
    assert 'mixup' in parsed
    assert parsed['mixup'] is False
    assert options.initialized is True
    titles = [group.title for group in parser._action_groups]
    for expected in ('Data', 'Model', 'Training', 'Augmentation'):
        assert expected in titles, f'{expected} is missing in group names'
def test_initialize_options(capsys):
    """initialize_options builds the namespace once and warns on re-init."""
    options = Options()
    opt = options.initialize_options()
    assert hasattr(opt, "batch_size")
    assert hasattr(opt, "mixup")
    assert type(opt.batch_size) is int
    assert options.initialized is True
    assert options.parser.parse_args() == opt
    # test warning
    options.initialize_options()
    captured = capsys.readouterr()
    assert captured.out.startswith("WARNING: Options was already initialized")
def test_parse():
    """parse() returns a populated namespace and marks Options initialized."""
    options = Options()
    parsed = options.parse()
    for attr in ('batch_size', 'mixup'):
        assert hasattr(parsed, attr)
    assert type(parsed.batch_size) is int
    assert options.initialized is True
    assert options.parser.parse_args() == parsed
def test_print_options(capsys):
    """print_options echoes every option and flags values changed from default."""
    options = Options()
    opt = options.parse()
    options.print_options(opt)
    captured = capsys.readouterr()
    assert "batch_size" in captured.out
    assert "mixup" in captured.out
    # case when a value is modified
    options = Options()
    opt = options.parse()
    opt.mixup = True
    options.print_options(opt)
    captured = capsys.readouterr()
    assert "mixup: True (default False)" in captured.out
def test_save_options():
    """save_options writes a .json file whether or not the path carries the extension."""
    options = Options()
    parsed = options.parse()
    # Both a '.json' path and a bare path must produce tests/tmp_opt.json.
    for target in ('tests/tmp_opt.json', 'tests/tmp_opt'):
        options.save_options(parsed, target)
        assert os.path.isfile('tests/tmp_opt.json')
        os.remove('tests/tmp_opt.json')
def test_load_options():
    """load_options restores saved options from paths with or without .json."""
    options = Options()
    opt = options.parse()
    opt.mixup = True
    options.save_options(opt, 'tests/tmp_opt.json')
    # test if path ending with .json works
    opt2 = options.load_options('tests/tmp_opt.json')
    assert hasattr(opt2, 'batch_size')
    assert hasattr(opt2, 'mixup')
    assert opt.batch_size == opt2.batch_size
    assert opt2.mixup is True
    # test if path that not ending with .json works
    opt3 = options.load_options('tests/tmp_opt')
    assert hasattr(opt3, 'batch_size')
    assert hasattr(opt3, 'mixup')
    assert opt.batch_size == opt3.batch_size
    assert opt3.mixup is True
    os.remove('tests/tmp_opt.json')
| StarcoderdataPython |
11398784 | <gh_stars>0
import pytest
from instascrape import Profile
@pytest.fixture(scope='module')
def profile() -> Profile:
    """Load the reference profile once and share it across the module."""
    url = "https://www.instagram.com/chris_greening/"
    loaded_profile = Profile(url)
    loaded_profile.load()
    return loaded_profile
def test_from_username(profile):
    """Profile.from_username yields the same URL as the direct constructor."""
    username = 'chris_greening'
    built_profile: Profile = Profile.from_username(username=username)
    assert built_profile.url == profile.url
| StarcoderdataPython |
322183 | #coding:utf-8
###########################################################
# SpCoTMHPi: Spatial Concept-based Path-Planning Program for SIGVerse
# Path-Planning Program by A star algorithm (ver. approximate inference)
# Path Selection: minimum cost (- log-likelihood) in a path trajectory
# <NAME> 2022/02/07
# Spacial Thanks: <NAME>, <NAME>
###########################################################
##Command:
#python3 spcotmhp_astar_metric.py trialname mapname iteration sample type_gauss
#python3 spcotmhp_astar_metric.py 3LDK_01 s3LDK_01 1 0 g
import sys
import time
import numpy as np
#import scipy as sp
from scipy.stats import multivariate_normal,multinomial
import matplotlib.pyplot as plt
import spconavi_read_data
import spconavi_save_data
#import spconavi_viterbi_path_calculate as spconavi_viterbi_path_calculate
from __init__ import *
from submodules import *
tools = spconavi_read_data.Tools()
read_data = spconavi_read_data.ReadingData()
save_data = spconavi_save_data.SavingData()
#path_calculate = spconavi_viterbi_path_calculate.PathPlanner()
#Definition of action (functions in spconavi_read_data)
action_functions = [tools.right, tools.left, tools.up, tools.down, tools.stay] #, migiue, hidariue, migisita, hidarisita]
cost_of_actions = np.log( np.ones(len(action_functions)) / float(len(action_functions)) ) #[ 1/5, 1/5, 1/5, 1/5, 1/5]) #, , 1, 1, 1, 1]
"""
#GaussMap make (no use) #Ito
def PostProb_ij(Index_temp,Mu,Sig,map_length,map_width, CostMapProb,it):
if (CostMapProb[Index_temp[1]][Index_temp[0]] != 0.0):
X_temp = tools.Array_index_To_Map_coordinates(Index_temp) #map と縦横の座標系の軸が合っているか要確認
#print X_temp,Mu
sum_i_GaussMulti = [ multivariate_normal.pdf(X_temp, mean=Mu[it], cov=Sig[it])] ##########np.array( ) !!! np.arrayにすると, numbaがエラーを吐く
PostProb = np.sum(sum_i_GaussMulti) #sum_c_ProbCtsum_i
else:
PostProb = 0.0
return PostProb
"""
#GaussMap make (use) #Ito
def PostProbMap_Gauss(CostMapProb, Mu, Sig, map_length, map_width, it):
    """Weight the cost map by the Gaussian of spatial concept ``it``.

    Evaluates N(Mu[it], Sig[it]) on the fixed [-10.0, 9.92] map grid
    (map_width x map_length cells) and multiplies it cell-wise into
    CostMapProb.
    """
    xs = np.linspace(-10.0, 9.92, map_width)
    ys = np.linspace(-10.0, 9.92, map_length)
    grid_x, grid_y = np.meshgrid(xs, ys)
    cell_coords = np.dstack((grid_x, grid_y))
    gauss = multivariate_normal(Mu[it], Sig[it]).pdf(cell_coords)
    return CostMapProb * gauss
#GaussMap make (use) #Ito->Akira
def PostProbMap_NormalizedGauss(CostMapProb, Mu, Sig, map_length, map_width, it):
    """Weight the cost map by the normalized Gaussian of concept ``it``.

    Same grid as PostProbMap_Gauss, but the selected Gaussian is divided
    by the sum of all K Gaussians, i.e. p(it | x) rather than p(x | it).
    """
    xs = np.linspace(-10.0, 9.92, map_width)
    ys = np.linspace(-10.0, 9.92, map_length)
    grid_x, grid_y = np.meshgrid(xs, ys)
    cell_coords = np.dstack((grid_x, grid_y))
    densities = [multivariate_normal(Mu[k], Sig[k]).pdf(cell_coords)
                 for k in range(len(Mu))]
    normalizer = np.sum(densities, 0)
    posterior = densities[it] / normalizer
    return CostMapProb * posterior
###↓### Sampling of goal candidates ############################################
def Sampling_goal(Otb_B, THETA):
    """Sample ``Sampling_J`` goal candidates from p(i_t | O_t, THETA).

    Otb_B: bag-of-words count vector of the speech instruction.
    THETA: learned spatial-concept parameters (unpacked below).
    Returns a list of (x, y) map-array indices; nondeterministic
    (random_state=None on both samplers).
    """
    # Unpack THETA
    W, W_index, Myu, S, pi, phi_l, K, L = THETA
    #Prob math func of p(it | Otb_B, THETA) = Σc p(it | phi_c)p(st=Otb_B | Wc)p(c | pi)
    pmf_it = np.ones(K)
    for i in range(K):
        sum_Ct = np.sum([phi_l[c][i] * multinomial.pmf(Otb_B, sum(Otb_B), W[c]) * pi[c] for c in range(L)])
        pmf_it[i] = sum_Ct
    #Normalization
    pmf_it_n = np.array([pmf_it[i] / float(np.sum(pmf_it)) for i in range(K)])
    #Sampling it from multinomial distribution
    sample_it = multinomial.rvs(Sampling_J, pmf_it_n, size=1, random_state=None)
    print(sample_it)
    goal_candidate = []
    # For each concept, draw as many positions from its Gaussian as the
    # multinomial draw assigned to it.
    for it in range(K):
        count_it = 0
        while (count_it < sample_it[0][it]):
            goal_candidate += [tools.Map_coordinates_To_Array_index(multivariate_normal.rvs(mean=Myu[it], cov=S[it], size=1, random_state=None))]
            count_it += 1
    #Xt_max = Map_coordinates_To_Array_index( [ Xp[pox.index(max(pox))][0], Xp[pox.index(max(pox))][1] ] ) #[0.0,0.0] ## coordinate candidate with maximum probability
    # Swap (row, col) to (x, y) tuples for the planner.
    goal_candidate_tuple = [(goal_candidate[j][1], goal_candidate[j][0]) for j in range(Sampling_J)]
    print("Goal candidates:", goal_candidate_tuple)
    return goal_candidate_tuple
###↑### Sampling of goal candidates ############################################
### A star algorithm (by <NAME>) ############################################
def a_star(start, goal, maze, action_functions, cost_of_actions, PathWeightMap):
    """A* search over the grid map, weighted by -log(PathWeightMap).

    start, goal: (row, col) array indices. maze: occupancy grid where 0 is
    free space. Returns (Path, p_cost) where Path is a list of (x, y)
    parent-chain points from goal back toward start and p_cost the
    accumulated cost at the popped goal node.
    """
    if (maze[goal[0]][goal[1]] != 0):
        print("[ERROR] goal",maze[goal[0]][goal[1]],"is not 0.")
    ###START A*
    # Parallel lists: node, accumulated cost g, and key f = g - heuristic.
    open_list = []
    open_list_cost = []
    open_list_key = []
    closed_list = []
    closed_list_cost = []
    closed_list_key = []
    open_list.append(start)
    open_list_cost.append(0)
    open_list_key.append(0 + tools.Manhattan_distance(start, goal))
    # OYA ("parent" in Japanese) maps a child (x, y) to its parent (x, y).
    OYA = {}
    ko = (0), (0)
    Path = []
    while open_list:
        # Pop the open node with the smallest key (stable for ties).
        sorted_idx = np.argsort(open_list_key, kind="stable")
        pop_idx = sorted_idx[0]
        p = open_list.pop(pop_idx)
        p_cost = open_list_cost.pop(pop_idx)
        p_key = open_list_key.pop(pop_idx)
        closed_list.append(p)
        closed_list_cost.append(p_cost)
        closed_list_key.append(p_key)
        if p == goal:
            break
        for act_func, act_cost in zip(action_functions, cost_of_actions):
            q = act_func(p)
            if (int(maze[q]) != 0):
                continue
            q_cost = p_cost - act_cost - np.log(PathWeightMap[q[0]][q[1]]) #current sum cost and action cost
            q_pev = tools.Manhattan_distance(q, goal) * np.log(float(len(action_functions))) #heuristic function
            q_key = q_cost - q_pev
            if q in open_list:
                idx = open_list.index(q)
                key = open_list_key[idx]
                if key > q_key:
                    open_list_key[idx] = q_key
                    open_list_cost[idx] = q_cost
            elif q in closed_list:
                # Re-open a closed node when a cheaper key is found.
                idx = closed_list.index(q)
                key = closed_list_key[idx]
                if key > q_key:
                    closed_list.pop(idx)
                    closed_list_cost.pop(idx)
                    closed_list_key.pop(idx)
                    open_list.append(q)
                    open_list_cost.append(q_cost)
                    open_list_key.append(q_key)
                    #plt.quiver(p[1], p[0], (q[1]-p[1]), (q[0]-p[0]), angles='xy', scale_units='xy', scale=1, color="tab:red")
                    OYA[(q[1], q[0])] = (p[1], p[0])
                    ko = (q[1]), (q[0])
                    #print(ko)
            else:
                open_list.append(q)
                open_list_cost.append(q_cost)
                open_list_key.append(q_key)
                #plt.quiver(p[1], p[0], (q[1]-p[1]), (q[0]-p[0]), angles='xy', scale_units='xy', scale=1, color="tab:red")
                OYA[(q[1], q[0])] = (p[1], p[0])
                ko = (q[1]), (q[0])
                #print(ko)
    # Determine the optimal path: walk back from the goal through the
    # parent nodes (where each node was reached from).
    #i = len(OYA)
    #for oyako in reversed(OYA):
    ko_origin = ko
    ko = (goal[1], goal[0])
    print(ko,goal)
    #for i in range(p_cost):
    while(ko != (start[1],start[0])):
        #print(OYA[ko])
        try:
            Path = Path + [OYA[ko]]
        except KeyError:
            # Goal itself was never parented: fall back to the last node
            # that was, and keep walking from there.
            ko = ko_origin
            Path = Path + [OYA[ko]]
            print("NOT END GOAL.")
        ko = OYA[ko]
        #i = len(Path)
        #print(i, ko)
        #i -= 1
    print(goal,": Total cost using A* algorithm is "+ str(p_cost))
    return Path, p_cost
### A star algorithm (by <NAME>) ############################################
#################################################
# Main script: for every pair of spatial concepts connected in
# psi_setting.csv, run A* between their Gaussian means, save the path,
# its distance/cost, the per-step log-likelihood and a path image.
print("[START] SpCoTMHP. (A star metric path)")
# Folder name of the room environment containing the map data
# (i.e. the learned-parameter folder name) is requested.
#Request a folder name for learned parameters.
trialname = sys.argv[1]
#map file name is requested
mapname = sys.argv[2]
#Request iteration value
iteration = sys.argv[3] #1
#Request sample value
sample = sys.argv[4] #0
# Whether the weight map uses a raw Gaussian or a normalized Gaussian.
type_gauss = sys.argv[5] # g: gauss, ng: normalized gauss
#Request the index number of the robot initial position
#init_position_num = sys.argv[5] #0
#Request the file number of the speech instruction
#speech_num = sys.argv[6] #0
# For saving the metric path.
# NOTE(review): K here must come from the `from __init__ import *` star
# import; it is re-assigned from THETA below — confirm the two agree.
Like_save = [ [0.0 for atem in range(K)] for aky in range(K) ]
Distance_save = [ [0.0 for atem in range(K)] for aky in range(K) ]
##FullPath of folder
filename = outputfolder_SIG + trialname #+ "/"
print(filename, iteration, sample)
outputfile = filename + navigation_folder #+ "astar_node/" #outputfolder + trialname + navigation_folder #Ito
if (type_gauss == "g"):
    outputsubfolder = outputfile + "astar_node_gauss/"
else:
    outputsubfolder = outputfile + "astar_node/"
#"T"+str(T_horizon)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
Makedir( outputfile )
Makedir( outputsubfolder )
#Read the files of learned parameters  #THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
THETA = read_data.ReadParameters(iteration, sample, filename, trialname)
W, W_index, Mu, Sig, pi, phi_l, K, L = THETA
#W_index = THETA[1]
#Ito# Pseudo-read psi_setting.csv so that edges with low transition
#Ito# probability are not computed. Note: these are NOT the actual
#Ito# probability values of psi, just 0/1 connectivity flags.
psi  = [ [0.0 for atem in range(K)] for aky in range(K) ]
c=0
for line in open(filename + "/" + trialname + '_psi_' + 'setting.csv', 'r'):
    itemList = line[:-1].split(',')
    for i in range(len(itemList)):
        if itemList[i] != "":
            psi[c][i] = float(itemList[i])
    c = c + 1
##Read the map file
gridmap = read_data.ReadMap(outputfile)
map_length, map_width = gridmap.shape
#GridMapProb = 1*(gridmap == 0)
##Read the cost map file
#CostMap = read_data.ReadCostMap(outputfile)
#CostMapProb_tmp = (100.0 - CostMap)/100
#CostMapProb = CostMapProb_tmp * GridMapProb
#Read the probabilistic cost map file
CostMapProb = read_data.ReadCostMapProb(outputfile)
for st_i in range(K):
    for gl_i in range(K):
        if st_i == gl_i:
            # No path needed from a concept to itself.
            Distance_save[st_i][gl_i]=0
            Like_save[st_i][gl_i]=0
        elif psi[st_i][gl_i] == 1:
            St=st_i
            Gl=gl_i
            outputname = outputsubfolder + "Astar_SpCoTMHP_"+"S"+str(St)+"_G"+str(Gl)
            if (type_gauss == "g"):
                PathWeightMap = PostProbMap_Gauss(CostMapProb,Mu,Sig,map_length,map_width,Gl)
            else:
                PathWeightMap = PostProbMap_NormalizedGauss(CostMapProb,Mu,Sig,map_length,map_width,Gl)
            ##### Drawing
            plt.imshow(gridmap + (40+1)*(gridmap == -1), origin='lower', cmap='binary', vmin = 0, vmax = 100, interpolation='none') #, vmin = 0.0, vmax = 1.0)
            plt.xticks(rotation=90)
            plt.tick_params(axis='x', which='major', labelsize=8)
            plt.tick_params(axis='y', which='major', labelsize=8)
            #plt.xlim([380,800])             # x-axis range
            #plt.ylim([180,510])             # y-axis range
            plt.xlabel('X', fontsize=10)
            plt.ylabel('Y', fontsize=10)
            #plt.xticks(np.arange(width), np.arange(width))
            #plt.yticks(np.arange(height), np.arange(height))
            plt.gca().set_aspect('equal')
            # Plot the start and the goal
            #plt.plot(start[1], start[0], "D", color="tab:blue", markersize=1)
            #plt.plot(goal[1], goal[0], "D", color="tab:pink", markersize=1)
            #plt.show()
            ### Prepare multiple goal candidates
            #goal_candidate = Sampling_goal(Otb_B, THETA) #(0,0)
            # Start: the Gaussian mean of concept St
            ss=tools.Map_coordinates_To_Array_index(Mu[St])
            start=(ss[1],ss[0]) # specify the start position
            # Goal: the Gaussian mean of concept Gl
            gg=tools.Map_coordinates_To_Array_index(Mu[Gl])
            #print(gg[0])
            goal_candidate = [[gg[1],gg[0]]]
            #J = Sampling_J #len(goal_candidate)
            #if(J != THETA[6]):
            #    print("[WARNING] J is not K",J,K)
            J=1
            p_cost_candidate = [0.0 for j in range(J)]
            Path_candidate = [[0.0] for j in range(J)]
            Like_candidate = [0.0 for j in range(J)]
            #print(goal_candidate)
            if (SAVE_time == 1):
                #Substitution of start time
                start_time = time.time()
            ### Run A* for each goal candidate
            for gc_index in range(J):
                goal = goal_candidate[gc_index]
                Path, p_cost = a_star(start, goal, gridmap, action_functions, cost_of_actions, PathWeightMap)
                Like_candidate[gc_index] = p_cost
                p_cost_candidate[gc_index] = p_cost / float(len(Path))
                Path_candidate[gc_index] = Path
            ### select the goal of expected cost
            expect_gc_index = np.argmin(p_cost_candidate)
            Path = Path_candidate[expect_gc_index]
            goal = goal_candidate[expect_gc_index]
            print("Goal:", goal)
            if (SAVE_time == 1):
                # Keep the path-planning end time
                end_pp_time = time.time()
                time_pp = end_pp_time - start_time
                fp = open( outputname + "_time_pp.txt", 'w')
                fp.write(str(time_pp)+"\n")
                fp.close()
            for i in range(len(Path)):
                plt.plot(Path[i][0], Path[i][1], "s", color="tab:red", markersize=1)
            #The moving distance of the path
            Distance = tools.PathDistance(Path)
            Distance_save[st_i][gl_i] = Distance
            Like_save[st_i][gl_i]=Like_candidate[expect_gc_index]  # actually the A* cost value, not a likelihood
            #Save the moving distance of the path
            save_data.SavePathDistance(Distance, outputname)
            print("Path distance using A* algorithm is "+ str(Distance))
            # The computed path has x and y swapped, so fix it here.
            Path_inv = [[Path[t][1], Path[t][0]] for t in range(len(Path))]
            Path_inv.reverse()
            Path_ROS = Path_inv # provisional measure; not actually used
            #Save the path
            save_data.SavePath(start, [goal[1], goal[0]], Path_inv, Path_ROS, outputname)
            #Read the emission probability file
            #PathWeightMap = ReadProbMap(outputfile)
            #Save the log-likelihood of the path
            # Recompute the log-likelihood values from PathWeightMap and Path.
            LogLikelihood_step = np.zeros(T_horizon)
            LogLikelihood_sum = np.zeros(T_horizon)
            for i in range(T_horizon):
                if (i < len(Path)):
                    t = i
                else:
                    t = len(Path) -1
                #print PathWeightMap.shape, Path[t][0], Path[t][1]
                LogLikelihood_step[i] = np.log(PathWeightMap[ Path_inv[t][0] ][ Path_inv[t][1] ])
                if (t == 0):
                    LogLikelihood_sum[i] = LogLikelihood_step[i]
                elif (t >= 1):
                    LogLikelihood_sum[i] = LogLikelihood_sum[i-1] + LogLikelihood_step[i]
            # Save the log-likelihood value at every step.
            save_data.SaveLogLikelihood(LogLikelihood_step,0,0,outputname)
            # Save the cumulative reward (sum of log-likelihood) at every step.
            save_data.SaveLogLikelihood(LogLikelihood_sum,1,0,outputname)
            #Save path trajectory in the map as a color image
            plt.savefig(outputname + '_Path.png', dpi=300)#, transparent=True
            plt.savefig(outputname + '_Path.pdf', dpi=300)#, transparent=True
            plt.clf()
        else:
            # Edge disabled in psi_setting.csv: record zeros.
            Distance_save[st_i][gl_i]=0
            Like_save[st_i][gl_i]=0
print("[END] SpCoTMHP. (A star metric path)")
#if (type_gauss == "g"):
#    outputsubfolder = outputfile + "Astar_SpCoTMHP_gauss_"
#else:
outputsubfolder = outputfile + "Astar_SpCoTMHP_"
np.savetxt(outputsubfolder+"distance.csv",Distance_save,delimiter=",")
np.savetxt(outputsubfolder+"cost.csv",Like_save,delimiter=",")
| StarcoderdataPython |
5164497 | from jig.commands.base import BaseCommand
from jig.commands.hints import AFTER_INIT
from jig.gitutils.hooking import hook
from jig.plugins import initializer
try:
import argparse
except ImportError: # pragma: no cover
from backports import argparse
# Module-level argument parser reused by the Command class below.
_parser = argparse.ArgumentParser(
    description='Initialize a Git repository for use with Jig',
    usage='jig init [-h] [PATH]')
# Optional positional PATH; defaults to the current directory.
_parser.add_argument(
    'path', default='.', nargs='?',
    help='Path to the Git repository')
class Command(BaseCommand):
    """`jig init`: set up a Git repository for use with Jig."""
    parser = _parser
    def process(self, argv):
        """Install Jig into the repository at ``argv.path`` and print hints."""
        path = argv.path
        with self.out() as printer:
            hook(path)  # install the Git hook
            initializer(path)  # create Jig's plugin configuration
            printer(
                'Git repository has been initialized for use with Jig.'
            )
            printer(AFTER_INIT)
| StarcoderdataPython |
11331005 | from io import StringIO
import pytest
from snowfakery.data_generator import generate
from snowfakery.data_gen_exceptions import DataGenError
class TestFaker:
    """Recipes with misspelled structural keys must be rejected."""

    def test_top_field_unknown(self):
        """A typo in the top-level 'object' key raises DataGenError."""
        recipe = """
        - bobject: OBJ
          fields:
            first_name:
                fake:
                    first_name
        """
        with pytest.raises(DataGenError):
            generate(StringIO(recipe), {}, None)

    def test_secondary_field_unknown(self):
        """A typo in the 'fields' key raises DataGenError."""
        recipe = """
        - object: OBJ
          bfields:
            first_name:
                fake:
                    first_name
        """
        with pytest.raises(DataGenError):
            generate(StringIO(recipe), {}, None)
| StarcoderdataPython |
1626122 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import sqlite3
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
DB = SQLAlchemy(app)
# Create user class
class User(DB.Model):
    '''A user account; ``name`` is the unique handle (max 20 chars).'''
    id = DB.Column(DB.Integer, primary_key=True)
    name = DB.Column(DB.String(20), unique=True, nullable=False)
    def __repr__(self):
        # Debug-friendly representation, e.g. <User alice>.
        return '<User {}>'.format(self.name)
class Tweet(DB.Model):
    '''A tweet of up to 280 characters, linked to its authoring User.'''
    id = DB.Column(DB.Integer, primary_key=True)
    text = DB.Column(DB.Unicode(280))
    user_id = DB.Column(DB.Integer, DB.ForeignKey('user.id'), nullable=False)
    # Backref exposes ``some_user.tweets`` lazily.
    user = DB.relationship('User', backref=DB.backref('tweets', lazy=True))
    def __repr__(self):
        # Debug-friendly representation showing the tweet text.
        return '<Tweet {}>'.format(self.text)
| StarcoderdataPython |
4955169 | """
This testsuite contains the initial test cases for testing the
f5 converter tool along with its options / parameters
"""
import json
import logging
import os
import subprocess
import sys
import pytest
import yaml
from avi.migrationtools.avi_migration_utils import get_count, set_update_count
from avi.migrationtools.f5_converter.f5_converter import F5Converter, get_terminal_args,\
ARG_DEFAULT_VALUE
from avi.migrationtools.test.common.excel_reader \
import percentage_success, output_sanitization, output_vs_level_status
from avi.migrationtools.test.common.test_clean_reboot \
import verify_controller_is_up, clean_reboot
from avi.migrationtools.test.common.test_tenant_cloud \
import create_segroup, create_vrf_context
import ansible_runner
config_file = pytest.config.getoption("--config")
input_file = pytest.config.getoption("--file")
input_file_version = pytest.config.getoption("--fileVersion")
output_file = pytest.config.getoption("--out")
if not output_file:
output_file = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'output'))
input_file_v10 = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'bigip_v10.conf'))
input_file_v11 = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'hol_advanced_bigip.conf'))
input_role_config_file = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'custom_config.yaml'))
input_config_yaml = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'config.yaml'))
v10 = '10'
v11 = '11'
if input_file_version == '10' and input_file:
v10 = '10'
input_file_v10 = input_file
elif input_file_version == '11' and input_file:
v11 = '11'
input_file_v11 = input_file
elif any([input_file_version, input_file]):
print("Both arguments 'input_file_version' and 'input_file' are mandatory")
sys.exit(0)
with open(config_file) as f:
file_attribute = yaml.load(f, Loader=yaml.Loader)
setup = dict(
controller_version_v17=file_attribute['controller_version_v17'],
file_version_v10=v10,
file_version_v11=v11,
version=True,
option=file_attribute['option'],
controller_ip_17_1_1=file_attribute['controller_ip_17_1_1'],
controller_user_17_1_1=file_attribute['controller_user_17_1_1'],
controller_password_17_1_1=file_attribute['controller_password_17_1_1'],
f5_host_ip_v10=file_attribute['f5_host_ip_v10'],
f5_host_ip_v11=file_attribute['f5_host_ip_v11'],
f5_ssh_user=file_attribute['f5_ssh_user'],
f5_ssh_user_10=file_attribute['f5_ssh_user_10'],
f5_ssh_password=file_attribute['f5_ssh_password'],
f5_ssh_port=file_attribute['f5_ssh_port'],
no_profile_merge=file_attribute['no_profile_merge'],
prefix=file_attribute['prefix'],
cloud_name=file_attribute['cloud_name'],
tenant=file_attribute['tenant'],
input_folder_location=os.path.abspath(os.path.join(os.path.dirname(__file__), 'certs')),
config_file_name_v10=input_file_v10,
config_file_name_v11=input_file_v11,
partition_config='new', # this is new
f5_key_file='cd_rt_key.pem',
ignore_config=os.path.abspath(os.path.join(os.path.dirname(__file__),
'ignore-config.yaml')),
patch=os.path.abspath(os.path.join(os.path.dirname(__file__),
'patch.yaml')),
vs_filter='EngVIP,F5-VIP-80-001,F5-VIP-443-002',
not_in_use=True,
skip_file=False,
ansible=True,
baseline_profile=None,
f5_passphrase_file=os.path.abspath(os.path.join(
os.path.dirname(__file__), 'passphrase.yaml')),
f5_ansible_object=os.path.abspath(os.path.join(
os.path.dirname(__file__), 'output',
'avi_config_create_object.yml')),
vs_level_status=True,
test_vip=None,
output_file_path=output_file,
vrf='test_vrf',
segroup='test_se',
custom_config_file=input_role_config_file,
distinct_app_profile=True,
args_config_file=input_config_yaml
)
if not os.path.exists(setup.get("output_file_path")):
os.mkdir(setup.get("output_file_path"))
formatter = '[%(asctime)s] %(levelname)s [%(funcName)s:%(lineno)d] %(message)s'
logging.basicConfig(filename=os.path.join(
setup.get('output_file_path'), 'converter.log'),
level=logging.DEBUG, format=formatter)
mylogger = logging.getLogger(__name__)
class Namespace:
    """Lightweight attribute bag: every keyword argument becomes an attribute."""
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
def f5_conv(
        bigip_config_file=None, skip_default_file=False, f5_config_version=None,
        input_folder_location=os.path.abspath(os.path.join(os.path.dirname(__file__), 'certs')),
        output_file_path=output_file, option=ARG_DEFAULT_VALUE['option'],
        user=ARG_DEFAULT_VALUE['user'],
        password=None, controller_ip=None,
        tenant='admin', cloud_name=ARG_DEFAULT_VALUE['cloud_name'],
        vs_state=ARG_DEFAULT_VALUE['vs_state'],
        controller_version=None, f5_host_ip=None, f5_ssh_user=None,
        f5_ssh_password=None, f5_ssh_port=None, f5_key_file=None,
        ignore_config=None, partition_config=None, version=False,
        no_profile_merge=False, patch=None, vs_filter=None,
        ansible_skip_types=[], ansible_filter_types=[], ansible=False,
        prefix=None, convertsnat=False, not_in_use=False, baseline_profile=None,
        f5_passphrase_file=None, vs_level_status=False, test_vip=None,
        vrf=None, segroup=None, custom_config=None, skip_pki=False,
        distinct_app_profile=False, reuse_http_policy=False, args_config_file=None):
    """Run F5Converter with the given options and return the Avi config.

    Bug fix: the ``password`` and ``f5_ssh_password`` defaults and the
    Namespace pass-through contained '<PASSWORD>' redaction placeholders,
    which are syntax errors; they are restored to ``None`` / the actual
    parameter. The mutable list defaults are kept for interface
    compatibility; they are only read here, never mutated.
    """
    # Pack everything into the argparse-style namespace the converter expects.
    args = Namespace(bigip_config_file=bigip_config_file,
                     skip_default_file=skip_default_file,
                     f5_config_version=f5_config_version,
                     input_folder_location=input_folder_location,
                     output_file_path=output_file_path, option=option,
                     user=user, password=password, controller_ip=controller_ip,
                     tenant=tenant, cloud_name=cloud_name, vs_state=vs_state,
                     controller_version=controller_version,
                     f5_host_ip=f5_host_ip, f5_ssh_user=f5_ssh_user,
                     f5_ssh_password=f5_ssh_password,
                     f5_ssh_port=f5_ssh_port, f5_key_file=f5_key_file,
                     ignore_config=ignore_config,
                     partition_config=partition_config, version=version,
                     no_object_merge=no_profile_merge, patch=patch,
                     vs_filter=vs_filter, ansible_skip_types=ansible_skip_types,
                     ansible_filter_types=ansible_filter_types, ansible=ansible,
                     prefix=prefix, convertsnat=convertsnat,
                     not_in_use=not_in_use, baseline_profile=baseline_profile,
                     f5_passphrase_file=f5_passphrase_file,
                     vs_level_status=vs_level_status, test_vip=test_vip,
                     vrf=vrf, segroup=segroup,
                     custom_config=custom_config,
                     skip_pki=skip_pki,
                     distinct_app_profile=distinct_app_profile,
                     reuse_http_policy=reuse_http_policy,
                     args_config_file=args_config_file)
    # Merge in any terminal/config-file arguments, then convert.
    args = get_terminal_args(terminal_args=args)
    f5_converter = F5Converter(args)
    avi_config = f5_converter.convert()
    return avi_config
class TestF5Converter:
    @pytest.fixture
    def cleanup(self):
        """Best-effort reset of converter state and the output directory.

        Errors during deletion are printed and ignored on purpose so one
        stuck file does not fail an unrelated test.
        """
        import avi.migrationtools.f5_converter.conversion_util as conv
        import shutil
        conv.csv_writer_dict_list = list()
        if os.path.exists(output_file):
            for each_file in os.listdir(output_file):
                file_path = os.path.join(output_file, each_file)
                try:
                    if os.path.isfile(file_path):
                        if file_path.endswith('.log'):
                            # NOTE(review): this truncates 'converter.log' in
                            # the CWD, not file_path — confirm intended.
                            open('converter.log', 'w').close()
                        else:
                            os.unlink(file_path)
                    elif os.path.isdir(file_path):
                        shutil.rmtree(file_path)
                except Exception as e:
                    print(e)
    @pytest.mark.skip_travis
    @pytest.mark.TCID1_48_1497_1_0
    def test_download_v11(self, cleanup):
        """
        Download Input File Flow, Test for Controller v17.1.1
        """
        # Pulls the v11 config over SSH from a live BIG-IP and uploads to
        # the controller; passes if conversion completes without raising.
        # NOTE(review): the key 'controller_password_1<PASSWORD>' below looks
        # garbled — likely should be 'controller_password_17_1_1'; confirm.
        f5_conv(f5_host_ip=setup.get('f5_host_ip_v11'),
                controller_version=setup.get('controller_version_v17'),
                f5_ssh_user=setup.get('f5_ssh_user'),
                f5_ssh_password=setup.get('f5_ssh_password'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                f5_config_version=setup.get('file_version_v11'),
                option=setup.get('option'),
                controller_ip=setup.get('controller_ip_17_1_1'),
                user=setup.get('controller_user_17_1_1'),
                password=setup.get('controller_password_1<PASSWORD>'),
                skip_pki=True)
# Dont have version 10 F5 instance so commenting the tests
# @pytest.mark.skip_travis
# @pytest.mark.TCID1_48_1497_2_0
# def test_download_v10(self, cleanup):
# """
# Download Input File Flow, Test for Controller v17.1.1
# """
# f5_conv(f5_host_ip=setup.get('f5_host_ip_v10'),
# controller_version=setup.get('controller_version_v17'),
# f5_ssh_user=setup.get('f5_ssh_user_10'),
# f5_ssh_password=setup.get('f5_ssh_password'),
# f5_ssh_port=setup.get('f5_ssh_port'),
# # Dont have version 10 F5 instance
# # f5_config_version=setup.get('file_version_v10'),
# skip_pki=True)
#
# @pytest.mark.skip_travis
# @pytest.mark.TCID1_48_1497_3_0
# def test_output_sanitization_v10(self, cleanup):
# f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
# # Dont have version 10 F5 instance
# # f5_config_version=setup.get('file_version_v10'),
# controller_version=setup.get('controller_version_v17'),
# f5_ssh_port=setup.get('f5_ssh_port'),
# output_file_path=output_file,
# skip_pki=True)
# self.excel_path = os.path.abspath(os.path.join(
# output_file, 'bigip_v10-ConversionStatus.xlsx'))
# self.json_path = os.path.abspath(os.path.join(
# output_file, 'bigip_v10-Output.json'))
# self.log_path = os.path.abspath(os.path.join(
# output_file, 'converter.log'))
# assert output_sanitization(self.excel_path, self.json_path,
# self.log_path)
    @pytest.mark.skip_travis
    @pytest.mark.TCID1_48_1497_4_0
    def test_output_sanitization_v11(self, cleanup):
        """Convert the v11 config and sanity-check the generated report,
        JSON output and log for consistency."""
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                f5_config_version=setup.get('file_version_v11'),
                controller_version=setup.get('controller_version_v17'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                output_file_path=output_file,
                skip_pki=True)
        self.excel_path = os.path.abspath(os.path.join(
            output_file, 'hol_advanced_bigip-ConversionStatus.xlsx'))
        self.json_path = os.path.abspath(os.path.join(
            output_file, 'hol_advanced_bigip-Output.json'))
        self.log_path = os.path.abspath(os.path.join(
            output_file, 'converter.log'))
        assert output_sanitization(self.excel_path,
                                   self.json_path,
                                   self.log_path)
    @pytest.mark.travis
    @pytest.mark.TCID1_48_1497_5_0
    def test_excel_report_v11(self, cleanup):
        """Convert the v11 config and validate the success percentage in
        the generated Excel conversion-status report."""
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                f5_config_version=setup.get('file_version_v11'),
                controller_version=setup.get('controller_version_v17'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                output_file_path=output_file)
        percentage_success(os.path.join(output_file,
                                        'hol_advanced_bigip-ConversionStatus.xlsx'))
    @pytest.mark.travis
    @pytest.mark.TCID1_48_1497_6_0
    def test_without_options_v10(self, cleanup):
        """
        Check the Configuration file for V10
        """
        # Minimal-argument conversion; passes if no exception is raised.
        f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
                controller_version=setup.get('controller_version_v17'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                f5_config_version=setup.get('file_version_v10'))
    @pytest.mark.travis
    @pytest.mark.TCID1_48_1497_7_0
    def test_without_options_v11(self, cleanup):
        """
        Check the configuration file for v11
        """
        # Minimal-argument conversion; passes if no exception is raised.
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                controller_version=setup.get('controller_version_v17'),
                f5_config_version=setup.get('file_version_v11'),
                f5_ssh_port=setup.get('f5_ssh_port'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_8_0
def test_no_profile_merge_v10(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
No_profile_merge Flag Reset
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'),
no_profile_merge=setup.get('no_profile_merge'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_9_0
def test_no_profile_merge_v11(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
No_profile_merge Flag Reset
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
no_profile_merge=setup.get('no_profile_merge'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_10_0
def test_prefix_v10(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Prefix Added
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'),
prefix=setup.get('prefix'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_11_0
def test_prefix_v11(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Prefix Added
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
prefix=setup.get('prefix'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_12_0
    def test_cloud_name_v10(self, cleanup):
        """
        Input File on Local Filesystem, Test for Controller v17.1.1,
        Cloud Name Added
        """
        f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
                controller_version=setup.get('controller_version_v17'),
                f5_config_version=setup.get('file_version_v10'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                cloud_name=setup.get('cloud_name'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_13_0
    def test_cloud_name_v11(self, cleanup):
        """
        Input File on Local Filesystem, Test for Controller v17.1.1,
        Cloud Name Added
        """
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                controller_version=setup.get('controller_version_v17'),
                f5_config_version=setup.get('file_version_v11'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                cloud_name=setup.get('cloud_name'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_14_0
def test_tenant_v10(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Tenant Added
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'),
tenant=setup.get('tenant'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_15_0
def test_tenant_v11(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Tenant Added
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
tenant=setup.get('tenant'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_16_0
def test_input_folder_path_not_provided_v10(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Input Folder path not provided
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'),
input_folder_location=setup.get('input_folder_location'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_17_0
def test_input_folder_path_not_provided_v11(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Input Folder path not provided
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
input_folder_location=setup.get('input_folder_location'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_18_0
def test_ignore_config_v10(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
ignore_config option usage
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'),
ignore_config=setup.get('ignore_config'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_19_0
def test_ignore_config_v11(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
ignore_config option usage
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
ignore_config=setup.get('ignore_config'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_20_0
def test_patch_v10(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Patch option usage
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'),
patch=setup.get('patch'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_21_0
def test_patch_v11(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
Patch option usage
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
patch=setup.get('patch'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_22_0
    def test_not_in_use_v10(self, cleanup):
        """
        Input File on Local Filesystem, Test for Controller v17.1.1,
        Not_in_use Option Set
        """
        f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
                controller_version=setup.get('controller_version_v17'),
                f5_config_version=setup.get('file_version_v10'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                not_in_use=setup.get('not_in_use'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_23_0
    def test_not_in_use_v11(self, cleanup):
        """
        Input File on Local Filesystem, Test for Controller v17.1.1,
        Not_in_use Option Set
        """
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                controller_version=setup.get('controller_version_v17'),
                f5_config_version=setup.get('file_version_v11'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                not_in_use=setup.get('not_in_use'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_24_0
    def test_passphrase_v10(self, cleanup):
        """
        Input File on Local Filesystem, Test for Controller v17.1.1,
        F5 Passphrase File Supplied
        """
        f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
                controller_version=setup.get('controller_version_v17'),
                f5_config_version=setup.get('file_version_v10'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                f5_passphrase_file=setup.get('f5_passphrase_file'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_25_0
    def test_passphrase_v11(self, cleanup):
        """
        Input File on Local Filesystem, Test for Controller v17.1.1,
        F5 Passphrase File Supplied (PKI conversion skipped)
        """
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                controller_version=setup.get('controller_version_v17'),
                f5_config_version=setup.get('file_version_v11'),
                f5_ssh_port=setup.get('f5_ssh_port'),
                f5_passphrase_file=setup.get('f5_passphrase_file'),
                skip_pki=True)
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_27_0
    def test_reboot_clean_v10_17_1_1(self, cleanup):
        """Verify Controller v17.1.1 is running and clean-reboot it via the
        Avi API.

        After the controller setup completes, clean_reboot() also uploads
        the AviInternal certificate file.
        """
        is_up = verify_controller_is_up(file_attribute['controller_ip_17_1_1'],
                                        file_attribute[
                                            'controller_user_17_1_1'],
                                        file_attribute[
                                            'controller_password_17_1_1'])
        if is_up:
            clean_reboot(file_attribute['controller_ip_17_1_1'],
                         file_attribute['controller_user_17_1_1'],
                         file_attribute['controller_password_17_1_1'],
                         file_attribute['controller_version_v17'],
                         file_attribute['license_file_path'])
            print("Controller is running properly.")
        else:
            print("Controller is not running properly.")
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_26_0
def test_auto_upload_v10_17_1_1(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
AutoUpload Flow
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
option=setup.get('option'),
controller_ip=setup.get('controller_ip_17_1_1'),
user=setup.get('controller_user_17_1_1'),
password=<PASSWORD>.<PASSWORD>('controller_password_<PASSWORD>'),
skip_pki=True)
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_27_0
def test_reboot_clean_v10_17_1_1(self, cleanup):
"""""
Verify Controller v17.1.1 is running and clean reboot avi api.
After controller setup completed, upload the AviInternal
certificate file.
"""
is_up = verify_controller_is_up(file_attribute['controller_ip_17_1_1'],
file_attribute[
'controller_user_17_1_1'],
file_attribute[
'controller_password_17_1_1'])
if is_up:
clean_reboot(file_attribute['controller_ip_17_1_1'],
file_attribute['controller_user_17_1_1'],
file_attribute['controller_password_17_<PASSWORD>'],
file_attribute['controller_version_v17'],
file_attribute['license_file_path'])
print("Controller is running properly.")
else:
print("Controller is not running properly.")
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_28_0
def test_cross_tenant_auto_upload(self, cleanup):
"""
Input File on Local Filesystem, Test for cloning of cross tenant
references on the Controller,
AutoUpload Flow
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
option=setup.get('option'),
controller_ip=setup.get('controller_ip_17_1_1'),
user=setup.get('controller_user_17_1_1'),
password=setup.get('controller_password_<PASSWORD>'),
skip_pki=True)
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_29_0
    def test_reboot_clean_v11_17_1_1(self, cleanup):
        """Verify Controller v17.1.1 is running and clean-reboot it via the
        Avi API.

        After the controller setup completes, clean_reboot() also uploads
        the AviInternal certificate file.
        """
        is_up = verify_controller_is_up(file_attribute['controller_ip_17_1_1'],
                                        file_attribute[
                                            'controller_user_17_1_1'],
                                        file_attribute[
                                            'controller_password_17_1_1'])
        if is_up:
            clean_reboot(file_attribute['controller_ip_17_1_1'],
                         file_attribute['controller_user_17_1_1'],
                         file_attribute['controller_password_17_1_1'],
                         file_attribute['controller_version_v17'],
                         file_attribute['license_file_path'])
            print("Controller is running properly.")
        else:
            print("Controller is not running properly.")
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_30_0
def test_auto_upload_v11_17_1_1(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
AutoUpload Flow
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
output_file_path=setup.get('output_file_path'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
option=setup.get('option'),
controller_ip=setup.get('controller_ip_17_1_1'),
user=setup.get('controller_user_17_1_1'),
password=setup.get('controller_password_1<PASSWORD>'),
skip_pki=True)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_31_0
def test_create_ansible_object_creation_v11(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1
Create Ansible Script based on Flag
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
output_file_path=setup.get('output_file_path'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
ansible=setup.get('ansible'),
skip_pki=True)
file_name = output_file + '/avi_config_create_object.yml'
with open(file_name) as o_file:
file_object = yaml.load(o_file, Loader=yaml.Loader)
assert file_object[0].get('tasks', False)
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_32_0
def test_reboot_clean_ansible_v11_17_1_1(self, cleanup):
"""""
Verify Controller v17.1.1 is running and clean reboot avi api.
After controller setup completed, upload the AviInternal
certificate file.
"""
is_up = verify_controller_is_up(file_attribute['controller_ip_17_1_1'],
file_attribute[
'controller_user_17_1_1'],
file_attribute[
'controller_password_<PASSWORD>'])
if is_up:
clean_reboot(file_attribute['controller_ip_17_1_1'],
file_attribute['controller_user_17_1_1'],
file_attribute['controller_password_<PASSWORD>'],
file_attribute['controller_version_v17'],
file_attribute['license_file_path'])
print("Controller is running properly.")
else:
print("Controller is not running properly.")
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_33_0
def test_ansible_object_auto_upload_v11_17_1_1(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1
AutoUpload Flow
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
output_file_path=setup.get('output_file_path'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v11'),
f5_ssh_port=setup.get('f5_ssh_port'),
ansible=setup.get('ansible'),
skip_pki=True)
print(subprocess.check_output('pip install avisdk --upgrade',
shell=True))
print(subprocess.check_output(
'/usr/local/bin/ansible-galaxy install avinetworks.avisdk avinetworks.avimigrationtools',
shell=True))
try:
output = ansible_runner.run(
playbook = setup.get('f5_ansible_object'),
extravars = {'controller': setup.get('controller_ip_17_1_1'),
'username': setup.get('controller_user_17_1_1'),
'password':setup.get('controller_password_<PASSWORD>')},
verbosity = 3,
quiet = True)
playbook_stats = output.stats
playbook_output = output.stdout.read()
mylogger.info('ansible playbook output: \n{}'.format(playbook_output))
except:
mylogger.info('Failed to create object on controller output: \n{}'.format(playbook_output))
output = False
assert output
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_34_0
def test_create_ansible_object_v10(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1
Create Ansible Script based on Flag
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
output_file_path=setup.get('output_file_path'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'),
ansible=setup.get('ansible'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_35_0
def test_vs_level_status_true_v10(self, cleanup):
"""
Input File on Local Filesystem, VS level option true usage
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
vs_level_status=setup.get('vs_level_status'),
f5_ssh_port=setup.get('f5_ssh_port'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_36_0
def test_vs_level_status_false_v10(self, cleanup):
"""
Input File on Local Filesystem, VS level option false usage
"""
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
controller_version=setup.get('controller_version_v17'),
f5_config_version=setup.get('file_version_v10'),
f5_ssh_port=setup.get('f5_ssh_port'))
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_37_0
def test_http_cookie_type_on_file_v10(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
f5_ssh_port=setup.get('f5_ssh_port'),
output_file_path=setup.get('output_file_path'))
file_name = output_file + '/bigip_v10-Output.json'
with open(file_name) as o_file:
file_object = yaml.load(o_file, Loader=yaml.Loader)
persistence_profiles = file_object['ApplicationPersistenceProfile']
for p_type in persistence_profiles:
if "COOKIE" in p_type['persistence_type']:
assert (p_type['persistence_type'] ==
'PERSISTENCE_TYPE_HTTP_COOKIE')
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_38_0
def test_http_cookie_type_on_file_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
f5_ssh_port=setup.get('f5_ssh_port'),
output_file_path=setup.get('output_file_path'))
file_name = output_file + '/hol_advanced_bigip-Output.json'
with open(file_name) as o_file:
file_object = yaml.load(o_file, Loader=yaml.Loader)
persistence_profiles = file_object['ApplicationPersistenceProfile']
for p_type in persistence_profiles:
if "COOKIE" in p_type['persistence_type']:
assert (p_type['persistence_type'] ==
'PERSISTENCE_TYPE_HTTP_COOKIE')
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_39_0
def test_vrf_flag_on_file_v10(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'),
vrf=setup.get('vrf'),
)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_39_0
def test_vrf_flag_on_file_v10(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'),
segroup=setup.get('segroup')
)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_40_0
def test_vrf_flag_on_file_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'),
vrf=setup.get('vrf'),
)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_40_0
def test_vrf_flag_on_file_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'),
segroup=setup.get('segroup')
)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_41_0
def test_error_and_warning_count_on_file_v11(self):
set_update_count()
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'))
assert get_count('error') == 0
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_42_0
def test_error_and_warning_count_on_file_v10(self):
set_update_count()
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'))
assert get_count('error') == 0
@pytest.mark.travis
def test_pool_hm_ref_v11(self, cleanup):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['Pool']
pool_with_hm = [data for data in vs_object if data['name'] == "hol-advanced-pool-01"]
# Check if health monitor ref migrated to Avi
assert pool_with_hm[0].get('health_monitor_refs')
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_43_0
def test_pool_sharing_on_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
first_vs = [data for data in vs_object if data['name'] == "11-hol-advanced-http-vs"]
second_vs = [data for data in vs_object if data['name'] == "12-hol-advanced-http-vs"]
first_pool = first_vs[0]['pool_ref'].split(
'name=')[1].split('&')[0]
second_pool = second_vs[0]['pool_ref'].split(
'name=')[1].split('&')[0]
assert first_pool == second_pool
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_44_0
def test_pool_without_sharing_on_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
first_vs = [data for data in vs_object if data['name'] == "10-hol-advanced-http-vs"]
second_vs = [data for data in vs_object if data['name'] == "11-hol-advanced-http-vs"]
first_pool = first_vs[0]['pool_ref'].split('name=')[1].split('&')[0]
second_pool = second_vs[0]['pool_ref'].split('name=')[1].split(
'&')[0]
assert first_pool != second_pool
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_45_0
def test_pool_sharing_on_v10(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'))
o_file = "%s/%s" % (output_file, "bigip_v10-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
first_vs = [data for data in vs_object if data['name'] ==
"F5-v10-VIP-443-002"]
second_vs = [data for data in vs_object if data['name'] ==
"F5-v10-VIP-443-003"]
first_pool = first_vs[0]['pool_ref'].split('name=')[1].split('&')[0]
second_pool = second_vs[0]['pool_ref'].split('name=')[1].split(
'&')[0]
assert first_pool == second_pool
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_46_0
def test_pool_without_sharing_on_v10(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'))
o_file = "%s/%s" % (output_file, "bigip_v10-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
first_vs = [data for data in vs_object if data['name'] ==
"F5-v10-VIP-443-001"]
second_vs = [data for data in vs_object if data['name'] ==
"F5-v10-VIP-443-002"]
first_pool = first_vs[0]['pool_ref'].split('name=')[1].split('&')[0]
second_pool = second_vs[0]['pool_ref'].split('name=')[1].split(
'&')[0]
assert first_pool != second_pool
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_47_0
    def test_rule_config_v11(self):
        """Verify iRules from the v11 config convert into the matching Avi
        objects (VSDataScriptSet, HTTPPolicySet, NetworkSecurityPolicy)
        and that every reference held by a VS resolves to an object that
        was actually generated."""
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                f5_config_version=setup.get('file_version_v11'),
                controller_version=setup.get('controller_version_v17'),
                tenant=file_attribute['tenant'],
                cloud_name=file_attribute['cloud_name'],
                output_file_path=setup.get('output_file_path'),
                custom_config=setup.get('custom_config_file'),
                f5_ssh_port=setup.get('f5_ssh_port'))
        o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
        with open(o_file) as json_file:
            data = json.load(json_file)
            vs_datascript = data['VSDataScriptSet']
            vs_object = data['VirtualService']
            http_policy_set = data['HTTPPolicySet']
            network_security_policy = data['NetworkSecurityPolicy']
            # VS 01: each datascript ref must resolve to a VSDataScriptSet.
            vs_data = [data for data in vs_object if data['name']
                       == "01-hol-advanced-http-vs"]
            data_script = vs_data[0]['vs_datascripts']
            for i in data_script:
                # The object name is carried in the 'name=' query param.
                ds_name = i['vs_datascript_set_ref'].split('name=')[1].split(
                    '&')[0]
                script_set = [data['name'] for data in vs_datascript if
                              data['name'] == ds_name][0]
                print(script_set, " ", ds_name)
                assert script_set == ds_name
            # VS 11: each HTTP policy ref must resolve to an HTTPPolicySet.
            vs_data = [data for data in vs_object if data['name']
                       == "11-hol-advanced-http-vs"]
            httppolicies = vs_data[0]['http_policies']
            for i in httppolicies:
                policy_name = i['http_policy_set_ref'].split('name=')[1].split(
                    '&')[0]
                httppolicy = [data['name'] for data in http_policy_set
                              if data['name'] == policy_name][0]
                print(policy_name, " ", httppolicy)
                assert policy_name == httppolicy
            # VS 21: datascript refs on this VS must resolve as well.
            vs_data_for_policy_set = [data for data in vs_object if
                                      data['name'] == "21-hol-advanced-http-vs"]
            vsdatascript = vs_data_for_policy_set[0]['vs_datascripts']
            for i in vsdatascript:
                ds_name = i['vs_datascript_set_ref'].split('name=')[1].split(
                    '&')[0]
                script_set = [data['name'] for data in vs_datascript
                              if data['name'] == ds_name][0]
                print(script_set, " ", ds_name)
                assert script_set == ds_name
            # VS 32: the built-in https-redirect policy must be generated.
            vs_data = [data for data in vs_object if data['name'] == "32-hol-advanced-http-vs"]
            httppolicy = vs_data[0]['http_policies']
            for i in httppolicy:
                policy_name = i['http_policy_set_ref'].split('name=')[1].split(
                    '&')[0]
                if policy_name == '_sys_https_redirect-EngVIP':
                    # NOTE(review): the trailing `and '_sys_...'` operand is a
                    # non-empty literal, i.e. always truthy, so it has no
                    # effect on the filter — confirm the intended condition.
                    httppolicy = [data['name'] for data in http_policy_set if
                                  data['name'] == policy_name
                                  and '_sys_https_redirect-EngVIP'][0]
                    print(policy_name, " ", httppolicy)
                    assert policy_name == httppolicy
            # VS 41: the security iRule converts to a NetworkSecurityPolicy.
            vs_data = [data for data in vs_object if data['name'] == "41-hol-advanced-http-vs"]
            policy_ref = vs_data[0]['network_security_policy_ref']
            policy_name = policy_ref.split('name=')[1].split('&')[0]
            network_profile_name = [i['name'] for i in network_security_policy
                                    if i['name'] == policy_name][0]
            assert network_profile_name == policy_name
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_48_0
def test_singke_vs_rules_with_multiple_objects(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'),
custom_config=setup.get('custom_config_file'),
f5_ssh_port=setup.get('f5_ssh_port'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
http_policy_set = data['HTTPPolicySet']
network_security_policy = data['NetworkSecurityPolicy']
vs_data = [data for data in vs_object if data['name']
== "40-hol-advanced-http-vs"]
httppolicy = vs_data[0]['http_policies']
for i in httppolicy:
policy_name = i['http_policy_set_ref'].split('name=')[1].split(
'&')[0]
if policy_name == 'Test-support-Profile-HTTP-HTTP-Policy-Set':
httppolicy = [data['name'] for data in http_policy_set if
data['name'] == policy_name and
'Test-support-Profile-HTTP-HTTP-Policy-Set'][0]
print(policy_name, " ", httppolicy)
assert policy_name == httppolicy
vs_data = [data for data in vs_object if data['name']
== "40-hol-advanced-http-vs"]
policy_ref = vs_data[0]['network_security_policy_ref']
policy_name = policy_ref.split('name=')[1].split('&')[0]
network_profile_name = [i['name'] for i in network_security_policy
if i['name'] == policy_name][0]
assert network_profile_name == policy_name
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_49_0
def test_custom_config_for_hm(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'),
custom_config=setup.get('custom_config_file'),
f5_ssh_port=setup.get('f5_ssh_port'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(input_role_config_file) as i_file:
custom_config = yaml.load(i_file, Loader=yaml.Loader)
with open(o_file) as json_file:
data = json.load(json_file)
hm_object = data['HealthMonitor']
hmdata = [hm for hm in hm_object if hm['name'] == "dns_hol"][0]
config_data = custom_config['healthmonitor_custom_config'][0]
assert hmdata['failed_checks'] == config_data['avi_config'][
'failed_checks']
assert hmdata['send_interval'] == config_data['avi_config'][
'send_interval']
assert hmdata['receive_timeout'] == config_data['avi_config'][
'receive_timeout']
assert (hmdata['external_monitor']['command_code'] ==
config_data['avi_config']['external_monitor']['command_code'])
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_50_0
def test_reboot_clean_v11_17_1_1_for_custom_config(self, cleanup):
"""""
Verify Controller v17.1.1 is running and clean reboot avi api.
After controller setup completed, upload the AviInternal
certificate file.
"""
is_up = verify_controller_is_up(file_attribute['controller_ip_17_1_1'],
file_attribute[
'controller_user_17_1_1'],
file_attribute[
'controller_password_17_1_1'])
if is_up:
clean_reboot(file_attribute['controller_ip_17_1_1'],
file_attribute['controller_user_17_1_1'],
file_attribute['controller_password_1<PASSWORD>'],
file_attribute['controller_version_v17'],
file_attribute['license_file_path'])
print("Controller is running properly.")
else:
print("Controller is not running properly.")
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_51_0
def test_custom_config_object_upload(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
controller_ip=setup.get('controller_ip_17_1_1'),
user=setup.get('controller_user_17_1_1'),
password=setup.get('controller_password_1<PASSWORD>'),
option=setup.get('option'),
output_file_path=setup.get('output_file_path'),
custom_config=setup.get('custom_config_file'),
f5_ssh_port=setup.get('f5_ssh_port'),
skip_pki=True)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_52_0
def test_vs_level_status_with_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
vs_level_status=setup.get('vs_level_status'),
f5_ssh_port=setup.get('f5_ssh_port')
)
self.excel_path = os.path.abspath(
os.path.join(
output_file, 'hol_advanced_bigip-ConversionStatus.xlsx'
)
)
assert output_vs_level_status(self.excel_path)
@pytest.mark.travis
def test_vs_level_status_and_segroup_with_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
vs_level_status=setup.get('vs_level_status'),
f5_ssh_port=setup.get('f5_ssh_port'),
segroup=setup.get('segroup')
)
self.excel_path = os.path.abspath(
os.path.join(
output_file, 'hol_advanced_bigip-ConversionStatus.xlsx'
)
)
assert output_vs_level_status(self.excel_path)
    @pytest.mark.skip_travis
    @pytest.mark.TCID1_48_1497_53_0
    def test_reboot_clean_for_segroup_v11_17_1_1(self, cleanup):
        """Verify controller v17.1.1 is up, then clean-reboot it via the
        Avi API so the following SE-group test starts from a fresh
        controller state.  (Fixed malformed docstring opener.)
        """
        is_up = verify_controller_is_up(
            file_attribute['controller_ip_17_1_1'],
            file_attribute['controller_user_17_1_1'],
            file_attribute['controller_password_17_1_1'])
        if is_up:
            clean_reboot(file_attribute['controller_ip_17_1_1'],
                         file_attribute['controller_user_17_1_1'],
                         file_attribute['controller_password_17_<PASSWORD>'],
                         file_attribute['controller_version_v17'],
                         file_attribute['license_file_path'])
            print("Controller is running properly.")
        else:
            # NOTE(review): test only prints on failure instead of failing;
            # verify this soft behaviour is intended.
            print("Controller is not running properly.")
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_54_0
def test_segroup_and_upload_v11_17_1_1(self, cleanup):
"""
Input File on Local Filesystem, Test for Controller v17.1.1,
AutoUpload Flow
"""
res = create_segroup(
file_attribute['controller_ip_17_1_1'],
file_attribute['controller_user_17_1_1'],
file_attribute['controller_password_17_1_1'],
setup.get('segroup'))
if res.status_code in [200, 201]:
f5_conv(
bigip_config_file=setup.get('config_file_name_v11'),
output_file_path=setup.get('output_file_path'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
option=setup.get('option'),
controller_ip=setup.get('controller_ip_17_1_1'),
user=setup.get('controller_user_17_1_1'),
password=setup.get('controller_password_<PASSWORD>'),
segroup=setup.get('segroup'),
skip_pki=True)
else:
raise Exception("Controller segroup creation faild %s" %
res.content)
    @pytest.mark.skip_travis
    @pytest.mark.TCID1_48_1497_55_0
    def test_reboot_clean_v11_17_1_1_for_vrf_ref(self, cleanup):
        """Verify controller v17.1.1 is up, then clean-reboot it via the
        Avi API so the following VRF-ref test starts from a fresh
        controller state.  (Fixed malformed docstring opener.)
        """
        is_up = verify_controller_is_up(
            file_attribute['controller_ip_17_1_1'],
            file_attribute['controller_user_17_1_1'],
            file_attribute['controller_password_<PASSWORD>'])
        if is_up:
            clean_reboot(file_attribute['controller_ip_17_1_1'],
                         file_attribute['controller_user_17_1_1'],
                         file_attribute['controller_password_17_<PASSWORD>'],
                         file_attribute['controller_version_v17'],
                         file_attribute['license_file_path'])
            print("Controller is running properly.")
        else:
            # NOTE(review): test only prints on failure instead of failing;
            # verify this soft behaviour is intended.
            print("Controller is not running properly.")
@pytest.mark.skip_travis
@pytest.mark.TCID1_48_1497_56_0
def test_vrf_ref_upload_v11_17_1_1(self):
res = create_vrf_context(
file_attribute['controller_ip_17_1_1'],
file_attribute['controller_user_17_1_1'],
file_attribute['controller_password_<PASSWORD>'],
vrf_name=setup.get('vrf'))
if res.status_code in [200, 201]:
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
output_file_path=setup.get('output_file_path'),
controller_ip=setup.get('controller_ip_17_1_1'),
user=setup.get('controller_user_17_1_1'),
password=setup.get('controller_password_<PASSWORD>'),
option=setup.get('option'),
vrf=setup.get('vrf'), skip_pki=True)
else:
raise Exception("Controller vrf creation faild %s" % res.content)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_57_0
def test_application_profile_on_v11(self, cleanup):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
app_ref = []
for vs in vs_object:
if vs['name'] == "F5-VIP-80-001":
app_ref.append(vs['application_profile_ref'])
elif vs['name'] == "dns_vs_up":
app_ref.append(vs['application_profile_ref'])
elif vs['name'] == "Opcito-vs":
app_ref.append(vs['application_profile_ref'])
for each_ref in app_ref:
profile_name = each_ref.split('name=')[1].split('&')[0]
assert profile_name == "System-L4-Application"
    @pytest.mark.travis
    @pytest.mark.TCID1_48_1497_58_0
    def test_vs_filter_on_v11(self, cleanup):
        """Conversion restricted by ``vs_filter`` (and a custom vrf) must
        still emit the output JSON file.
        """
        f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
                f5_config_version=setup.get('file_version_v11'),
                controller_version=setup.get('controller_version_v17'),
                tenant=file_attribute['tenant'],
                cloud_name=file_attribute['cloud_name'],
                vs_filter=setup.get('vs_filter'),
                vrf=setup.get('vrf'),
                output_file_path=setup.get('output_file_path'))
        o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
        assert os.path.exists(o_file)
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_59_0
def test_pool_sharing_policy(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
http_policy_set = data['HTTPPolicySet']
pools = data['Pool']
vs_data1 = [data['http_policies'] for data in vs_object if data['name']
== "33-hol-advanced-http-vs"][0]
vs_data2 = [data['http_policies'] for data in vs_object if data['name']
== "34-hol-advanced-http-vs"][0]
vs_list = list()
vs_list.append(vs_data1[0])
vs_list.append(vs_data2[0])
for i in vs_list:
policy_name = i['http_policy_set_ref'].split('name=')[1].split('&')[
0]
rules = [data['http_request_policy']['rules'] for data
in http_policy_set if data['name'] == policy_name][0]
for r in rules:
pool = r['switching_action']['pool_ref'].split(
'name=')[1].split('&')[0]
pool_name = [data['name'] for data in pools if data['name'] ==
pool][0]
assert pool == pool_name
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_60_0
def test_check_header_insert_policy_on_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
http_policy_set = data['HTTPPolicySet']
vs_data = [data for data in vs_object if data['name'] == "81-hol-advanced-http-vs-dmz"]
httppolicies = vs_data[0]['http_policies']
for i in httppolicies:
policy_name = i['http_policy_set_ref'].split('name=')[1].split(
'&')[0]
httppolicy = [data['name'] for data in http_policy_set if
data['name'] == policy_name][0]
assert policy_name == httppolicy
@pytest.mark.travis
@pytest.mark.TCID1_48_1497_61_0
def test_check_health_monitor_request_url(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
hm_object = data['HealthMonitor']
monitor_urls = []
for monitor in hm_object:
if 'https_monitor' in monitor:
monitor_urls.append(monitor['https_monitor'][
'http_request'])
elif 'http_monitor' in monitor:
monitor_urls.append(monitor['http_monitor']['http_request'])
for eachUrl in monitor_urls:
request = eachUrl.split('\\r')[0]
assert (request.endswith('HTTP/1.1') or
request.endswith('HTTP/1.0'))
@pytest.mark.travis
def test_single_http_req_policy_with_multiple_vs(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'),
custom_config=setup.get('custom_config_file'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
http_policy_set = data['HTTPPolicySet']
vs_data_of_va1 = [data for data in vs_object if data['name']
== "33-hol-advanced-http-vs"]
vs_data_of_va2 = [data for data in vs_object if data['name']
== "34-hol-advanced-http-vs"]
httppolicydata1 = vs_data_of_va1[0]['http_policies']
httppolicydata2 = vs_data_of_va2[0]['http_policies']
for i in httppolicydata1:
policy_name = i['http_policy_set_ref'].split('name=')[1].split(
'&')[0]
if 'hol_context_switch_policy' in policy_name:
policy_name_1 = [data['name'] for data in http_policy_set
if data['name'] == policy_name][0]
print(policy_name, " ", policy_name_1)
assert policy_name == policy_name_1
for i in httppolicydata2:
policy_name = i['http_policy_set_ref'].split('name=')[1].split(
'&')[0]
if 'hol_context_switch_policy' in policy_name:
policy_name_2 = [data['name'] for data in http_policy_set
if data['name'] == policy_name][0]
print(policy_name, " ", policy_name_2)
assert policy_name == policy_name_2
@pytest.mark.travis
def test_check_dup_of_key_should_not_be_in_json(self):
f5_conv(
bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
for key in data.keys():
if isinstance(data[key], list):
for i in data[key]:
assert 'dup_of' not in i.keys()
@pytest.mark.travis
def test_distinct_app_profile(self):
f5_conv(
bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
output_file_path=setup.get('output_file_path'),
distinct_app_profile=setup.get('distinct_app_profile'))
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
assert len(data['ApplicationProfile']) > 34
vs = [vs for vs in data['VirtualService']
if vs['name'] == '33-hol-advanced-http-vs']
assert '33-hol-advanced-http-vs' in vs[0]['application_profile_ref']
@pytest.mark.travis
def test_http_policy_sharing_on_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'),
reuse_http_policy=True
)
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
first_vs = [vs for vs in vs_object if vs['name']
== "81-hol-advanced-http-vs-dmz"][0]
second_vs = [vs for vs in vs_object if vs['name']
== "82-hol-advanced-http-vs-dmz"][0]
vs1_http_policy = first_vs['http_policies'][0]\
['http_policy_set_ref'].split("=")[-1]
vs2_http_policy = second_vs['http_policies'][0] \
['http_policy_set_ref'].split("=")[-1]
assert vs1_http_policy == vs2_http_policy
http_policies = data['HTTPPolicySet']
shared_http_policy = [policy for policy in http_policies
if "hol_hdr_insert-HTTP-Policy-Set"
in policy['name']]
assert len(shared_http_policy) == 1
@pytest.mark.travis
def test_http_policy_sharing_on_v10(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v10'),
f5_config_version=setup.get('file_version_v10'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path'),
f5_ssh_port=setup.get('f5_ssh_port'),
reuse_http_policy=True)
o_file = "%s/%s" % (output_file, "bigip_v10-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
vs_object = data['VirtualService']
first_vs = [vs for vs in vs_object if vs['name']
== "vs_http_policy_share_1"][0]
second_vs = [vs for vs in vs_object if vs['name']
== "vs_http_policy_share_2"][0]
vs1_http_policy = first_vs['http_policies'][0] \
['http_policy_set_ref'].split("=")[-1]
vs2_http_policy = second_vs['http_policies'][0] \
['http_policy_set_ref'].split("=")[-1]
assert vs1_http_policy == vs2_http_policy == \
'_sys_https_redirect'
http_policies = data['HTTPPolicySet']
shared_http_policy = [policy for policy in http_policies
if "_sys_https_redirect"
in policy['name']]
assert len(shared_http_policy) == 1
@pytest.mark.travis
def test_pool_vrf_on_v11(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path')
)
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
custom_vrf_pools = {"Peer_test": "vrf-101"}
custom_vrf_vs = {"vs_custome_vrf": "vrf-101"}
with open(o_file) as json_file:
data = json.load(json_file)
pool_objects = data['Pool']
vs_objects = data['VirtualService']
for pool in pool_objects:
if pool["name"] in custom_vrf_pools:
assert custom_vrf_pools[pool["name"]] in pool["vrf_ref"]
else:
assert "global" in pool["vrf_ref"]
for vs in vs_objects:
if vs["name"] in custom_vrf_vs:
assert custom_vrf_vs[vs["name"]] in vs["vrf_context_ref"]
else:
assert "global" in vs["vrf_context_ref"]
@pytest.mark.travis
def test_monitor_config(self):
f5_conv(bigip_config_file=setup.get('config_file_name_v11'),
f5_config_version=setup.get('file_version_v11'),
controller_version=setup.get('controller_version_v17'),
tenant=file_attribute['tenant'],
cloud_name=file_attribute['cloud_name'],
no_profile_merge=file_attribute['no_profile_merge'],
output_file_path=setup.get('output_file_path')
)
o_file = "%s/%s" % (output_file, "hol_advanced_bigip-Output.json")
with open(o_file) as json_file:
data = json.load(json_file)
ssl_cert_objects = data['SSLKeyAndCertificate']
ssl_profile_objects = data['SSLProfile']
expected_cert = [ssl_cert for ssl_cert in ssl_cert_objects
if ssl_cert['name'] == 'monitor.fmr.com.crt']
expected_ssl_profile = [ssl_profile for ssl_profile in ssl_profile_objects
if ssl_profile['name'] == 'client_ssl_profile']
assert expected_cert, "Expected cert monitor.fmr.com.crt not found"
assert expected_ssl_profile, "Expected ssl profile monitor.fmr.com not found"
    @pytest.mark.travis
    def test_configuration_with_config_yaml(self, cleanup):
        """Drive the converter purely from an args YAML file: the first VS
        must come out disabled and no ansible object files generated.
        """
        f5_conv(args_config_file=setup.get('args_config_file'))
        o_file = "%s/%s" % (output_file, "bigip_v11-Output.json")
        with open(o_file) as json_file:
            data = json.load(json_file)
        vs_object = data['VirtualService'][0]
        # Presumably the YAML sets vs_state=disable and ansible off —
        # verify against the args config fixture.
        assert not vs_object.get('enabled')
        assert not os.path.exists("%s/%s" % (output_file, "avi_config_create_object.yml"))
        assert not os.path.exists("%s/%s" % (output_file, "avi_config_delete_object.yml"))
    @pytest.mark.travis
    def test_configuration_with_overriding_config_yaml(self, cleanup):
        """CLI keyword arguments (ansible=True, vs_state='enable') must
        override the values from the args YAML file: the VS comes out
        enabled and the ansible object files are generated.
        """
        f5_conv(args_config_file=setup.get('args_config_file'),
                ansible=True, vs_state='enable')
        o_file = "%s/%s" % (output_file, "bigip_v11-Output.json")
        with open(o_file) as json_file:
            data = json.load(json_file)
        vs_object = [vs for vs in data['VirtualService'] if vs['name'] == 'vs_2_up'][0]
        assert vs_object.get('enabled')
        assert os.path.exists("%s/%s" % (output_file, "avi_config_create_object.yml"))
        assert os.path.exists("%s/%s" % (output_file, "avi_config_delete_object.yml"))
    def teardown():
        # NOTE(review): declared without 'self' inside what appears to be a
        # test class — confirm pytest actually invokes this hook (a
        # nose-style 'teardown' would be called as a bound method).
        pass
| StarcoderdataPython |
1765201 | <gh_stars>1-10
import pygal
import pandas as pd
from pygal.style import Style
from pygal.style import DefaultStyle
def line_chart_24h(df):
    """Build a 24-hour temperature/humidity pygal line chart.

    Expects *df* to have numeric "Temperature" and "Humidity" columns and
    a "Timestamp" column of "YYYY-MM-DD HH:MM:SS" strings — TODO confirm
    the timestamp format against the data producer.

    Returns:
        str: the rendered chart as a data URI for embedding in HTML.
    """
    temperatures = df["Temperature"].tolist()
    humidities = df["Humidity"].tolist()
    # Keep only the HH:MM part of each timestamp for the x-axis labels.
    x_labels = [stamp[11:16] for stamp in df.Timestamp]

    # 3-point moving averages to smooth sensor noise; the first two
    # values are NaN until the rolling window fills.
    temperatures_smoothed = df["Temperature"].rolling(window=3).mean().tolist()
    humidities_smoothed = df["Humidity"].rolling(window=3).mean().tolist()

    custom_style = Style(
        font_family='googlefont:Raleway',
        background='transparent',
        plot_background='transparent',
        foreground='#1A3959',
        foreground_strong='#1A3959',
        foreground_subtle='#555',
        opacity='.9',
        opacity_hover='.9',
        transition='1s ease-out',
        colors=('#ff5995', '#cf4878', '#feed6c', '#d1c358'))
    custom_style.label_font_size = 12
    custom_style.legend_font_size = 12

    chart = pygal.Line(fill=True,
                       legend_at_bottom=True,
                       legend_at_bottom_columns=2,
                       show_legend=True,
                       interpolate='cubic',
                       # BUGFIX: the custom style above was built and
                       # configured but never used (DefaultStyle was
                       # passed instead); apply it here.
                       style=custom_style,
                       margin_bottom=1,
                       height=400,
                       tooltip_border_radius=10,
                       x_label_rotation=45,
                       x_labels_major_every=24,
                       show_minor_x_labels=False)
    chart.add('Temperature', temperatures, dots_size=1, show_dots=True)
    chart.add('Moving Average Temperature', temperatures_smoothed,
              dots_size=2, show_dots=True)
    chart.add('Humidity', humidities, dots_size=1, secondary=True,
              show_dots=True)
    chart.add('Moving Average Humidity', humidities_smoothed,
              dots_size=2, secondary=True)
    chart.x_labels = x_labels
    # One decimal place on hover tooltips / axis values.
    chart.value_formatter = lambda x: "%.1f" % x
    return chart.render_data_uri()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.