id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
# Crop every detected face out of an image and montage them side by side.
import dlib  # face detection library
import numpy as np  # array handling
import cv2  # image I/O and display (OpenCV)

# dlib frontal-face detector and 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# Read the input image
path = "F:/code/python/P_Dlib_face_cut/pic/"
img = cv2.imread(path + "test_faces_6.jpg")

# Detect faces (upsample once for better recall on small faces)
dets = detector(img, 1)
print("人脸数:", len(dets))

# Size of the montage canvas: tallest face box x sum of all widths
height_max = 0
width_sum = 0
for k, d in enumerate(dets):
    height = d.bottom() - d.top()
    width = d.right() - d.left()
    width_sum += width
    # BUG FIX: the original had a no-op `else: height_max = height_max` branch.
    height_max = max(height_max, height)

print("img_blank的大小:")
print("高度", height_max, "宽度", width_sum)

# Blank canvas the faces are copied into
img_blank = np.zeros((height_max, width_sum, 3), np.uint8)

# Copy each face box into the canvas, left to right.
blank_start = 0  # column offset where the next face starts
for k, d in enumerate(dets):
    height = d.bottom() - d.top()
    width = d.right() - d.left()
    # NumPy slice assignment instead of the original per-pixel double loop:
    # same pixels copied, but in one vectorized operation.
    img_blank[0:height, blank_start:blank_start + width] = \
        img[d.top():d.top() + height, d.left():d.left() + width]
    blank_start += width

cv2.namedWindow("img_faces")
cv2.imshow("img_faces", img_blank)
cv2.waitKey(0)
3222046 | <filename>bigml/api_handlers/optimlhandler.py
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for optiml's REST calls
https://bigml.com/api/optimls
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
resource_is_ready, get_optiml_id
from bigml.constants import OPTIML_PATH
class OptimlHandlerMixin(ResourceHandlerMixin):
    """Mixin adding the optiml REST calls to the BigML API class.

    Not meant to be instantiated on its own; it relies on attributes
    and low-level request methods inherited from BigMLConnection.
    """

    def __init__(self):
        """Compute the base URL for optiml resources."""
        self.optiml_url = self.url + OPTIML_PATH

    def create_optiml(self, datasets,
                      args=None, wait_time=3, retries=10):
        """Create an optiml from a dataset or a list of datasets."""
        payload = self._set_create_from_datasets_args(
            datasets, args=args, wait_time=wait_time, retries=retries)
        return self._create(self.optiml_url, json.dumps(payload))

    def get_optiml(self, optiml, query_string='',
                   shared_username=None, shared_api_key=None):
        """Retrieve an optiml by id (or the dict returned on creation).

        The resource evolves until reaching the FINISHED or FAULTY
        state, so the returned dict reflects the state at call time.
        For a shared optiml, pass the sharing username and api key.
        """
        check_resource_type(optiml, OPTIML_PATH,
                            message="An optiml id is needed.")
        resource_id = get_optiml_id(optiml)
        if resource_id:
            return self._get("%s%s" % (self.url, resource_id),
                             query_string=query_string,
                             shared_username=shared_username,
                             shared_api_key=shared_api_key)

    def optiml_is_ready(self, optiml, **kwargs):
        """Return True when the optiml's status is FINISHED."""
        check_resource_type(optiml, OPTIML_PATH,
                            message="An optiml id is needed.")
        return resource_is_ready(self.get_optiml(optiml, **kwargs))

    def list_optimls(self, query_string=''):
        """List all the optimls of the account."""
        return self._list(self.optiml_url, query_string)

    def update_optiml(self, optiml, changes):
        """Apply the `changes` dict to an optiml."""
        check_resource_type(optiml, OPTIML_PATH,
                            message="An optiml id is needed.")
        resource_id = get_optiml_id(optiml)
        if resource_id:
            return self._update("%s%s" % (self.url, resource_id),
                                json.dumps(changes))

    def delete_optiml(self, optiml):
        """Delete an optiml."""
        check_resource_type(optiml, OPTIML_PATH,
                            message="An optiml id is needed.")
        resource_id = get_optiml_id(optiml)
        if resource_id:
            return self._delete("%s%s" % (self.url, resource_id))
| StarcoderdataPython |
1672156 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
import unittest
import numpy
import torch
import torch.nn.functional as F
import nni
from nni.compression.pytorch.pruning import (
LevelPruner,
L1NormPruner,
L2NormPruner,
SlimPruner,
FPGMPruner,
ActivationAPoZRankPruner,
ActivationMeanRankPruner,
TaylorFOWeightPruner,
ADMMPruner,
MovementPruner
)
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact
class TorchModel(torch.nn.Module):
    """Small LeNet-style CNN for 1x28x28 inputs (e.g. MNIST)."""

    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, 5, 1)
        self.bn1 = torch.nn.BatchNorm2d(10)
        self.conv2 = torch.nn.Conv2d(10, 20, 5, 1)
        self.bn2 = torch.nn.BatchNorm2d(20)
        self.fc1 = torch.nn.Linear(4 * 4 * 20, 100)
        self.fc2 = torch.nn.Linear(100, 10)

    def forward(self, x):
        """Return log-probabilities over the 10 classes."""
        # Two conv -> batchnorm -> relu -> 2x2 max-pool stages.
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2)):
            x = F.max_pool2d(F.relu(bn(conv(x))), 2, 2)
        x = F.relu(self.fc1(x.view(-1, 4 * 4 * 20)))
        return F.log_softmax(self.fc2(x), dim=1)
def trainer(model, optimizer, criterion):
    """Run ten optimization steps on random 1x28x28 batches.

    Labels are simply 0..9, one per sample in the batch of ten.
    """
    model.train()
    targets = torch.Tensor(list(range(10))).type(torch.LongTensor)
    for _ in range(10):
        batch = torch.rand(10, 1, 28, 28)
        optimizer.zero_grad()
        loss = criterion(model(batch), targets)
        loss.backward()
        optimizer.step()
def get_optimizer(model):
    """Return an nni-traced SGD optimizer for `model`'s parameters."""
    traced_sgd = nni.trace(torch.optim.SGD)
    return traced_sgd(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)


# Shared loss function used by all pruner tests.
criterion = torch.nn.CrossEntropyLoss()
class PrunerTestCase(unittest.TestCase):
    """Smoke tests: every pruner should reach roughly its target sparsity
    on TorchModel.

    The compress/unwrap/measure boilerplate, previously duplicated in all
    ten tests, is factored into _compressed_sparsity().
    """

    def _compressed_sparsity(self, pruner, config_list):
        """Run compression, unwrap the model and return the sparsity report."""
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        return compute_sparsity_mask2compact(pruned_model, masks, config_list)

    def test_level_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = LevelPruner(model=model, config_list=config_list)
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_level_pruner_bank(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.7}]
        pruner = LevelPruner(model=model, config_list=config_list, mode='balance', balance_gran=[5])
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        # round down cause to lower sparsity
        assert sparsity_list[0]['total_sparsity'] == 0.6

    def test_l1_norm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = L1NormPruner(model=model, config_list=config_list, mode='dependency_aware',
                              dummy_input=torch.rand(10, 1, 28, 28))
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_l2_norm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = L2NormPruner(model=model, config_list=config_list, mode='dependency_aware',
                              dummy_input=torch.rand(10, 1, 28, 28))
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_fpgm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = FPGMPruner(model=model, config_list=config_list, mode='dependency_aware',
                            dummy_input=torch.rand(10, 1, 28, 28))
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_slim_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['BatchNorm2d'], 'total_sparsity': 0.8}]
        pruner = SlimPruner(model=model, config_list=config_list, trainer=trainer,
                            traced_optimizer=get_optimizer(model), criterion=criterion,
                            training_epochs=1, scale=0.001, mode='global')
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_activation_mean_rank_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = ActivationMeanRankPruner(model=model, config_list=config_list, trainer=trainer,
                                          traced_optimizer=get_optimizer(model), criterion=criterion,
                                          training_batches=5, activation='relu',
                                          mode='dependency_aware',
                                          dummy_input=torch.rand(10, 1, 28, 28))
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_taylor_fo_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = TaylorFOWeightPruner(model=model, config_list=config_list, trainer=trainer,
                                      traced_optimizer=get_optimizer(model), criterion=criterion,
                                      training_batches=5, mode='dependency_aware',
                                      dummy_input=torch.rand(10, 1, 28, 28))
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_admm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8, 'rho': 1e-3}]
        pruner = ADMMPruner(model=model, config_list=config_list, trainer=trainer,
                            traced_optimizer=get_optimizer(model), criterion=criterion,
                            iterations=2, training_epochs=1)
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82

    def test_movement_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = MovementPruner(model=model, config_list=config_list, trainer=trainer,
                                traced_optimizer=get_optimizer(model), criterion=criterion,
                                training_epochs=5, warm_up_step=0, cool_down_beginning_step=4)
        sparsity_list = self._compressed_sparsity(pruner, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
class FixSeedPrunerTestCase(unittest.TestCase):
def test_activation_apoz_rank_pruner(self):
model = TorchModel()
config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
pruner = ActivationAPoZRankPruner(model=model, config_list=config_list, trainer=trainer,
traced_optimizer=get_optimizer(model), criterion=criterion, training_batches=5,
activation='relu', mode='dependency_aware',
dummy_input=torch.rand(10, 1, 28, 28))
pruned_model, masks = pruner.compress()
pruner._unwrap_model()
sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
def setUp(self) -> None:
# fix seed in order to solve the random failure of ut
random.seed(1024)
numpy.random.seed(1024)
torch.manual_seed(1024)
def tearDown(self) -> None:
# reset seed
import time
now = int(time.time() * 100)
random.seed(now)
seed = random.randint(0, 2 ** 32 - 1)
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
if __name__ == '__main__':
    # Allow running this module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
31754 | <gh_stars>1-10
import json
from pprint import pprint
from urllib.request import urlopen
import csv
class Task:
    """One Spark task record: id, duration, GC time and executor id."""

    def __init__(self, taskId, duration, gcTime, executorId):
        self.taskId = int(taskId)
        self.duration = int(duration)
        self.gcTime = int(gcTime)
        self.executorId = int(executorId)

    def __str__(self):
        return f"""
    Task: [{self.taskId}]
    Duration: [{self.duration}],
    GC Time: [{self.gcTime}],
    Executor ID: [{self.executorId}],
    """

    def __iter__(self):
        # Field order matches the CSV header written by writeCSV.
        return iter([self.taskId, self.duration, self.gcTime, self.executorId])
def writeCSV(fileName, data):
    """Write task rows to data/<fileName>.csv with a header line.

    Each element of `data` must iterate in the order
    (taskId, duration, gcTime, executorId) -- Task supports this
    via __iter__.
    """
    # BUG FIX: newline='' is required by the csv module; without it every
    # row is followed by a blank line on Windows.  Also removed the dead
    # commented-out per-row loop.
    with open(f"data/{fileName}.csv", "w", newline="") as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(['taskId', 'duration', 'gcTime', 'executorId'])
        writer.writerows(data)
def getRequest(url):
    """GET `url` and decode the response body as JSON."""
    raw = urlopen(url).read().decode("utf-8")
    return json.loads(raw)
def getTasksForStage(appId, stage):
    """Fetch every task of `stage` (attempt 0) for one application."""
    url = f"{base_application_api}/{appId}/stages/{stage}/0/taskList?length=100000"
    tasks = []
    for entry in getRequest(url):
        print(entry)
        tasks.append(Task(entry["taskId"], entry["duration"],
                          entry["taskMetrics"]["jvmGcTime"], entry["executorId"]))
    assert len(tasks) > 0, f"No tasks found for app [{appId}] in stage[{stage}]"
    return tasks
def getAppName(appId):
    """Look up spark.app.name in the application's environment endpoint."""
    env = getRequest(f"{base_application_api}/{appId}/environment")
    matches = [value for key, value in env['sparkProperties'] if key == 'spark.app.name']
    return matches[0]
def parseAndOutputCsv(appId, stage):
    """Download the tasks of one stage and dump them to a CSV file."""
    tasks = getTasksForStage(appId, stage)
    for task in tasks:
        print(task)
    appName = getAppName(appId)
    writeCSV(f"{appName}-s{stage}", tasks)
# --- Script configuration and entry point -------------------------------
# Spark history-server REST endpoint (local server kept for reference).
#base_api = "http://localhost:18080/api/v1"
base_api = "http://node348:18080/api/v1"
base_application_api = f"{base_api}/applications"
# Runs analysed in earlier sessions, kept for provenance:
app_id = "app-20180713132544-0015" #18min numa
stage = 2
#parseAndOutputCsv(app_id, stage)
#parseAndOutputCsv("app-20180713130750-0014", stage) #18min default
#parseAndOutputCsv("app-20180717120548-0031", 0) #Sparkpi 10000 default
#parseAndOutputCsv("app-20180717121455-0032", 0) #Sparkpi 10000 numa
#parseAndOutputCsv("app-20180809154803-0000", 0)
#parseAndOutputCsv("app-20180809154803-0000", 1)
# Dump task CSVs for stages 0-2 of the currently analysed run.
parseAndOutputCsv("app-20180809161104-0000", 0)
parseAndOutputCsv("app-20180809161104-0000", 1)
parseAndOutputCsv("app-20180809161104-0000", 2)
#parseAndOutputCsv("app-20180801131654-0001", 2)
#parseAndOutputCsv("app-20180801131012-0000", 2)
#parseAndOutputCsv("app-20180727144031-0015", 0)
#parseAndOutputCsv("app-20180727144758-0016", 0)
#parseAndOutputCsv("app-20180727130603-0012", 2)
#parseAndOutputCsv("app-20180727121640-0004", 2)
#parseAndOutputCsv("app-20180727120007-0002", 2)
| StarcoderdataPython |
1613901 | <filename>source/pkgsrc/games/unknown-horizons/patches/patch-run__uh.py<gh_stars>1-10
$NetBSD: patch-run__uh.py,v 1.1 2019/08/07 12:07:35 nia Exp $
Add PREFIX to list of search paths.
--- run_uh.py.orig 2019-08-07 10:59:56.696840075 +0000
+++ run_uh.py
@@ -159,7 +159,7 @@ def get_content_dir_parent_path():
# Unknown Horizons.app/Contents/Resources/contents
options.append(os.getcwd())
# Try often-used paths on Linux.
- for path in ('/usr/share/games', '/usr/share', '/usr/local/share/games', '/usr/local/share'):
+ for path in ('@PREFIX@/share', '/usr/share/games', '/usr/share', '/usr/local/share/games', '/usr/local/share'):
options.append(os.path.join(path, 'unknown-horizons'))
for path in options:
| StarcoderdataPython |
3364477 | <gh_stars>1-10
class Foo(object):
    """Fixture type whose indexing simply echoes the key back."""

    def __getitem__(self, item):
        return item
Foo<warning descr="Class 'type' does not define '__getitem__', so the '[]' operator cannot be used on its instances">[</warning>0]
| StarcoderdataPython |
# Read the number of digits N and the number of constraints M, then the
# constraints themselves as (position, digit) pairs.
N, M = map(int, input().split())
constraints = [list(map(int, input().split())) for _ in range(M)]

# Smallest N-digit candidate: a leading 1 followed by zeros.
digits = [1] + [0] * (N - 1)
constraints = sorted(constraints, key=lambda c: c[0])


def get_unique_list(seq):
    """Return seq with duplicates removed, keeping first occurrences."""
    seen = []
    return [x for x in seq if x not in seen and not seen.append(x)]


if N != 1 and [1, 0] in constraints:
    # A multi-digit number cannot start with 0.
    print(-1)
elif len(list(set(c[0] for c in constraints))) != len(list(get_unique_list(constraints))):
    # Two different digits were requested for the same position.
    print(-1)
elif N == 1 and M == 0:
    print(0)
else:
    for pos, digit in constraints:
        digits[pos - 1] = "{}".format(digit)
    print(*digits, sep="")
| StarcoderdataPython |
3218769 | __version__ = 0.1
import argparse
import urllib2
import webbrowser
def execute_search(query):
    """Open a DuckDuckGo "I'm feeling ducky" ("\\"-prefixed) search in the browser."""
    encoded = urllib2.quote("\\{}".format(query))
    webbrowser.open_new_tab("https://duckduckgo.com/?q={}".format(encoded))
def execute_full_search(query):
    """Open a regular DuckDuckGo results page in the browser."""
    encoded = urllib2.quote("{}".format(query))
    webbrowser.open_new_tab("https://duckduckgo.com/?q={}".format(encoded))
def command_line_runner():
    """Parse CLI arguments and launch the requested search.

    With -a a full results page is opened; otherwise the "feeling
    ducky" first hit is used.  No query prints the usage help.
    """
    parser = argparse.ArgumentParser(description='Search DuckDuckGo right from your terminal')
    parser.add_argument('query', type=str, nargs="*", help="Your search query here")
    parser.add_argument('-a', action='store_true')
    args = parser.parse_args()
    if not args.query:
        parser.print_help()
        return
    search = execute_full_search if args.a else execute_search
    search(' '.join(args.query))


if __name__ == "__main__":
    command_line_runner()
| StarcoderdataPython |
3360118 | <reponame>isabella232/srtracker
# Copyright (C) 2012-2015, Code for America
# This is open source software, released under a standard 3-clause
# BSD-style license; see the file LICENSE for details.
import datetime
import requests
# How long a fetched service list stays valid before re-fetching.
CACHE_TIMEOUT = datetime.timedelta(seconds=60 * 10)
# Module-level cache of the Open311 service list and its last fetch time
# (epoch-like sentinel forces a fetch on first use).
services_list = None
last_services_update = datetime.datetime(1, 1, 1)
def services(open311_url, open311_api_key=None):
    """Return the Open311 service list, cached for CACHE_TIMEOUT.

    Refetches <open311_url>/services.json when the cache is empty or
    stale; a non-200 response resets the cache to an empty list.
    """
    global services_list, last_services_update
    age = datetime.datetime.utcnow() - last_services_update
    if not services_list or age > CACHE_TIMEOUT:
        params = {'api_key': open311_api_key} if open311_api_key else None
        response = requests.get('%s/services.json' % open311_url, params=params)
        last_services_update = datetime.datetime.utcnow()
        # NOTE(review): `response.json` is used as an attribute here, which
        # matches old requests versions -- preserved as-is.
        services_list = (response.json or []) if response.status_code == 200 else []
    return services_list
| StarcoderdataPython |
3204769 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
# Recipe modules this recipe depends on (resolved by the recipe engine).
DEPS = [
'depot_tools/bot_update',
'depot_tools/gclient',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
# End-to-end test definitions: each entry describes one simulated v8
# recipe run (its buildbot properties) plus regexp verifiers that must
# match the recipe's stdout.
TESTS = [
{
'name': 'Test auto-bisect on tester',
# Buildbot properties fed to the inner `run_recipe.py v8` invocation.
'properties': {
'workdir': '/b/build/slave/linux',
'repository': 'https://chromium.googlesource.com/v8/v8',
'buildername': 'V8 Linux - nosnap',
'parent_buildnumber': 9423,
'recipe': 'v8',
'mastername':
'client.v8',
'buildbotURL': 'http://build.chromium.org/p/client.v8/',
'project': 'v8',
'parent_buildername': 'V8 Linux - nosnap builder',
'git_revision': 'c08e952566c3923f8fcbd693dae05f8eae73938b',
'parent_got_revision': 'c08e952566c3923f8fcbd693dae05f8eae73938b',
'parent_got_swarming_client_revision':
'df99a00d96fae932bae824dccba13156bf7eddd0',
'buildnumber': 5472,
'bot_id': 'slave4-c3',
'swarm_hashes': {
'bot_default': '3726ca899b099c077b9551f7163c05ea0f160a7b',
'mozilla': 'ba5f8a4aeee89b1fe88c764416ee9875584a10d3',
'simdjs': '55aa4085d018aaf24dc2bc07421515e23cd8a006',
},
'blamelist': ['<EMAIL>', '<EMAIL>'],
'branch': 'master',
'parent_got_revision_cp': 'refs/heads/master@{#32376}',
'requestedAt': 1448632553,
'revision': '<KEY>',
# Revisions the bisect is forced to walk through.
'override_changes': [
{'revision': '469675ee3f137970158305957a76615d33ff253c'},
{'revision': 'd290f204938295bfecc5c8e645ccfcff6e80ddb8'},
{'revision': '<KEY>'},
],
'bisect_duration_factor': 0.5,
'testfilter': [
'cctest/test-serialize/ContextDeserialization',
],
},
# The inner recipe is expected to fail (bisect found suspects).
'ok_ret': [1],
# Each verifier regexp must match the captured stdout.
'verifiers': [
{
'name': 'verify suspects',
'regexp': r'Suspecting multiple commits(?:.|\s)*'
r'd290f204(?:.|\s)*c08e9525',
},
],
},
]
def RunSteps(api):
    """Check out the build repo and run every TESTS entry through run_recipe.py."""
    api.gclient.set_config('build')
    api.bot_update.ensure_checkout()
    for test in TESTS:
        try:
            api.python(
                name=test['name'],
                script=api.path['checkout'].join('scripts', 'tools', 'run_recipe.py'),
                args=['v8', '--properties-file', api.json.input(test['properties'])],
                ok_ret=test['ok_ret'],
                stdout=api.raw_io.output_text(),
            )
        finally:
            result = api.step.active_result
            # Make consumed output visible again.
            result.presentation.logs['stdout'] = result.stdout.splitlines()
            # Show return code to ease debugging.
            result.presentation.logs['retcode'] = [str(result.retcode)]
        # Assert invariants.
        for verifier in test['verifiers']:
            if not re.search(verifier['regexp'], result.stdout):
                result.presentation.status = api.step.FAILURE
                result.presentation.logs[verifier['name']] = [
                    'Regular expression "%s" did not match.' % verifier['regexp']]
                # Make the overall build fail.
                raise api.step.StepFailure('Verifier did not match.')
def GenTests(api):
    """Yield two simulations: verifier regexp matching, then failing."""

    def bisect_test(test_name, stdout_text):
        # Shared scaffolding for both simulated runs.
        return (
            api.test(test_name) +
            api.properties.generic(
                mastername='chromium.tools.build',
                buildername='v8-linux-end-to-end',
            ) +
            api.override_step_data(
                'Test auto-bisect on tester',
                api.raw_io.stream_output(stdout_text, stream='stdout'),
                retcode=1,
            )
        )

    yield bisect_test(
        'v8-auto-bisect-end-to-end-pass',
        'Suspecting multiple commits@@\n@@\n@@d290f204@@@\n@@@c08e9525')
    yield bisect_test(
        'v8-auto-bisect-end-to-end-fail',
        'Suspecting multiple commits\ndeadbeef\ndeadbeef')
| StarcoderdataPython |
106220 | from anytree import NodeMixin, iterators, RenderTree
import math
def Make_Virtual():
    """Create a virtual (geometry-less) SWC node, marked by a negative id."""
    return SwcNode(nid=-1)
def compute_platform_area(r1, r2, h):
    """Lateral surface area of a truncated cone with radii r1, r2 and height h."""
    return math.pi * h * (r1 + r2)
#to test
def compute_two_node_area(tn1, tn2, remain_dist):
    """Return the lateral surface area spanned between two nodes.

    The segment from tn1 towards tn2 is truncated at `remain_dist`;
    when truncated, the far radius is linearly interpolated.
    """
    r1 = tn1.radius()
    r2 = tn2.radius()
    d = tn1.distance(tn2)
    # BUG FIX: removed a leftover debug print(remain_dist).
    if remain_dist >= d:
        h = d
    else:
        # Truncate at remain_dist and interpolate the far radius.
        h = remain_dist
        t = remain_dist / d
        r2 = r1 * (1 - t) + r2 * t
    return compute_platform_area(r1, r2, h)
#to test
def compute_surface_area(tn, range_radius):
    """Accumulate surface area along the path through `tn`.

    Walks up through ancestors, then down through single-child
    descendants, until `range_radius` of path length has been covered
    in each direction.
    """
    area = 0
    # Walk towards the root.
    walked = 0
    parent = tn.parent
    while parent and walked < range_radius:
        area += compute_two_node_area(tn, parent, range_radius - walked)
        walked += tn.distance(parent)
        tn = parent
        parent = tn.parent
    # Walk away from the root while the path does not branch.
    # NOTE(review): this continues from the last ancestor reached above,
    # matching the original control flow.
    walked = 0
    children = tn.children
    while len(children) == 1 and walked < range_radius:
        child = children[0]
        area += compute_two_node_area(tn, child, range_radius - walked)
        walked += tn.distance(child)
        tn = child
        children = tn.children
    return area
class SwcNode(NodeMixin):
    """A node in an SWC tree: a sphere with an id, a type, a radius and a centre.

    Per the SWC format every non-root node has a parent.  Nodes with a
    negative id are *virtual*: they carry no geometry and only serve to
    glue several SWC structures under a common root.
    """

    def __init__(self, nid=-1, ntype=0, radius=1, center=[0, 0, 0], parent=None):
        # NOTE(review): the mutable default for `center` is shared between
        # calls; kept as-is to preserve behaviour.
        self._id = nid
        self._radius = radius
        self._pos = center
        self._type = ntype
        self.parent = parent

    def is_virtual(self):
        """True iff this node is virtual (negative id)."""
        return self._id < 0

    def is_regular(self):
        """True iff this node is NOT virtual."""
        return self._id >= 0

    def get_id(self):
        """The node's unique identifier."""
        return self._id

    def distance(self, tn):
        """Euclidean distance to `tn`, or 0 if either node is not regular.

        Args:
            tn : the target node for distance measurement
        """
        if not (tn and self.is_regular() and tn.is_regular()):
            return 0.0
        squared = sum((a - b) * (a - b) for a, b in zip(self._pos, tn._pos))
        return math.sqrt(squared)

    def parent_distance(self):
        """Distance to the parent node."""
        return self.distance(self.parent)

    def radius(self):
        return self._radius

    def scale(self, sx, sy, sz, adjusting_radius=True):
        """Scale the node position (and optionally its radius) in place."""
        self._pos[0] *= sx
        self._pos[1] *= sy
        self._pos[2] *= sz
        if adjusting_radius:
            # Radius follows the geometric mean of the in-plane factors.
            self._radius *= math.sqrt(sx * sy)

    def to_swc_str(self):
        """Serialize as an SWC record (without the parent-id column)."""
        return '%d %d %g %g %g %g' % (self._id, self._type, self._pos[0],
                                      self._pos[1], self._pos[2], self._radius)

    def get_parent_id(self):
        """Parent id; -2 flags a root node."""
        return -2 if self.is_root else self.parent.get_id()

    def __str__(self):
        return '%d (%d): %s, %g' % (self._id, self._type, str(self._pos), self._radius)
class SwcTree:
    """One or more SWC trees sharing a single virtual root.

    For simplicity the root is always a virtual node; the real roots are
    its children.
    """

    def __init__(self):
        self._root = Make_Virtual()

    def _print(self):
        print(RenderTree(self._root).by_attr("_id"))

    def clear(self):
        """Drop all nodes, keeping a fresh virtual root."""
        self._root = Make_Virtual()

    def is_comment(self, line):
        """True for SWC comment lines (starting with '#')."""
        return line.strip().startswith('#')

    def root(self):
        return self._root

    def regular_root(self):
        """The real (non-virtual) roots, i.e. the children of the virtual root."""
        return self._root.children

    def node_from_id(self, nid):
        """Linear search for the node with id `nid`; None if absent."""
        for tn in iterators.PreOrderIter(self._root):
            if tn.get_id() == nid:
                return tn
        return None

    def parent_id(self, nid):
        tn = self.node_from_id(nid)
        if tn:
            return tn.get_parent_id()

    def parent_node(self, nid):
        tn = self.node_from_id(nid)
        if tn:
            return tn.parent

    def child_list(self, nid):
        tn = self.node_from_id(nid)
        if tn:
            return tn.children

    def for_each_subtree(self, callback):
        """Apply `callback` to each regular root."""
        for tn in self.regular_root():
            callback(tn)

    def load(self, path):
        """Load an SWC file, linking parentless nodes under the virtual root."""
        self.clear()
        with open(path, 'r') as fp:
            lines = fp.readlines()
        node_dict = {}
        # First pass: build all nodes, remembering each parent id.
        for line in lines:
            if self.is_comment(line):
                continue
            data = list(map(float, line.split()))
            if len(data) == 7:
                nid = int(data[0])
                ntype = int(data[1])
                pos = data[2:5]
                radius = data[5]
                parent_id = data[6]
                tn = SwcNode(nid=nid, ntype=ntype, radius=radius, center=pos)
                node_dict[nid] = (tn, parent_id)
        # Second pass: wire up the parent links.
        for tn, parent_id in node_dict.values():
            if parent_id == -1:
                tn.parent = self._root
            else:
                parent_entry = node_dict.get(parent_id)
                if parent_entry:
                    tn.parent = parent_entry[0]

    def save(self, path):
        """Write all regular nodes back to an SWC file."""
        with open(path, 'w') as fp:
            for tn in iterators.PreOrderIter(self._root):
                if tn.is_regular():
                    fp.write('%s %d\n' % (tn.to_swc_str(), tn.get_parent_id()))

    def has_regular_node(self):
        return len(self.regular_root()) > 0

    def node_count(self, regular=True):
        """Number of nodes; with regular=True virtual nodes are skipped."""
        count = 0
        for tn in iterators.PreOrderIter(self._root):
            if not regular or tn.is_regular():
                count += 1
        return count

    def parent_distance(self, nid):
        """Distance from node `nid` to its parent (0 when unavailable)."""
        d = 0
        # BUG FIX: the original called the non-existent self.node();
        # the lookup method is node_from_id().
        tn = self.node_from_id(nid)
        if tn:
            parent_tn = tn.parent
            if parent_tn:
                d = tn.distance(parent_tn)
        return d

    def scale(self, sx, sy, sz, adjusting_radius=True):
        """Scale every node in place."""
        for tn in iterators.PreOrderIter(self._root):
            tn.scale(sx, sy, sz, adjusting_radius)

    def length(self):
        """Total cable length: sum of all parent distances."""
        return sum(tn.parent_distance()
                   for tn in iterators.PreOrderIter(self._root))

    def for_each_regular_node(self, callback):
        for tn in iterators.PreOrderIter(self._root):
            if tn.is_regular():
                callback(tn)

    def radius(self, nid):
        # BUG FIX: was self.node(nid); no such method exists.
        return self.node_from_id(nid).radius()
if __name__ == '__main__':
    # Ad-hoc smoke test exercising the node maths and the tree I/O.
    print('testing ...')
    tn1 = SwcNode(nid=1, radius=1, center=[0, 0, 0])
    tn2 = SwcNode(nid=1, radius=1, center=[0, 0, 2], parent=tn1)
    print(compute_two_node_area(tn1, tn2, 0.5))
    print(compute_surface_area(tn1, 2.0))

    tn = Make_Virtual()
    print(tn.get_id())
    print(tn.get_parent_id())
    print(tn.children)
    tn1 = SwcNode(nid=1, parent=tn)
    print(tn1.get_parent_id())
    print(tn.children)

    swc = SwcTree()
    swc.load('/Users/zhaot/Work/neutube/neurolabi/data/_benchmark/swc/fork.swc')
    swc._print()
    swc.save('/Users/zhaot/Work/neutube/neurolabi/data/test.swc')
    print(swc.node_count(False))
    print(swc.root())
    tn = swc.node_from_id(2)
    print(tn)
    print(swc.has_regular_node())
    print(swc.node_count())
    print(tn.parent_distance())
    swc.scale(2, 2, 2)
    print(tn.parent_distance())
    print(swc.length())

    swc.clear()
    swc._print()
    tn = SwcNode(nid=1, radius=1, parent=swc.root())
    swc._print()
    swc.for_each_regular_node(lambda tn: print(tn))
| StarcoderdataPython |
32713 | <reponame>BoyanPeychinov/object_oriented_programming
class Book:
    """A book with a shelf location and a reading-progress marker."""

    def __init__(self, title, author, location):
        self.title = title
        self.author = author
        self.location = location
        # Reading starts from the very beginning.
        self.page = 0

    def turn_page(self, page):
        """Remember `page` as the page currently opened."""
        self.page = page
| StarcoderdataPython |
1716512 | <filename>scattering.py
from __future__ import division, print_function
import numpy as np
import bosehubbard # model base
import graph # forcedirectedgraph layout
import scipy.linalg as linalg
import scipy.sparse as sparse
# $$\ $$\ $$\ $$\
# $$$\ $$$ | $$ | $$ |
# $$$$\ $$$$ | $$$$$$\ $$$$$$$ | $$$$$$\ $$ |
# $$\$$\$$ $$ |$$ __$$\ $$ __$$ |$$ __$$\ $$ |
# $$ \$$$ $$ |$$ / $$ |$$ / $$ |$$$$$$$$ |$$ |
# $$ |\$ /$$ |$$ | $$ |$$ | $$ |$$ ____|$$ |
# $$ | \_/ $$ |\$$$$$$ |\$$$$$$$ |\$$$$$$$\ $$ |
# \__| \__| \______/ \_______| \_______|\__|
class Model(bosehubbard.Model):
    """Bose-Hubbard model that caches its particle-number sectors."""

    def __init__(self, Es, links, Us, W=None):
        """Build the scattering structure.

        Parameters
        ----------
        Es : list
            Onsite energies.
        links : list of lists
            Links on the form [site_1, site_2, strength].
        Us : list
            Onsite interaction strengths.
        """
        bosehubbard.Model.__init__(self, Es, links, Us, W)
        self.reset()

    def numbersector(self, nb):
        """Return (and cache) the number sector with `nb` bosons.

        Parameters
        ----------
        nb : int
            Number of bosons in the sector.
        """
        sectors = self._cache['ns']
        if nb not in sectors:
            sectors[nb] = bosehubbard.NumberSector(self.n, nb, model=self)
        return sectors[nb]

    def reset(self):
        """Drop all cached number sectors."""
        self._cache = {'ns': {}}

    def draw(self, fig=None, ax=None):
        """Force-directed plot of the model graph."""
        g = graph.Graph(self.Es, None, self.links)
        g.forcedirectedlayout()
        g.plot(fig, ax)
# $$$$$$\ $$\ $$\
# $$ __$$\ $$ | $$ |
# $$ / \__|$$$$$$$\ $$$$$$\ $$$$$$$\ $$$$$$$\ $$$$$$\ $$ |
# $$ | $$ __$$\ \____$$\ $$ __$$\ $$ __$$\ $$ __$$\ $$ |
# $$ | $$ | $$ | $$$$$$$ |$$ | $$ |$$ | $$ |$$$$$$$$ |$$ |
# $$ | $$\ $$ | $$ |$$ __$$ |$$ | $$ |$$ | $$ |$$ ____|$$ |
# \$$$$$$ |$$ | $$ |\$$$$$$$ |$$ | $$ |$$ | $$ |\$$$$$$$\ $$ |
# \______/ \__| \__| \_______|\__| \__|\__| \__| \_______|\__|
class Channel:
    """Coupling between a single channel and one or *more* sites."""

    def __init__(self, site=None, sites=None, strength=None, strengths=None, positions=None):
        """Initialize the coupling object.

        Parameters
        ----------
        site : int
            Single site index (alternative to `sites`).
        sites : list
            Site indices, one per coupling to this channel.
        strength : float
            Single coupling strength (alternative to `strengths`).
        strengths : list
            Coupling strengths, one per coupling.
        positions : list
            Coordinates of the coupling points (defaults to all zero).
        """
        coupled_sites = np.atleast_1d(sites if site is None else site)
        couplings = np.atleast_1d(strengths if strength is None else strength)
        if positions is None:
            positions = [0] * len(coupled_sites)
        coords = np.atleast_1d(positions)

        # Discard couplings of zero strength.
        keep = couplings != 0
        self.sites = coupled_sites[keep]
        self.strengths = couplings[keep]
        self.positions = coords[keep]

        # Number of remaining couplings.
        self.n = len(self.sites)
        # Local means every coupling sits at the same coordinate.
        self.local = np.allclose(self.positions,
                                 self.positions[0] * np.ones((self.n, )), 1e-8)
        if self.local:
            # All couplings local: normalize the coordinates to zero.
            self.positions = np.zeros((self.n, ), dtype=np.float64)

    def gtilde(self, phi=0):
        """Effective coupling strengths dressed by propagation phases.

        Note: this explicitly implements the prefactor of b^dagger, not b.

        Parameters
        ----------
        phi : float
            Energy/wavenumber parameter in units of inverse length.
        """
        return self.strengths * np.exp(1j * phi * self.positions)

    @property
    def gs(self):
        """Alias for the coupling strengths."""
        return self.strengths

    @property
    def xs(self):
        """Alias for the coupling positions."""
        return self.positions
# $$$$$$\ $$\
# $$ __$$\ $$ |
# $$ / \__| $$$$$$\ $$$$$$\ $$\ $$\ $$$$$$\
# \$$$$$$\ $$ __$$\\_$$ _| $$ | $$ |$$ __$$\
# \____$$\ $$$$$$$$ | $$ | $$ | $$ |$$ / $$ |
# $$\ $$ |$$ ____| $$ |$$\ $$ | $$ |$$ | $$ |
# \$$$$$$ |\$$$$$$$\ \$$$$ |\$$$$$$ |$$$$$$$ |
# \______/ \_______| \____/ \______/ $$ ____/
# $$ |
# $$ |
# \__|
class Setup:
    """
    A complete quasi-local scattering Setup with
    scattering structure (model), channels, and parasitic couplings.

    This setup allows for a quasi-locally coupled scatterer.
    We employ the Markov approximation for propagation
    within the channels.
    We retain the phase acquired between coupling sites, as well as
    a certain directionality in terms of off-diagonal elements of the
    coupling Hamiltonian. In addition we describe dynamics within the
    scatterer exactly.
    """
    def __init__(self, model, channels, parasites=None):
        """
        Initialize the scattering setup.

        Parameters
        ----------
        model : Model object
            Describes the bosehubbard scattering centre
        channels : list of Channel objects
            List of channels
        parasites : list of Channel objects
            List of parasitic (loss) coupling objects
        """
        self.model = model
        self.channels = tuple(channels)
        self.parasites = tuple(parasites) if parasites is not None else ()
        # the setup is local iff every channel couples at a single position
        self.local = all(channel.local for channel in self.channels)
        # initialize this object's caches; the model keeps its own cache
        self.reset(model=False)
    def reset(self, model=True):
        """Delete all caches (also the model's cache unless ``model=False``)."""
        if model:
            self.model.reset()
        self._cache = {'eigen': {}, 'trans': {}, 'trsn': {}, 'sigma': {}}
    def eigenbasis(self, nb, phi=0):
        """
        Calculate the generalized eigen-energies along with the
        left and right eigen-bases of the effective Hamiltonian.

        Parameters
        ----------
        nb : int
            Number of bosons
        phi : float
            Phase factor for the relevant photonic state

        Returns
        -------
        tuple
            ``(E, psil, psir)`` -- eigenvalues, left eigenvectors (columns)
            and right eigenvectors (columns).
        """
        phi = 0 if self.local else phi
        ckey = '{}-{}'.format(nb, phi)
        if ckey not in self._cache['eigen']:
            # number sector and the corresponding self-energy
            ns1 = self.model.numbersector(nb)
            sigma = self.sigma(nb, phi)
            # effective (non-Hermitian) Hamiltonian: coherent part + decay
            H1n = ns1.hamiltonian + sigma
            # complete diagonalization; the left basis is reconstructed from
            # the inverse of the right one so the two are bi-orthonormal
            E1, psi1r = linalg.eig(H1n.toarray(), left=False)
            psi1l = np.conj(np.linalg.inv(psi1r)).T
            self._cache['eigen'][ckey] = (E1, psi1l, psi1r)
        return self._cache['eigen'][ckey]
    @staticmethod
    def check_for_dark_states(nb, Es):
        """Warn when a generalized eigenenergy is numerically real
        (vanishing imaginary part, i.e. a non-decaying dark state)."""
        dark_state_indices = np.where(np.abs(np.imag(Es)) < 10 * np.spacing(1))
        if len(dark_state_indices[0]) == 0:
            return
        import warnings
        # BUGFIX: np.where returns a tuple of index arrays, so the count must
        # come from dark_state_indices[0] (the old code always reported 1)
        warnings.warn('The {} block contains {} dark state(s) with generalized eigenenergie(s): {}'.format(nb, len(dark_state_indices[0]), Es[dark_state_indices]))
    def sigma(self, nb, phi=0):
        """
        Total (local plus quasi-local) self energy.

        Parameters
        ----------
        nb : int
            number sector, number of bosons
        phi : float
            phase contribution in units of energy per length
        """
        # Local systems have no phases
        phi = 0 if self.local else phi
        # the local part (including parasites) is cached; the quasi-local
        # part is phase-dependent and recomputed
        ckey = '{}-{}'.format(nb, phi)
        if ckey not in self._cache['sigma']:
            self._cache['sigma'][ckey] = Setup.sigma_local(self.model, self.channels + self.parasites, nb)
        sigmal = self._cache['sigma'][ckey]
        # if the setup is purely local: break off the calculation here
        if self.local:
            return sigmal
        sigmaql = Setup.sigma_quasi_local(self.model, self.channels, nb, phi)
        return sigmal + sigmaql
    @staticmethod
    def sigma_local(model, channels, nb):
        """
        Computes the local contribution to the self-energy.

        Parameters
        ----------
        model : Model object
            Model object
        channels : list of Channel objects
            Contains all channels (including parasitic ones)
        nb : int
            number of bosons/photons

        Returns
        -------
        scipy.sparse.csr_matrix
            Self-energy in the nb-boson number-sector basis.
        """
        Gams = np.zeros((model.n, model.n), dtype=np.complex128)
        for channel in channels:
            for n, sn in enumerate(channel.sites):
                # diagonal decay of site sn into this channel
                Gams[sn, sn] += - 1j * np.pi * np.abs(channel.strengths[n]) ** 2
                # correlated decay between pairs of couplings that sit at the
                # same position along the channel.
                # BUGFIX: enumerate with start=n+1 so that m is the *coupling*
                # index; the old code indexed strengths/xs with the site
                # labels sn/sm, which selects wrong entries (or raises
                # IndexError) whenever site labels differ from coupling
                # indices.
                for m, sm in enumerate(channel.sites[(n + 1):], start=n + 1):
                    if channel.xs[n] == channel.xs[m]:
                        Gams[sn, sm] += - 1j * np.pi * np.conjugate(channel.strengths[m]) * channel.strengths[n]
                        Gams[sm, sn] += - 1j * np.pi * np.conjugate(channel.strengths[n]) * channel.strengths[m]
        # promote the single-particle matrix to the nb-boson sector
        ns = model.numbersector(nb)
        Ski, Skj, Skv = ns.hopping_hamiltonian(ns.basis, Gams, ns.basis.vs)
        Sigma = sparse.coo_matrix((Skv, (Ski, Skj)), shape=[ns.basis.len] * 2).tocsr()
        return Sigma
    @staticmethod
    def sigma_quasi_local(model, channels, nb, phi=0):
        """
        Quasi-local contribution to the self-energy.

        Parameters
        ----------
        model : Model object
            Model object
        channels : list of Channel objects
            Contains all channels
        nb : int
            number of bosons/photons
        phi : float
            phase contribution in units of energy per length
        """
        Gams = np.zeros((model.n, model.n), dtype=np.complex128)
        for channel in channels:
            # local channels carry no propagation phases; skip them
            if channel.local is True:
                continue
            for n, sn in enumerate(channel.sites):
                posn = channel.positions[n]
                gn = channel.strengths[n]
                for m, sm in enumerate(channel.sites):
                    posm = channel.positions[m]
                    gms = np.conjugate(channel.strengths[m])
                    # only one channel chirality contributes, i.e. "leave the
                    # model through one channel and return from a point
                    # further down that same channel"
                    if posn > posm:
                        Gams[sn, sm] += - 2 * 1j * np.pi * gms * gn * np.exp(1j * phi * (posn - posm))
        # promote the single-particle matrix to the nb-boson sector
        ns = model.numbersector(nb)
        Ski, Skj, Skv = ns.hopping_hamiltonian(ns.basis, Gams, ns.basis.vs)
        Sigma = sparse.coo_matrix((Skv, (Ski, Skj)), shape=[ns.basis.len] * 2).tocsr()
        return Sigma
    def eigenenergies(self, nb, phi=0):
        """
        Return the eigenenergies in a given number sector.

        Parameters
        ----------
        nb : int
            Number of bosons in the given number sector
        phi : float
            phase related to the photonic energy
        """
        # eigenbasis() already normalizes phi and caches the decomposition
        return self.eigenbasis(nb, phi)[0]
    def transition(self, ni, channel, nf, phi=0):
        """
        Generalized transition matrix elements for a single channel.

        Parameters
        ----------
        ni : int
            initial number sector
        channel : int
            channel index
        nf : int
            final number sector
        phi : float
            phase parameter for the coupling constants and states
        """
        # no phases for local setups
        phi = 0 if self.local else phi
        ckey = '{}-{}-{}-{}'.format(nf, channel, ni, phi)
        if ckey not in self._cache['trans']:
            ch = self.channels[channel]
            # effective coupling constants in front of b^dagger
            gt = ch.gtilde(phi)
            if nf < ni:
                # a boson is removed: b = (b^dagger)^dagger
                gt = np.conj(gt)
            gen = (gt[i] * self.trsn(ni, ch.sites[i], nf, phi) for i in range(ch.n))
            self._cache['trans'][ckey] = sum(gen)
        return self._cache['trans'][ckey]
    def trsn(self, ni, site, nf, phi=0):
        """
        Bare transition matrix elements in the site basis.

        Parameters
        ----------
        ni : int
            initial number sector
        site : int
            Model site index
        nf : int
            final number sector
        phi : float
            phase parameter for the initial/final states
        """
        # no phases in local setups
        phi = 0 if self.local else phi
        ckey = '{}-{}-{}-{}'.format(nf, site, ni, phi)
        if ckey not in self._cache['trsn']:
            # initial and final number sectors and eigenbases
            nsi = self.model.numbersector(ni)
            _, _, psiir = self.eigenbasis(ni, phi)
            nsf = self.model.numbersector(nf)
            _, psifl, _ = self.eigenbasis(nf, phi)
            # project every initial right eigenvector through the bare
            # transition operator onto the final left eigenbasis
            A = np.zeros((nsf.basis.len, nsi.basis.len), dtype=np.complex128)
            for i in range(nsi.basis.len):
                A[:, i] = psifl.conj().T.dot(
                    bosehubbard.transition(site, psiir[:, i], nsi.basis, nsf.basis)
                )
            self._cache['trsn'][ckey] = A
        return self._cache['trsn'][ckey]
# UTILITIES
def discrete_energies(E, dE, N=1024, WE=8.):
    """
    Discretization of scattering energies around E/2.

    The grid is chosen so that it contains the "elastic" points
    nu0 = nu2 and nu0 = nu3.

    Parameters
    ----------
    E : float
        Total two-particle energy
    dE : float
        Two-particle energy difference
    N : int
        Number of discretization points
    WE : float
        Half width of the scattering energy spectrum
    """
    half_points = N / 2
    half_delta = dE / 2
    # number of grid points that fall within one half energy difference
    n_delta = np.ceil(half_points / (half_delta + WE) * half_delta)
    if np.abs(dE) > 0:
        # stretch the half-width so the elastic points land on the grid
        half_width = half_points / n_delta * half_delta
    else:
        half_width = WE
    return np.linspace(-half_width, half_width, N, endpoint=False) + E / 2
| StarcoderdataPython |
1720401 | <gh_stars>100-1000
import numpy as np
import scipy.sparse
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import expit
from sklearn import metrics
class BinaryLogisticRegressionTrainer:
    """
    Class to train l2-regularized binary logistic regression. Supports the following:
    [1] Training
    [2] Inference/scoring
    [3] Metrics computation
    This implementation assumes that the binary label setting is 0/1. It also automatically adds an intercept
    term during optimization.
    The loss and the gradient assume l2-regularization. Users can use the "regularize_bias" switch to regularize the
    intercept term or not.
    """
    def __init__(self, lambda_l2=1.0, solver="lbfgs", precision=10, num_lbfgs_corrections=10, max_iter=100, regularize_bias=False):
        # l2 penalty weight (lambda in (lambda/2)*||theta||^2)
        self.lambda_l2 = lambda_l2
        # only L-BFGS is supported
        assert solver in ("lbfgs",)
        self.solver = solver
        # Iterations stop when (f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= precision * eps
        # (forwarded to fmin_l_bfgs_b as its 'factr' argument)
        self.precision = precision
        # number of correction pairs kept by L-BFGS (its 'm' parameter)
        self.num_lbfgs_corrections = num_lbfgs_corrections
        self.max_iter = max_iter
        # whether the intercept takes part in the l2 penalty
        self.regularize_bias = regularize_bias
        # Set the model parameters to None
        # (populated by fit(); index 0 is the intercept)
        self.theta = None
    def _sigmoid(self, z):
        """
        Calculate element-wise sigmoid of array z
        """
        # For now, using Scipy's numerically stable sigmoid function
        return expit(z)
    def _predict(self, theta, X, offsets, return_logits=False):
        """
        Calculate logistic regression output when an input matrix is pushed through a parameterized LR model
        Output can be logits or sigmoid probabilities
        """
        # Handle dense and sparse theta separately
        if isinstance(theta, np.ndarray):
            z = X.dot(theta) + offsets
        elif scipy.sparse.issparse(theta):
            # sparse dot yields a sparse result; densify and flatten to 1-D
            z = np.array(X.dot(theta).todense()).squeeze() + offsets
        else:
            raise Exception(f"Unknown type: {type(theta)!r} for model weights. Accepted types are Numpy ndarray and Scipy sparse matrices")
        return z if return_logits else self._sigmoid(z)
    def _get_number_of_samples(self, X):
        """
        Get number of samples from a data 2d-array
        """
        return X.shape[0]
    def _get_loss_from_regularization(self, theta, intercept_index=0):
        """
        Get loss for regularization term. Exclude intercept if self.regularize_bias is set to false
        """
        # For now, we assume "intercept_index" is always zero
        if not self.regularize_bias:
            # penalize only the non-intercept coefficients
            loss = (self.lambda_l2 / 2.0) * theta[intercept_index + 1:].dot(theta[intercept_index + 1:])
        else:
            loss = (self.lambda_l2 / 2.0) * theta.dot(theta)
        return loss
    def _loss(self, theta, X, y, weights, offsets):
        """
        Calculate loss for weighted binary logistic regression
        """
        n_samples = self._get_number_of_samples(X)
        # For numerical stability, we transform the traditional binary cross entropy loss into a stable, equivalent form
        # (Inspired from - https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits)
        # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
        # = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
        # = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
        # = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
        # = (1 - z) * x + log(1 + exp(-x))
        # = x - x * z + log(1 + exp(-x))
        # = log(exp(x)) - x * z + log(1 + exp(-x))
        # = - x * z + log(1 + exp(x))
        # max(x, 0) - x * z + log(1 + exp(-abs(x)))
        pred = X.dot(theta) + offsets
        cross_entropy_cost = np.maximum(pred, 0) - pred * y + np.log(1 + np.exp(-np.absolute(pred)))
        # per-sample weighting
        cost = weights * cross_entropy_cost
        # Compute total cost, including regularization
        total_cost = (1.0 / n_samples) * (cost.sum() + self._get_loss_from_regularization(theta))
        return total_cost
    def _get_gradient_from_regularization(self, theta, intercept_index=0):
        """
        Get gradient for regularization term. Exclude intercept if self.regularize_bias is set to false
        """
        gradient = self.lambda_l2 * theta
        if not self.regularize_bias:
            # the intercept contributes nothing to the penalty gradient
            gradient[intercept_index] = 0
        return gradient
    def _gradient(self, theta, X, y, weights, offsets):
        """
        Calculate gradient of loss for weighted binary logistic regression
        """
        n_samples = self._get_number_of_samples(X)
        predictions = self._predict(theta, X, offsets)
        # X^T (w * (p - y)): gradient of the weighted cross-entropy term
        cost_grad = X.T.dot(weights * (predictions - y))
        # same 1/n scaling as in _loss so loss and gradient stay consistent
        grad = (1.0 / n_samples) * (cost_grad + self._get_gradient_from_regularization(theta))
        return grad
    def _add_column_of_ones(self, X):
        """
        Add intercept column to a dense/sparse matrix
        """
        if isinstance(X, np.ndarray):
            X_with_intercept = np.hstack((np.ones((X.shape[0], 1)), X))
        else:
            # keep sparse inputs sparse
            X_with_intercept = scipy.sparse.hstack((np.ones((X.shape[0], 1)), X))
        return X_with_intercept
    def fit(self, X, y, weights=None, offsets=None, theta_initial=None):
        """
        Fit a binary logistic regression model
        :param X: a dense or sparse matrix of dimensions (n x d), where n is the number of samples,
                  and d is the number of features
        :param y: vector of binary sample labels; of dimensions (n x 1) where n is the number of samples
        :param weights: vector of sample weights; of dimensions (n x 1) where n is the number of samples
        :param offsets: vector of sample offsets; of dimensions (n x 1) where n is the number of samples
        :param theta_initial: initial value for the coefficients, useful in warm start.
        :return: training results tuple from fmin_l_bfgs_b (theta, final loss, info dict)
        """
        # Assert labels are of binary type only
        assert np.count_nonzero((y != 0) & (y != 1)) == 0
        n_samples = self._get_number_of_samples(X)
        # default to unit weights and zero offsets
        if weights is None:
            weights = np.ones(n_samples)
        if offsets is None:
            offsets = np.zeros(n_samples)
        # Assert all shapes are same
        assert (X.shape[0] == y.shape[0] == weights.shape[0] == offsets.shape[0])
        X_with_intercept = self._add_column_of_ones(X)
        # cold start from zeros unless a warm-start vector is supplied
        if theta_initial is None:
            theta_initial = np.zeros(X_with_intercept.shape[1])
        assert theta_initial.shape == (X_with_intercept.shape[1],), "Initial model should have the same shape as input data"
        # Run minimization
        # (approx_grad=False: we supply the analytic gradient via fprime)
        result = fmin_l_bfgs_b(func=self._loss,
                               x0=theta_initial,
                               approx_grad=False,
                               fprime=self._gradient,
                               m=self.num_lbfgs_corrections,
                               factr=self.precision,
                               maxiter=self.max_iter,
                               args=(X_with_intercept, y, weights, offsets),
                               disp=0)
        # Extract learned parameters from result
        self.theta = result[0]
        return result
    def predict_proba(self, X, offsets=None, custom_theta=None, return_logits=False):
        """
        Predict binary logistic regression probabilities/logits using a trained model
        :param X: a dense or sparse matrix of dimensions (n x d), where n is the number of samples,
                  and d is the number of features
        :param offsets: vector of sample offsets; of dimensions (n x 1) where n is the number of samples
        :param custom_theta: optional weight vector of dimensions (d x 1), overrides learned weights if provided
        :param return_logits: return logits if set to True, probabilities otherwise
        :return: probabilities/logits
        """
        # Assert X and offsets are compatible dimension-wise
        if offsets is None:
            offsets = np.zeros(self._get_number_of_samples(X))
        assert (X.shape[0] == offsets.shape[0])
        # fall back to the fitted weights when no override is given
        custom_theta = self.theta if custom_theta is None else custom_theta
        if custom_theta is None:
            raise Exception("Custom weights must be provided if attempting inference on untrained model")
        X_with_intercept = self._add_column_of_ones(X)
        return self._predict(custom_theta, X_with_intercept, offsets, return_logits)
    def compute_metrics(self, X, y, offsets=None, custom_theta=None):
        """
        Compute metrics using a trained binary logistic regression model
        :param X: a dense or sparse matrix of dimensions (n x d), where n is the number of samples,
                  and d is the number of features
        :param y: vector of binary sample labels; of dimensions (n x 1) where n is the number of samples
        :param offsets: vector of sample offsets; of dimensions (n x 1) where n is the number of samples
        :param custom_theta: optional weight vector of dimensions (d x 1), overrides learned weights if provided
        :return: a dictionary of metrics
        """
        # Assert X , y and offsets are compatible dimension-wise
        if offsets is None:
            offsets = np.zeros(X.shape[0])
        assert (X.shape[0] == y.shape[0] == offsets.shape[0])
        custom_theta = self.theta if custom_theta is None else custom_theta
        if custom_theta is None:
            raise Exception("Custom weights must be provided if attempting metrics computation on untrained model")
        # Run prediction and calculate AUC
        pred = self.predict_proba(X, offsets, custom_theta)
        fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=1)
        auc = metrics.auc(fpr, tpr)
        return {"auc": auc}
| StarcoderdataPython |
111203 | <reponame>yuqj1990/deepano_train
#!/usr/bin/env python
import numpy as np
def loadCSVFile(file_name):
    """Load a comma-separated file into a 2-D numpy array of strings.

    Parameters
    ----------
    file_name : str
        Path of the CSV file to read.

    Returns
    -------
    numpy.ndarray
        Array of the raw string fields, one row per CSV line.
    """
    # BUGFIX: dtype=str replaces the deprecated alias np.str, which was
    # removed in NumPy 1.24 (both mean "native Python str" here)
    file_content = np.loadtxt(file_name, dtype=str, delimiter=",")
    return file_content
| StarcoderdataPython |
3335500 | <reponame>ralfgerlich/modypy
# pylint: disable=missing-module-docstring
import numpy as np
from modypy.blocks.discont import saturation
from modypy.model import Clock, System, signal_function
from modypy.simulation import SimulationResult, Simulator
from numpy import testing as npt
def test_saturation():
    """A saturated sine must equal the raw sine clipped to [-0.5, 0.6]."""
    # build a system that samples a 1 Hz sine every 10 ms
    system = System()
    Clock(system, period=0.01)

    @signal_function
    def _sine_source(system_state):
        return np.sin(2 * np.pi * system_state.time)

    clipped_signal = saturation(_sine_source, lower_limit=-0.5, upper_limit=0.6)

    # simulate one full period and collect the results
    sim = Simulator(system, start_time=0.0)
    result = SimulationResult(system, sim.run_until(time_boundary=1.0))

    # the saturated output must match an element-wise clip of the raw sine
    raw = _sine_source(result)
    expected = np.minimum(np.maximum(raw, -0.5), 0.6)
    npt.assert_almost_equal(clipped_signal(result), expected)
| StarcoderdataPython |
3209025 | <filename>assets/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import assets.asset_helpers
import django.core.files.storage
class Migration(migrations.Migration):
    # Auto-generated initial migration for the 'assets' app: creates the
    # Asset, AssetType and SecureAsset tables and wires up their foreign keys.
    # Edit with care -- the recorded schema history depends on this file.

    # initial migration: no dependencies on other migrations
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Asset',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('name', models.CharField(null=True, max_length=100, blank=True)),
                ('delete_file_with_record',
                 models.BooleanField(default=True, help_text='This will delete the file if this asset is deleted')),
                ('file', models.FileField(upload_to=assets.asset_helpers.generate_asset_file_name)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='AssetType',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('type_of_asset', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='SecureAsset',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('name', models.CharField(null=True, max_length=100, blank=True)),
                ('delete_file_with_record',
                 models.BooleanField(default=True, help_text='This will delete the file if this asset is deleted')),
                # NOTE(review): an absolute, machine-specific storage path is
                # baked into the migration -- presumably this should come from
                # settings; confirm before reusing on another host
                ('file', models.FileField(upload_to=assets.asset_helpers.generate_asset_file_name,
                                          storage=django.core.files.storage.FileSystemStorage(
                                              base_url='/secure_asset',
                                              location='/home/geomemes/cedar_media_secure'))),
                ('asset_type', models.ForeignKey(to='assets.AssetType')),
            ],
            options={
                'abstract': False,
            },
        ),
        # add the FK after model creation (generated ordering)
        migrations.AddField(
            model_name='asset',
            name='asset_type',
            field=models.ForeignKey(to='assets.AssetType'),
        ),
    ]
| StarcoderdataPython |
4837717 | <reponame>PiRK/silx<filename>silx/gui/data/test/test_textformatter.py
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "24/01/2017"
import unittest
from silx.gui.test.utils import TestCaseQt
from silx.gui.test.utils import SignalListener
from ..TextFormatter import TextFormatter
class TestTextFormatter(TestCaseQt):
    """Tests for TextFormatter: copy semantics, change signals and
    per-type string formatting."""

    def test_copy(self):
        """A copied formatter starts with the same settings but is
        independent afterwards."""
        formatter = TextFormatter()
        copy = TextFormatter(formatter=formatter)
        self.assertIsNot(formatter, copy)
        copy.setFloatFormat("%.3f")
        # BUGFIX: use assertEqual/assertNotEqual -- the 'assertEquals'
        # aliases are deprecated and were removed in Python 3.12
        self.assertEqual(formatter.integerFormat(), copy.integerFormat())
        self.assertNotEqual(formatter.floatFormat(), copy.floatFormat())
        self.assertEqual(formatter.useQuoteForText(), copy.useQuoteForText())
        self.assertEqual(formatter.imaginaryUnit(), copy.imaginaryUnit())

    def test_event(self):
        """Each of the four setters must emit formatChanged once."""
        listener = SignalListener()
        formatter = TextFormatter()
        formatter.formatChanged.connect(listener)
        formatter.setFloatFormat("%.3f")
        formatter.setIntegerFormat("%03i")
        formatter.setUseQuoteForText(False)
        formatter.setImaginaryUnit("z")
        self.assertEqual(listener.callCount(), 4)

    def test_int(self):
        """Integers are rendered with the configured integer format."""
        formatter = TextFormatter()
        formatter.setIntegerFormat("%05i")
        result = formatter.toString(512)
        self.assertEqual(result, "00512")

    def test_float(self):
        """Floats are rendered with the configured float format."""
        formatter = TextFormatter()
        formatter.setFloatFormat("%.3f")
        result = formatter.toString(1.3)
        self.assertEqual(result, "1.300")

    def test_complex(self):
        """Complex numbers use the float format plus the imaginary unit."""
        formatter = TextFormatter()
        formatter.setFloatFormat("%.1f")
        formatter.setImaginaryUnit("i")
        result = formatter.toString(1.0 + 5j)
        result = result.replace(" ", "")
        self.assertEqual(result, "1.0+5.0i")

    def test_string(self):
        """Text values are quoted regardless of numeric format settings."""
        formatter = TextFormatter()
        formatter.setIntegerFormat("%.1f")
        formatter.setImaginaryUnit("z")
        result = formatter.toString("toto")
        self.assertEqual(result, '"toto"')
def suite():
    """Assemble this module's tests into a TestSuite (used by the runner)."""
    test_suite = unittest.TestSuite()
    test_suite.addTest(
        unittest.defaultTestLoader.loadTestsFromTestCase(TestTextFormatter))
    return test_suite
# allow running this test module directly
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| StarcoderdataPython |
3358247 | import os,sys,inspect
# make the package root (the parent of this script's directory) importable,
# so 'pmpy' resolves when the script is run from its own folder
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from pmpy.des import *
'''
testing preemptive resources
this is not working yet
'''
def p1(a:entity,R):
    """Process that acquires one unit of *R* and holds it during an
    interruptible 10-unit activity before releasing it."""
    yield a.get(R,1,False)
    # this activity can be cut short when R is preempted by p2
    yield a.interruptive_do('something',10)
    yield a.put(R)
def p2(b,R):
    """Process that preempts resource *R* after an initial 5-unit activity."""
    yield b.do('b_act',5)
    # higher priority (-1) plus preempt=True should snatch R away from p1
    r=b.get(R,priority=-1,preempt=True)
    yield r
    print("interrupted")
    yield b.do('b_act2',5)
    yield b.put(R)
# one environment, two entities contending for a single preemptive resource;
# print_actions=True echoes every event for manual inspection
env=environment()
e1=entity(env,'e1',print_actions=True)
e2=entity(env,'e2',print_actions=True)
R=preemptive_resource(env,'Truck',print_actions=True)
# e1 holds the truck first; e2 preempts it after 5 time units
env.process(p1(e1,R))
env.process(p2(e2,R))
env.run()
| StarcoderdataPython |
1699726 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class UsersConfig(AppConfig):
    """Django application configuration for the users app."""
    name = "apps.users"
    verbose_name = _("users")
    def ready(self):
        # importing the module registers the app's signal handlers as a
        # side effect; the import is intentionally "unused" otherwise
        import apps.users.signals
| StarcoderdataPython |
3352803 | import nbformat as nbf
import glob
import shutil
import nb2html
retrieve_name_from_cell = lambda cell_source: cell_source.replace('#','').strip()
def retrieve_name_from_fname(fname):
    """Return the title of the first markdown cell of a notebook,
    or 'ERROR' when the notebook has no markdown cell."""
    notebook = nbf.read(open(fname),nbf.current_nbformat)
    for cell in notebook['cells']:
        if cell['cell_type'] != 'markdown':
            continue
        # the first markdown cell is assumed to hold the section heading
        return retrieve_name_from_cell(cell['source'])
    return 'ERROR'
def get_manual_configuration():
    """Load the hand-written book configuration from notebooks/config.yml."""
    import yaml
    # BUGFIX: yaml.safe_load -- the bare yaml.load requires an explicit
    # Loader since PyYAML 5 and can execute arbitrary tags; the context
    # manager also closes the file deterministically (the old code leaked it)
    with open('notebooks/config.yml') as config_file:
        return yaml.safe_load(config_file)
def get_configuration():
    """Scan the notebooks/Chapter_* folders and build the book structure.

    Returns a list of dicts with keys ``chapter_name`` (title of the first
    section), ``folder_name`` (folder path without the ``notebooks/``
    prefix) and ``sections`` (file_name/section_name dicts, sorted by name).
    """
    chapters = []
    for folder in sorted(glob.glob("notebooks/Chapter_*")):
        sections = []
        for path in sorted(glob.glob("%s/*.ipynb" % folder)):
            sections.append({
                'file_name': path.rpartition('/')[2],
                'section_name': retrieve_name_from_fname(path),
            })
        chapters.append({
            # the chapter is titled after its first section
            'chapter_name': sections[0]['section_name'],
            'folder_name': folder[10:],  # strip the leading "notebooks/"
            'sections': sections,
        })
    return chapters
#
#
# A function which makes the markdown file for a section
#
#
# gitbook include stub: section title, a rule, then the rendered HTML partial
outline = """{}
------
{{% include "../notebooks-html/{}" %}}
"""
def make_md_file(title, ipynb_name, cold=False):
    """Write the markdown stub for one section into notebooks-md/.

    When ``cold`` is True the stub is only printed (dry run) instead of
    being written to disk.
    """
    html_name = ipynb_name.replace('ipynb', 'html')
    stub = outline.format(title, html_name)
    if cold:
        print(stub)
        return
    md_path = 'notebooks-md/%s' % ipynb_name.replace('ipynb', 'md')
    with open(md_path, 'w') as f:
        f.write(stub)
#
#
# In this section
# - python notebooks (.ipynb) are moved to "notebooks-flat"
# - markdown files (.md) are generated for each section in "notebooks-md"
#
#
def copy_into_flat_directory(configuration, cold=False):
    """Copy every section notebook into notebooks-flat/ with a chapter-number
    prefix and emit the matching markdown stub for each section.

    Parameters
    ----------
    configuration : list
        Book structure as produced by get_configuration().
    cold : bool
        Dry-run switch: when True nothing is copied or written, the planned
        actions are only printed.
    """
    # the front-matter notebook is flattened as-is (no chapter prefix).
    # BUGFIX: the old code copied it and wrote its stub unconditionally,
    # ignoring the dry-run flag.
    if cold:
        print("Copying from notebooks/To_the_Student.ipynb to notebooks-flat/To_the_Student.ipynb")
    else:
        shutil.copy2('notebooks/To_the_Student.ipynb', 'notebooks-flat/To_the_Student.ipynb')
    make_md_file('To The Student', 'To_the_Student.ipynb', cold=cold)
    for n, chapter in enumerate(configuration, start=1):
        for section in chapter['sections']:
            src = 'notebooks/%s/%s' % (chapter['folder_name'], section['file_name'])
            ipynb_file = '%d_%s' % (n, section['file_name'])
            dest = 'notebooks-flat/%s' % ipynb_file
            make_md_file(section['section_name'], ipynb_file, cold=cold)
            if cold:
                print("Copying from %s to %s" % (src, dest))
            else:
                shutil.copy2(src, dest)
#
#
# In this section
# - The summary file (SUMMARY.md) is generated
#
#
def generate_summary(configuration, cold=False):
    """Render SUMMARY.md (the gitbook table of contents) from the book
    structure; with ``cold=True`` the result is printed instead of written."""
    toc_head = """
# Summary
* [Authors and License](README.md)
* [To the Student](notebooks-md/To_the_Student.md)
"""
    blocks = [toc_head]
    for n, chapter in enumerate(configuration, start=1):
        # the chapter link points at its first section's markdown stub
        intro_md = chapter['sections'][0]['file_name'].replace('ipynb', 'md')
        lines = ['* [Chapter %d: %s](notebooks-md/%d_%s)' % (n, chapter['chapter_name'], n, intro_md)]
        # remaining sections are indented one level below their chapter
        for i, section in enumerate(chapter['sections'][1:], start=1):
            section_md = section['file_name'].replace('ipynb', 'md')
            lines.append('\t* [%d.%d %s](notebooks-md/%d_%s)' % (n, i, section['section_name'], n, section_md))
        blocks.append('\n'.join(lines) + '\n')
    summary_text = "\n".join(blocks)
    if cold:
        print(summary_text)
    else:
        with open("SUMMARY.md", "w") as f:
            f.write(summary_text)
if __name__ == "__main__":
    # full build: scan the book structure, flatten the notebooks, emit the
    # markdown stubs and the table of contents, then render the HTML partials
    configuration = get_configuration()
    copy_into_flat_directory(configuration,cold=False)
    generate_summary(configuration,cold=False)
    notebook_paths = glob.glob('notebooks-flat/*.ipynb')
    nb2html.convert_notebooks_to_html_partial(notebook_paths)
    # echo the generated summary for a quick sanity check
    print(open("SUMMARY.md",'r').read())
| StarcoderdataPython |
3389320 | import requests
from bs4 import BeautifulSoup
import csv
import os
import glob
import time
from datetime import datetime
from config import *
#####################################################
# Function : get_latest_stats
# Description : Gets the latest stats from the specified page and stores data in a CSV
#####################################################
def get_latest_stats():
    """Scrape the team-member table from the F@H stats page and store it
    as a CSV snapshot (rank, team rank, name, score, work units).

    Relies on module-level configuration: ``fah_stats_page``,
    ``stats_folder_dir`` and ``file_name`` (from ``config``).
    """
    # fetch and parse the stats page
    page = requests.get(fah_stats_page)
    soup = BeautifulSoup(page.text, 'html.parser')
    # the member rows live in the table with class 'members'
    members_table = soup.find(class_='members')
    members_table_entries = members_table.find_all('tr')
    # BUGFIX: use a context manager so the CSV is flushed/closed (the old
    # code leaked the handle), and pass newline='' as the csv module
    # requires (avoids blank rows on Windows)
    with open(stats_folder_dir + file_name, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        # skip the header row; each remaining row is one team member
        for entry in members_table_entries[1:]:
            lines = entry.get_text().split('\n')
            rank = int(lines[1])
            team_rank = int(lines[2])
            name = lines[3]
            score = int(lines[4])
            work_units = int(lines[5])
            writer.writerow([rank, team_rank, name, score, work_units])
#####################################################
# Function : calculate_ppd
# Description : Calculates the average PPD of all folders in the team using up to the previous 24 sets of stats.
#####################################################
def calculate_ppd():
    """Decide whether enough stat snapshots exist to compute PPD
    (points per day).

    24 snapshots are needed for a PPD figure; with fewer, only points per
    hour (PPH) could be derived. The actual computation is still a TODO.
    """
    # collect all stat snapshots, oldest first
    # BUGFIX: removed the dead glob/os.listdir calls whose results were
    # immediately overwritten
    files = glob.glob(stats_folder_dir + "*.csv")
    files.sort(key=os.path.getmtime)
    print(files)
    print(len(files))
    if len(files) >= 24:
        # TODO: verify the newest and the 24-back snapshot really span
        # ~24 hours (5-minute tolerance) before averaging
        print("Can calculate PPD")  # typo "calulate" fixed
    else:
        # TODO: calculate PPH between each pair of snapshots and average
        print("Can only calculate PPH")
def main():
    """Fetch the latest stats snapshot, then report on PPD availability."""
    get_latest_stats()
    calculate_ppd()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3328028 | <reponame>mithrindel/Misc-scripts<gh_stars>0
#!/usr/bin/python
import os
import time
import datetime
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
###SCRIPT VARIABLE###
recordingPath_root = r'\\XXX'
averageFileSizeCriteria = 5000 #bytes
smtpServer = 'XXX'
smtpPort = 25
fromaddr = 'XXX'
toaddr = 'XXX'
###Checks day recording size if not on Saturday or Sunday. Send a Mail if file exist and is too small###
def checkRecordingSize():
date = datetime.date.today()
year = date.strftime("%Y")
month = date.strftime("%m").lstrip('0')
day = date.strftime("%d").lstrip('0')
folderSize = 0
nbFiles = 0
averageFileSize = 0
try:
recordingPath_date = os.path.join(recordingPath_root, year, month, day)
print ("[checkRecordingSize] - Checking recording average size...")
for root, dirs, files in os.walk(recordingPath_date):
for recordingFile in files:
recordingFile = os.path.join(os.path.abspath(root),recordingFile)
folderSize += os.path.getsize(recordingFile)
nbFiles += 1
averageFileSize = round(folderSize/nbFiles)
print('[checkRecordingSize] - averageFileSize =', averageFileSize/1000, 'KB')
if(averageFileSize < averageFileSizeCriteria):
print ("[checkRecordingSize] - Average file size abnormally small")
sendMail("[WARNING] - Average file size abnormally small")
else:
print ("[checkRecordingSize] - Average size OK")
sendMail("[OK] - Average file size OK")
except:
print ("[checkRecordingSize] - ERROR")
sendMail("[WARNING] - Error running script")
return
###Send an email using the global variable on top of script. Message content is variable msg###
def sendMail(mailContent):
    """Send a notification e-mail whose subject is ``mailContent``.

    Uses the module-level ``smtpServer``/``smtpPort``/``fromaddr``/``toaddr``
    settings.  The body is intentionally empty; the subject carries the info.
    """
    try:
        msg = MIMEMultipart()
        msg['From'] = fromaddr
        msg['To'] = toaddr
        msg['Subject'] = mailContent
        body = ''
        msg.attach(MIMEText(body, 'plain'))
        # Bug fix: the port was hard-coded to 25 here, silently ignoring the
        # configurable module-level ``smtpPort`` setting.
        server = smtplib.SMTP(smtpServer, smtpPort)
        text = msg.as_string()
        server.sendmail(fromaddr, toaddr, text)
        server.quit()
        print("[sendMail] - Mail sending SUCCESS")
    except Exception as error:  # narrowed from a bare except
        print("[sendMail] - Mail sending FAIL:", error)
    return
###Used to clear the console screen where script is launched###
def clear_screen():
    """Clear the terminal window (``cls`` on Windows, ``clear`` on POSIX)."""
    if os.name == 'nt':
        command = 'cls'
    else:
        command = 'clear'
    os.system(command)
###Main function of the script###
def main():
    """Show the welcome banner and run the morning recording-size check."""
    clear_screen()
    border = '\t* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
    banner_lines = (
        '\n\n',
        border,
        '\t* *',
        '\t* Welcome to the XXXXXXXX Morning Check *',
        '\t* *',
        '\t* Developed by mithrindel *',
        '\t* *',
        border,
        '\n\n',
    )
    for line in banner_lines:
        print(line)
    print("[main_menu] - checkRecordingSize call")
    time.sleep(1)
    checkRecordingSize()
    print("[main_menu] - End of the execution")
    time.sleep(2)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4810780 | """ Collection of Data Scaling Transformers """
from typing import Dict, Iterable, Tuple, Union
from ..changemap.changemap import ChangeMap
from .running_stats import RunningStats
class MinMaxScaler(RunningStats):
    """
    Scalers are also very important to ML frameworks. Many model types, notably neural networks among
    others, have a significant increase in performance when the input features are normalized into
    comparable scales. `MinMaxScaler` extends `RunningStats` to keep track of the minimum and maximum of
    each marked feature. Then, it computes a simple normalization technique to scale the output between
    `0` and `1` by subtracting the minimum value found and dividing by the range (max - min) for each
    sample. Example:

    ```python
    arr = [random.random() * 100 for _ in range(100)]
    transformer = MinMaxScaler()
    transformer.fit(arr)
    transformer.transform(arr)[:10]
    # Output:
    # array([0.84741866, 0.76052674, 0.42148772, 0.25903933, 0.51263612,
    #        0.40577351, 0.7864978 , 0.30365325, 0.47778812, 0.58509741])
    ```
    """
    def __init__(
        self,
        columns: Union[Dict, Iterable[int]] = None,
        output_range: Tuple[int, int] = None,
        verbose: bool = False,
    ):
        """
        Parameters
        ----------
        columns : dict, Iterable[int]
            Indices of the input columns to scale; defaults to ``[0]``
            (only the first column is scaled).
        output_range : Tuple[int, int]
            Desired output interval, defaulting to ``(0, 1)``.  It is
            forwarded to ``RunningStats`` and stored on the instance, but
            ``transform`` below always maps into [0, 1].
            NOTE(review): confirm whether the parent class applies the range
            or whether this parameter is effectively unused.
        verbose : bool
            Verbosity flag forwarded to the ``RunningStats`` base class.
        """
        if columns is None:
            columns = [0]  # by default only the first column is tracked/scaled
        super().__init__(output_range=output_range, columns=columns, verbose=verbose)
        self.output_range = output_range or (0, 1)

    def transform(self, X: Iterable[Iterable]):
        """Scale each tracked column of ``X`` to [0, 1] using the running
        min/max accumulated by the ``RunningStats`` base class."""
        X = self.check_X(X)
        for i, col in enumerate(X):
            if i not in self.columns_:
                continue  # untracked columns pass through untouched
            X[i] = (col - self.min_[i]) / (self.max_[i] - self.min_[i])
        # Reshape as vector if input was vector
        if self.input_is_vector_:
            X = X[0]
        return X

    def inverse_transform(self, X):
        """Undo :meth:`transform`: map scaled values back to the original range."""
        X = self.check_X(X, ensure_shape=False, ensure_dtype=False)
        for i, col in enumerate(X):
            if i not in self.columns_:
                continue
            X[i] = col * (self.max_[i] - self.min_[i]) + self.min_[i]
        # Reshape as vector if input was vector
        if self.input_is_vector_:
            X = X[0]
        return X

    def on_input_shape_changed(self, change_map: ChangeMap):
        """React to upstream feature additions/removals."""
        # Parent's callback will take care of adapting feature changes
        super().on_input_shape_changed(change_map)
        # This transformer does not change shape of input, so we must propagate the change upwards
        self.on_output_shape_changed(change_map)
class StandardScaler(RunningStats):
    """Standardize marked feature columns to zero mean and unit variance.

    Builds on :class:`RunningStats`, which maintains running estimates of
    the mean and standard deviation for every tracked column.  ``transform``
    computes the classic standardization ``(x - mean) / stdev`` (see
    https://en.wikipedia.org/wiki/Feature_scaling#Standardization) and
    ``inverse_transform`` reverses that mapping.

    ```python
    arr = [random.random() * 100 for _ in range(100)]
    transformer = StandardScaler()
    transformer.fit(arr)
    transformer.transform(arr)[:10]
    ```
    """

    def __init__(self, columns: Union[Dict, Iterable[int]] = None, verbose: bool = False):
        """
        Parameters
        ----------
        columns : dict, Iterable[int]
            Indices of the columns to standardize; ``None`` means column 0.
        verbose : bool
            Verbosity flag forwarded to :class:`RunningStats`.
        """
        super().__init__(columns=[0] if columns is None else columns, verbose=verbose)

    def transform(self, X: Iterable[Iterable]):
        """Return ``X`` with every tracked column mapped to ``(x - mean) / stdev``."""
        X = self.check_X(X)
        for index, column in enumerate(X):
            if index in self.columns_:
                X[index] = (column - self.mean_[index]) / self.stdev_[index]
        # A vector input comes back from check_X as a single-column matrix;
        # unwrap it so the caller gets back the shape it passed in.
        return X[0] if self.input_is_vector_ else X

    def inverse_transform(self, X):
        """Map standardized values back to the original scale."""
        X = self.check_X(X, ensure_shape=False, ensure_dtype=False)
        for index, column in enumerate(X):
            if index in self.columns_:
                X[index] = column * self.stdev_[index] + self.mean_[index]
        return X[0] if self.input_is_vector_ else X

    def on_input_shape_changed(self, change_map: ChangeMap):
        """Adapt tracked columns to an upstream shape change and re-emit it."""
        # RunningStats remaps the tracked per-feature statistics.
        super().on_input_shape_changed(change_map)
        # This transformer preserves shape, so downstream consumers must
        # observe the same change.
        self.on_output_shape_changed(change_map)
| StarcoderdataPython |
1685086 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import sure # noqa
import moto.server as server
from moto import mock_secretsmanager
"""
Test the different server responses for secretsmanager
"""
DEFAULT_SECRET_NAME = "test-secret"
@mock_secretsmanager
def test_get_secret_value():
    """A created secret can be read back via GetSecretValue."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    # Response intentionally ignored; the call only seeds the secret.
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foo-secret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    get_secret = test_client.post(
        "/",
        data={"SecretId": DEFAULT_SECRET_NAME, "VersionStage": "AWSCURRENT"},
        headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
    )
    json_data = json.loads(get_secret.data.decode("utf-8"))
    assert json_data["SecretString"] == "foo-secret"


@mock_secretsmanager
def test_get_secret_that_does_not_exist():
    """GetSecretValue on an unknown secret returns ResourceNotFoundException."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    get_secret = test_client.post(
        "/",
        data={"SecretId": "i-dont-exist", "VersionStage": "AWSCURRENT"},
        headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
    )
    json_data = json.loads(get_secret.data.decode("utf-8"))
    assert json_data["message"] == "Secrets Manager can't find the specified secret."
    assert json_data["__type"] == "ResourceNotFoundException"


@mock_secretsmanager
def test_get_secret_that_does_not_match():
    """GetSecretValue with a non-matching SecretId returns ResourceNotFoundException."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foo-secret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    get_secret = test_client.post(
        "/",
        data={"SecretId": "i-dont-match", "VersionStage": "AWSCURRENT"},
        headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
    )
    json_data = json.loads(get_secret.data.decode("utf-8"))
    assert json_data["message"] == "Secrets Manager can't find the specified secret."
    assert json_data["__type"] == "ResourceNotFoundException"


@mock_secretsmanager
def test_get_secret_that_has_no_value():
    """A secret created without a value has no AWSCURRENT staged version."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    get_secret = test_client.post(
        "/",
        data={"SecretId": DEFAULT_SECRET_NAME},
        headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
    )
    json_data = json.loads(get_secret.data.decode("utf-8"))
    assert (
        json_data["message"]
        == "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT"
    )
    assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_create_secret():
    """CreateSecret returns an ARN and echoes the requested name."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    res = test_client.post(
        "/",
        data={"Name": "test-secret", "SecretString": "foo-secret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    res_2 = test_client.post(
        "/",
        data={"Name": "test-secret-2", "SecretString": "bar-secret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    json_data = json.loads(res.data.decode("utf-8"))
    assert json_data["ARN"] != ""
    assert json_data["Name"] == "test-secret"
    json_data_2 = json.loads(res_2.data.decode("utf-8"))
    assert json_data_2["ARN"] != ""
    assert json_data_2["Name"] == "test-secret-2"


@mock_secretsmanager
def test_describe_secret():
    """DescribeSecret returns metadata for each created secret."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    # Creation responses intentionally ignored; the calls only seed state.
    test_client.post(
        "/",
        data={"Name": "test-secret", "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    describe_secret = test_client.post(
        "/",
        data={"SecretId": "test-secret"},
        headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
    )
    test_client.post(
        "/",
        data={"Name": "test-secret-2", "SecretString": "barsecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    describe_secret_2 = test_client.post(
        "/",
        data={"SecretId": "test-secret-2"},
        headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
    )
    json_data = json.loads(describe_secret.data.decode("utf-8"))
    assert json_data  # Returned dict is not empty
    assert json_data["ARN"] != ""
    assert json_data["Name"] == "test-secret"
    json_data_2 = json.loads(describe_secret_2.data.decode("utf-8"))
    assert json_data_2  # Returned dict is not empty
    assert json_data_2["ARN"] != ""
    assert json_data_2["Name"] == "test-secret-2"


@mock_secretsmanager
def test_describe_secret_that_does_not_exist():
    """DescribeSecret on an unknown secret returns ResourceNotFoundException."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    describe_secret = test_client.post(
        "/",
        data={"SecretId": "i-dont-exist"},
        headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
    )
    json_data = json.loads(describe_secret.data.decode("utf-8"))
    assert json_data["message"] == "Secrets Manager can't find the specified secret."
    assert json_data["__type"] == "ResourceNotFoundException"


@mock_secretsmanager
def test_describe_secret_that_does_not_match():
    """DescribeSecret with a non-matching SecretId returns ResourceNotFoundException."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    describe_secret = test_client.post(
        "/",
        data={"SecretId": "i-dont-match"},
        headers={"X-Amz-Target": "secretsmanager.DescribeSecret"},
    )
    json_data = json.loads(describe_secret.data.decode("utf-8"))
    assert json_data["message"] == "Secrets Manager can't find the specified secret."
    assert json_data["__type"] == "ResourceNotFoundException"
@mock_secretsmanager
def test_rotate_secret():
    """RotateSecret echoes the secret metadata and the supplied token as VersionId."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    # Restored after data anonymization: the token must be 32-64 characters
    # long (see the *_client_request_token_too_short/too_long tests below).
    client_request_token = "EB20DC0A-EA4A-44E3-A53B-B4465A7D0575"
    rotate_secret = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "ClientRequestToken": client_request_token,
        },
        headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
    )
    json_data = json.loads(rotate_secret.data.decode("utf-8"))
    assert json_data  # Returned dict is not empty
    assert json_data["ARN"] != ""
    assert json_data["Name"] == DEFAULT_SECRET_NAME
    assert json_data["VersionId"] == client_request_token
# @mock_secretsmanager
# def test_rotate_secret_enable_rotation():
# backend = server.create_backend_app('secretsmanager')
# test_client = backend.test_client()
# create_secret = test_client.post(
# '/',
# data={
# "Name": "test-secret",
# "SecretString": "foosecret"
# },
# headers={
# "X-Amz-Target": "secretsmanager.CreateSecret"
# },
# )
# initial_description = test_client.post(
# '/',
# data={
# "SecretId": "test-secret"
# },
# headers={
# "X-Amz-Target": "secretsmanager.DescribeSecret"
# },
# )
# json_data = json.loads(initial_description.data.decode("utf-8"))
# assert json_data # Returned dict is not empty
# assert json_data['RotationEnabled'] is False
# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0
# rotate_secret = test_client.post(
# '/',
# data={
# "SecretId": "test-secret",
# "RotationRules": {"AutomaticallyAfterDays": 42}
# },
# headers={
# "X-Amz-Target": "secretsmanager.RotateSecret"
# },
# )
# rotated_description = test_client.post(
# '/',
# data={
# "SecretId": "test-secret"
# },
# headers={
# "X-Amz-Target": "secretsmanager.DescribeSecret"
# },
# )
# json_data = json.loads(rotated_description.data.decode("utf-8"))
# assert json_data # Returned dict is not empty
# assert json_data['RotationEnabled'] is True
# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42
@mock_secretsmanager
def test_rotate_secret_that_does_not_exist():
    """RotateSecret on an unknown secret returns ResourceNotFoundException."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    rotate_secret = test_client.post(
        "/",
        data={"SecretId": "i-dont-exist"},
        headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
    )
    json_data = json.loads(rotate_secret.data.decode("utf-8"))
    assert json_data["message"] == "Secrets Manager can't find the specified secret."
    assert json_data["__type"] == "ResourceNotFoundException"


@mock_secretsmanager
def test_rotate_secret_that_does_not_match():
    """RotateSecret with a non-matching SecretId returns ResourceNotFoundException."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    rotate_secret = test_client.post(
        "/",
        data={"SecretId": "i-dont-match"},
        headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
    )
    json_data = json.loads(rotate_secret.data.decode("utf-8"))
    assert json_data["message"] == "Secrets Manager can't find the specified secret."
    assert json_data["__type"] == "ResourceNotFoundException"


@mock_secretsmanager
def test_rotate_secret_client_request_token_too_short():
    """A ClientRequestToken below 32 characters is rejected."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    # Restored after data anonymization: 23 characters, below the 32-char minimum.
    client_request_token = "ED9F8B6C-85B7-446A-B7E4"
    rotate_secret = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "ClientRequestToken": client_request_token,
        },
        headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
    )
    json_data = json.loads(rotate_secret.data.decode("utf-8"))
    assert json_data["message"] == "ClientRequestToken must be 32-64 characters long."
    assert json_data["__type"] == "InvalidParameterException"


@mock_secretsmanager
def test_rotate_secret_client_request_token_too_long():
    """A ClientRequestToken above 64 characters is rejected."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    # Restored after data anonymization: 73 characters, above the 64-char maximum.
    client_request_token = (
        "ED9F8B6C-85B7-446A-B7E4-C2BDDC49F2EA-" "ED9F8B6C-85B7-446A-B7E4-C2BDDC49F2EA"
    )
    rotate_secret = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "ClientRequestToken": client_request_token,
        },
        headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
    )
    json_data = json.loads(rotate_secret.data.decode("utf-8"))
    assert json_data["message"] == "ClientRequestToken must be 32-64 characters long."
    assert json_data["__type"] == "InvalidParameterException"


@mock_secretsmanager
def test_rotate_secret_rotation_lambda_arn_too_long():
    """A RotationLambdaARN above 2048 characters is rejected."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    rotation_lambda_arn = "85B7-446A-B7E4" * 147  # == 2058 characters
    rotate_secret = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "RotationLambdaARN": rotation_lambda_arn,
        },
        headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
    )
    json_data = json.loads(rotate_secret.data.decode("utf-8"))
    assert json_data["message"] == "RotationLambdaARN must <= 2048 characters long."
    assert json_data["__type"] == "InvalidParameterException"
@mock_secretsmanager
def test_put_secret_value_puts_new_secret():
    """PutSecretValue twice still lets the latest version be retrieved by id."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": "foosecret",
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    put_second_secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": "foosecret",
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    second_secret_json_data = json.loads(
        put_second_secret_value_json.data.decode("utf-8")
    )
    version_id = second_secret_json_data["VersionId"]
    secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "VersionId": version_id,
            "VersionStage": "AWSCURRENT",
        },
        headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
    )
    # Renamed: the original reused ``second_secret_json_data`` for two
    # different responses, which obscured what was being asserted.
    fetched_json_data = json.loads(secret_value_json.data.decode("utf-8"))
    assert fetched_json_data
    assert fetched_json_data["SecretString"] == "foosecret"


@mock_secretsmanager
def test_put_secret_value_can_get_first_version_if_put_twice():
    """An older version remains retrievable by its VersionId after a second put."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    first_secret_string = "first_secret"
    second_secret_string = "second_secret"
    put_first_secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": first_secret_string,
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    first_secret_json_data = json.loads(
        put_first_secret_value_json.data.decode("utf-8")
    )
    first_secret_version_id = first_secret_json_data["VersionId"]
    test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": second_secret_string,
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    get_first_secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "VersionId": first_secret_version_id,
            "VersionStage": "AWSCURRENT",
        },
        headers={"X-Amz-Target": "secretsmanager.GetSecretValue"},
    )
    get_first_secret_json_data = json.loads(
        get_first_secret_value_json.data.decode("utf-8")
    )
    assert get_first_secret_json_data
    assert get_first_secret_json_data["SecretString"] == first_secret_string


@mock_secretsmanager
def test_put_secret_value_versions_differ_if_same_secret_put_twice():
    """Two puts of the same value still produce distinct VersionIds."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    put_first_secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": "secret",
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    first_secret_json_data = json.loads(
        put_first_secret_value_json.data.decode("utf-8")
    )
    first_secret_version_id = first_secret_json_data["VersionId"]
    put_second_secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": "secret",
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    second_secret_json_data = json.loads(
        put_second_secret_value_json.data.decode("utf-8")
    )
    second_secret_version_id = second_secret_json_data["VersionId"]
    assert first_secret_version_id != second_secret_version_id


@mock_secretsmanager
def test_can_list_secret_version_ids():
    """ListSecretVersionIds returns every stored version id."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    put_first_secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": "secret",
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    first_secret_json_data = json.loads(
        put_first_secret_value_json.data.decode("utf-8")
    )
    first_secret_version_id = first_secret_json_data["VersionId"]
    put_second_secret_value_json = test_client.post(
        "/",
        data={
            "SecretId": DEFAULT_SECRET_NAME,
            "SecretString": "secret",
            "VersionStages": ["AWSCURRENT"],
        },
        headers={"X-Amz-Target": "secretsmanager.PutSecretValue"},
    )
    second_secret_json_data = json.loads(
        put_second_secret_value_json.data.decode("utf-8")
    )
    second_secret_version_id = second_secret_json_data["VersionId"]
    list_secret_versions_json = test_client.post(
        "/",
        data={"SecretId": DEFAULT_SECRET_NAME},
        headers={"X-Amz-Target": "secretsmanager.ListSecretVersionIds"},
    )
    versions_list = json.loads(list_secret_versions_json.data.decode("utf-8"))
    returned_version_ids = [v["VersionId"] for v in versions_list["Versions"]]
    # Bug fix: list.sort() returns None, so the original compared
    # ``None == None`` and always passed.  Compare sorted copies instead.
    assert sorted([first_secret_version_id, second_secret_version_id]) == sorted(
        returned_version_ids
    )
@mock_secretsmanager
def test_get_resource_policy_secret():
    """GetResourcePolicy returns the secret's ARN and name."""
    backend = server.create_backend_app("secretsmanager")
    test_client = backend.test_client()
    test_client.post(
        "/",
        data={"Name": "test-secret", "SecretString": "foosecret"},
        headers={"X-Amz-Target": "secretsmanager.CreateSecret"},
    )
    # Renamed from ``describe_secret``: this response comes from
    # GetResourcePolicy, not DescribeSecret.
    get_policy = test_client.post(
        "/",
        data={"SecretId": "test-secret"},
        headers={"X-Amz-Target": "secretsmanager.GetResourcePolicy"},
    )
    json_data = json.loads(get_policy.data.decode("utf-8"))
    assert json_data  # Returned dict is not empty
    assert json_data["ARN"] != ""
    assert json_data["Name"] == "test-secret"
#
# The following tests should work, but fail on the embedded dict in
# RotationRules. The error message suggests a problem deeper in the code, which
# needs further investigation.
#
# @mock_secretsmanager
# def test_rotate_secret_rotation_period_zero():
# backend = server.create_backend_app('secretsmanager')
# test_client = backend.test_client()
# create_secret = test_client.post('/',
# data={"Name": "test-secret",
# "SecretString": "foosecret"},
# headers={
# "X-Amz-Target": "secretsmanager.CreateSecret"
# },
# )
# rotate_secret = test_client.post('/',
# data={"SecretId": "test-secret",
# "RotationRules": {"AutomaticallyAfterDays": 0}},
# headers={
# "X-Amz-Target": "secretsmanager.RotateSecret"
# },
# )
# json_data = json.loads(rotate_secret.data.decode("utf-8"))
# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
# assert json_data['__type'] == 'InvalidParameterException'
# @mock_secretsmanager
# def test_rotate_secret_rotation_period_too_long():
# backend = server.create_backend_app('secretsmanager')
# test_client = backend.test_client()
# create_secret = test_client.post('/',
# data={"Name": "test-secret",
# "SecretString": "foosecret"},
# headers={
# "X-Amz-Target": "secretsmanager.CreateSecret"
# },
# )
# rotate_secret = test_client.post('/',
# data={"SecretId": "test-secret",
# "RotationRules": {"AutomaticallyAfterDays": 1001}},
# headers={
# "X-Amz-Target": "secretsmanager.RotateSecret"
# },
# )
# json_data = json.loads(rotate_secret.data.decode("utf-8"))
# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
# assert json_data['__type'] == 'InvalidParameterException'
| StarcoderdataPython |
3243950 | from .. import ast
from .base import BaseNodeTransformer
class RaiseFromTransformer(BaseNodeTransformer):
    """Strip the ``from <cause>`` clause from ``raise`` statements.

    Rewrites ``raise TypeError('Bad') from exc`` into
    ``raise TypeError('Bad')`` for targets without exception chaining.
    """
    target = (2, 7)

    def visit_Raise(self, node: ast.Raise) -> ast.Raise:
        cause = node.cause
        if cause:
            # Dropping the cause is a modification of the tree; record it so
            # the compiler knows this file changed.
            node.cause = None
            self._tree_changed = True
        return self.generic_visit(node)  # type: ignore
| StarcoderdataPython |
3384445 | # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 <EMAIL> 获取。
from rqalpha.api import api_base, api_extension
def get_apis():
    """Collect every public API callable exported by ``api_base`` and
    ``api_extension`` into a single ``{name: object}`` mapping.

    Names exported by both modules resolve to the ``api_extension``
    version, matching the original update order.
    """
    apis = {}
    for module in (api_base, api_extension):
        for name in module.__all__:
            apis[name] = getattr(module, name)
    return apis
| StarcoderdataPython |
3289051 | <reponame>WhitePaper233/ShinoBot
# -*- coding: utf-8 -*-
async def get_weather_of_city(city: str) -> str:
    """Return the (placeholder) weather report string for *city*."""
    # Fix: the original return line carried trailing dataset-separator junk
    # ("| StarcoderdataPython |") that made it invalid Python.
    return f'{city}的天气是……'
196607 | from queue import Queue
import tldextract
from time import sleep
from requests_html import HTMLSession
from concurrent.futures import ThreadPoolExecutor
from collections import OrderedDict
def spider(urls_q):
    """Worker that crawls URLs from ``urls_q`` and harvests external domains.

    Internal links (containing the module-level ``gen_domain``) are queued
    for further crawling and remembered in ``internal_urls``; every new
    external domain is appended to ``domain_database.txt`` and to
    ``<gen_domain>_all_external.txt``.  The worker stops once the queue has
    stayed empty for 5 seconds.
    """
    while True:
        # Stop only if the queue is still empty after a grace period, so
        # slower sibling workers get a chance to refill it.
        if urls_q.empty():
            sleep(5)
            if urls_q.empty():
                print('queue is empty')
                break
        # Taken outside the try so the error handler can always log it.
        url = urls_q.get()
        try:
            # Extract all absolute links from the page.
            r = session.get(url)
            links = r.html.absolute_links
            for link in links:
                # Same-site links are queued for further crawling.
                if gen_domain in link:
                    if link not in internal_urls:
                        urls_q.put(link)
                        internal_urls.add(link)
                    continue
                ext = tldextract.extract(link)
                domain = ext.domain + '.' + ext.suffix
                # Workaround ("kostil"): re-read the database file on every
                # link so concurrent workers see each other's writes.
                with open('domain_database.txt', 'r', encoding='UTF-8') as file_domain_database:
                    already_known = domain in file_domain_database.read()
                if already_known:
                    continue
                # Record only previously-unseen domains.
                with open('domain_database.txt', 'a', encoding='UTF-8') as file_domain_database:
                    file_domain_database.write(domain + '\n')
                with open(gen_domain + '_all_external.txt', 'a', encoding='utf-8') as domain_external_domains:
                    domain_external_domains.write(domain + '\n')
                print(domain)
        except Exception as error:
            # Bug fix: the original logged ``domain`` here, which is unbound
            # when the failure happens before any external link was parsed
            # (raising NameError inside the handler).  Log the URL instead.
            with open(gen_domain + '_error.txt', 'a', encoding='UTF-8') as file_error:
                file_error.write(url + '\n')
            print('Error:', str(error), str(type(error)))
# --- crawler configuration --------------------------------------------------
gen_domain = 'wikipedia.org'  # domain treated as "internal" while crawling
start_url = 'https://en.wikipedia.org/wiki/Main_Page'  # seed URL for the crawl
# Same-site URLs already queued (shared by all worker threads).
internal_urls = set()
# external_domains = set()  # kept on disk instead; see spider() above
# Work queue shared by all spider threads, seeded with the start URL.
urls_q = Queue()
urls_q.put(start_url)
# NOTE(review): a single HTMLSession is shared across all threads --
# confirm requests-html sessions are safe for concurrent use.
session = HTMLSession()
threads = 10
# Launch the worker pool; the with-block waits for every spider to finish.
with ThreadPoolExecutor(max_workers=threads) as ex:
    for _ in range(threads):
        ex.submit(spider, urls_q)
| StarcoderdataPython |
184631 | <filename>python_kivy_app/conex.py
import socket
class Conexao:
def __init__(self, ip, porta):
self.ip = ip
self.porta =porta
self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dest=((self.ip,self.porta))
self.tcp.connect(dest)
def enviar(self,msg):
self.tcp.sendall(msg)
def receber(self):
msg=self.tcp.recv(1024)
return msg
def fechar(self):
self.tcp.close()
| StarcoderdataPython |
1654306 | from django.apps import AppConfig
class ResumeAppConfig(AppConfig):
    """Django application configuration for the ``resume_app`` package."""
    # Dotted import path of the application this config belongs to.
    name = 'resume_app'
| StarcoderdataPython |
1622089 | <reponame>amir-esmaeili/IUSTCompiler
# Generated from /home/amiresm/Projects/personal/compiler/tac.g4 by ANTLR 4.9.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .tacParser import tacParser
else:
from tacParser import tacParser
# This class defines a complete listener for a parse tree produced by tacParser.
class tacListener(ParseTreeListener):
    """ANTLR-generated listener with empty enter/exit hooks for every rule
    of the ``tac`` grammar.

    NOTE: this class is produced by the ANTLR tool (see the file header);
    subclass it and override the hooks you need instead of editing it.
    """

    # Enter a parse tree produced by tacParser#program.
    def enterProgram(self, ctx:tacParser.ProgramContext):
        pass

    # Exit a parse tree produced by tacParser#program.
    def exitProgram(self, ctx:tacParser.ProgramContext):
        pass

    # Enter a parse tree produced by tacParser#mainClass.
    def enterMainClass(self, ctx:tacParser.MainClassContext):
        pass

    # Exit a parse tree produced by tacParser#mainClass.
    def exitMainClass(self, ctx:tacParser.MainClassContext):
        pass

    # Enter a parse tree produced by tacParser#classDecleration.
    def enterClassDecleration(self, ctx:tacParser.ClassDeclerationContext):
        pass

    # Exit a parse tree produced by tacParser#classDecleration.
    def exitClassDecleration(self, ctx:tacParser.ClassDeclerationContext):
        pass

    # Enter a parse tree produced by tacParser#varDeclaration.
    def enterVarDeclaration(self, ctx:tacParser.VarDeclarationContext):
        pass

    # Exit a parse tree produced by tacParser#varDeclaration.
    def exitVarDeclaration(self, ctx:tacParser.VarDeclarationContext):
        pass

    # Enter a parse tree produced by tacParser#methodDeclaration.
    def enterMethodDeclaration(self, ctx:tacParser.MethodDeclarationContext):
        pass

    # Exit a parse tree produced by tacParser#methodDeclaration.
    def exitMethodDeclaration(self, ctx:tacParser.MethodDeclarationContext):
        pass

    # Enter a parse tree produced by tacParser#typeId.
    def enterTypeId(self, ctx:tacParser.TypeIdContext):
        pass

    # Exit a parse tree produced by tacParser#typeId.
    def exitTypeId(self, ctx:tacParser.TypeIdContext):
        pass

    # Enter a parse tree produced by tacParser#statement.
    def enterStatement(self, ctx:tacParser.StatementContext):
        pass

    # Exit a parse tree produced by tacParser#statement.
    def exitStatement(self, ctx:tacParser.StatementContext):
        pass

    # Enter a parse tree produced by tacParser#expression.
    def enterExpression(self, ctx:tacParser.ExpressionContext):
        pass

    # Exit a parse tree produced by tacParser#expression.
    def exitExpression(self, ctx:tacParser.ExpressionContext):
        pass

    # Enter a parse tree produced by tacParser#identifier.
    def enterIdentifier(self, ctx:tacParser.IdentifierContext):
        pass

    # Exit a parse tree produced by tacParser#identifier.
    def exitIdentifier(self, ctx:tacParser.IdentifierContext):
        pass
del tacParser | StarcoderdataPython |
68776 | from droput_msg.droput_msg import create_app
| StarcoderdataPython |
109913 | <filename>contrail/crawler/s3upload.py
import gzip
import json
import logging
import shutil
import urllib.request
import boto3
from contrail.configuration import config
logger = logging.getLogger('contrail.crawler')
class S3Client:
_session = boto3.Session(
aws_access_key_id=config['AWS']['access_key_id'],
aws_secret_access_key=config['AWS']['secret']
)
_client = _session.client('s3')
def upload_file_from_url(self, url: str, destination: str):
"""
Pulls data from a certain URL, compresses it, and uploads it to S3.
:param url: URL to pull data from.
:param destination: Path within S3 to store data
:return:
"""
logger.info("Uploading file {} to {}".format(url, destination))
tmpfile = "tmp.json"
zipfile = tmpfile + ".gz"
urllib.request.urlretrieve(url, tmpfile)
# source: https://docs.python.org/3/library/gzip.html
with open(tmpfile, 'rb') as f_in:
with gzip.open(zipfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
self._client.upload_file(zipfile, config['AWS']['bucket_name'], destination + ".gz")
def upload_file_from_variable(self, data: dict, destination: str):
"""
Takes data formatted as a Python dictionary, serializes it to JSON,
compresses this JSON file, and uploads it to S3.
:param data: The dictionary/list/object to serialize to JSON.
:param destination: Path within S3 to store data.
"""
logger.info("Uploading raw data to {}".format(destination))
tmpfile = "tmp-rawdata.json"
zipfile = tmpfile + ".gz"
# source: https://stackoverflow.com/questions/49534901/is-there-a-way-to-use-json-dump-with-gzip
with gzip.open(zipfile, 'wt', encoding='ascii') as f_out:
json.dump(data, f_out, indent=2)
self._client.upload_file(zipfile, config['AWS']['bucket_name'], destination + ".gz")
| StarcoderdataPython |
1776333 | <gh_stars>0
import os
# Flask / pepy settings, all secrets are taken from the environment.
# Fix: dataset anonymization replaced several ``os.environ.get`` calls with
# the literal ``<PASSWORD>`` placeholder (invalid syntax); restored to match
# the surrounding pattern.
WTF_CSRF_ENABLED = True
SECRET_KEY = os.environ.get("PEPY_SECRET_KEY")
# Raw connection settings used by the low-level database layer.
DATABASE = {
    "host": os.environ.get("PEPY_DATABASE_HOST"),
    "user": os.environ.get("PEPY_DATABASE_USER"),
    "password": os.environ.get("PEPY_DATABASE_PASSWORD"),
    "database": os.environ.get("PEPY_DATABASE_NAME"),
}
# Same settings in the layout expected by the Orator ORM.
DATABASE_ORATOR = {
    "prope": {
        "driver": "postgres",
        "host": os.environ.get("PEPY_DATABASE_HOST"),
        "user": os.environ.get("PEPY_DATABASE_USER"),
        "password": os.environ.get("PEPY_DATABASE_PASSWORD"),
        "database": os.environ.get("PEPY_DATABASE_NAME"),
    }
}
ADMIN_PASSWORD = os.environ.get("PEPY_ADMIN_PASSWORD")
BQ_CREDENTIALS_FILE = os.environ.get("PEPY_BIGQUERY_CREDENTIALS")
LOGGING_DIR = "logs"
LOGGING_FILE = os.environ.get("PEPY_LOGGING_FILE")
| StarcoderdataPython |
75248 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: push_message.proto
import sys
# Py2/Py3 shim: on Py2 the serialized descriptor blob must be latin-1 encoded.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2

# File descriptor for push_message.proto (machine-generated by protoc; the
# serialized_pb blob encodes the whole schema and must not be hand-edited).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='push_message.proto',
  package='datafilter',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x12push_message.proto\x12\ndatafilter\x1a\x1bgoogle/protobuf/empty.proto\"t\n\x1aPushMessageResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])

# Descriptor for message PushMessageResponseWrapper:
#   int32 code = 1; string codeExplain = 2; string error = 3;
#   google.protobuf.Empty data = 4;
_PUSHMESSAGERESPONSEWRAPPER = _descriptor.Descriptor(
  name='PushMessageResponseWrapper',
  full_name='datafilter.PushMessageResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='datafilter.PushMessageResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='datafilter.PushMessageResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='datafilter.PushMessageResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='datafilter.PushMessageResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=63,
  serialized_end=179,
)

# Link the 'data' field to the well-known Empty message and register the
# descriptors with the default symbol database.
_PUSHMESSAGERESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY
DESCRIPTOR.message_types_by_name['PushMessageResponseWrapper'] = _PUSHMESSAGERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor above.
PushMessageResponseWrapper = _reflection.GeneratedProtocolMessageType('PushMessageResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _PUSHMESSAGERESPONSEWRAPPER,
  '__module__' : 'push_message_pb2'
  # @@protoc_insertion_point(class_scope:datafilter.PushMessageResponseWrapper)
  })
_sym_db.RegisterMessage(PushMessageResponseWrapper)

# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
4838153 | # -*- coding: utf-8 -*-
import mock
import flask
import pytest
import webtest
import sqlalchemy as sa
from flask_sqlalchemy import SQLAlchemy
from nplusone.core import exceptions
from nplusone.ext.flask_sqlalchemy import NPlusOne
from nplusone.ext.flask_sqlalchemy import setup_state
from tests import utils
@pytest.fixture(scope='module', autouse=True)
def setup():
    # Install nplusone's SQLAlchemy instrumentation once for the whole module.
    setup_state()
@pytest.fixture
def db():
    # Fresh Flask-SQLAlchemy extension, bound to an app later in `app`.
    return SQLAlchemy()
@pytest.fixture
def models(db):
    # Namespace with the shared test models (User, Address, Hobby, ...).
    return utils.make_models(db.Model)
@pytest.fixture()
def objects(db, app, models):
    # Seed one user with one address and one hobby, then detach everything so
    # the routes below must re-query (exercising lazy/eager loading).
    hobby = models.Hobby()
    address = models.Address()
    user = models.User(addresses=[address], hobbies=[hobby])
    db.session.add(user)
    db.session.commit()
    db.session.close()
@pytest.fixture
def logger():
    # Mock logger injected into nplusone so tests can assert on warnings.
    return mock.Mock()
@pytest.fixture
def app(db, models, logger):
    # Flask app wired to an in-memory SQLite DB; the mock `logger` receives
    # nplusone output. Yields inside the app context so the schema exists
    # for the whole test.
    app = flask.Flask(__name__)
    app.config['TESTING'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['NPLUSONE_LOGGER'] = logger
    db.init_app(app)
    with app.app_context():
        db.create_all()
        yield app
@pytest.fixture
def wrapper(app):
    # The NPlusOne extension under test, attached to the app.
    return NPlusOne(app)
@pytest.fixture
def routes(app, models, wrapper):
    """Register the endpoints exercised by TestNPlusOne.

    Each route triggers (or deliberately avoids) a specific SQLAlchemy
    loading pattern so the nplusone wrapper's logging can be asserted.
    """
    @app.route('/many_to_one/')
    def many_to_one():
        # Lazy-loads addresses after fetching all users -> expected n+1.
        users = models.User.query.all()
        return str(users[0].addresses)

    @app.route('/many_to_one_one/')
    def many_to_one_one():
        # Single-row fetch: lazy load is not an n+1.
        user = models.User.query.filter_by(id=1).one()
        return str(user.addresses)

    @app.route('/many_to_one_first/')
    def many_to_one_first():
        user = models.User.query.first()
        return str(user.addresses)

    @app.route('/many_to_one_ignore/')
    def many_to_one_ignore():
        # Lazy load happens, but inside an ignore() block -> no warning.
        with wrapper.ignore('lazy_load'):
            users = models.User.query.all()
            return str(users[0].addresses)

    @app.route('/many_to_many/')
    def many_to_many():
        users = models.User.query.all()
        return str(users[0].hobbies)

    @app.route('/many_to_many_impossible/')
    def many_to_many_impossible():
        user = models.User.query.first()
        users = models.User.query.all()  # noqa
        return str(user.hobbies)

    @app.route('/many_to_many_impossible_one/')
    def many_to_many_impossible_one():
        user = models.User.query.one()
        users = models.User.query.all()  # noqa
        return str(user.hobbies)

    @app.route('/eager_join/')
    def eager_join():
        # FIX (consistency): this endpoint exercises *joined* eager loading
        # (cf. eager_join_unused below) but used subqueryload, making it an
        # accidental duplicate of /eager_subquery/.
        users = models.User.query.options(sa.orm.joinedload('hobbies')).all()
        return str(users[0].hobbies if users else None)

    @app.route('/eager_subquery/')
    def eager_subquery():
        users = models.User.query.options(sa.orm.subqueryload('hobbies')).all()
        # Touch class-level descriptor to exercise `None` instance checks
        print(models.User.hobbies)
        return str(users[0].hobbies if users else None)

    @app.route('/eager_join_unused/')
    def eager_join_unused():
        # Eagerly loads hobbies but never reads them -> "unused eager" warning.
        users = models.User.query.options(sa.orm.joinedload('hobbies')).all()
        return str(users[0])

    @app.route('/eager_subquery_unused/')
    def eager_subquery_unused():
        users = models.User.query.options(sa.orm.subqueryload('hobbies')).all()
        return str(users[0])

    @app.route('/eager_nested/')
    def eager_nested():
        # Two-level eager load, both levels consumed -> no warnings.
        hobbies = models.Hobby.query.options(
            sa.orm.joinedload(models.Hobby.users).joinedload(
                models.User.addresses,
            )
        ).all()
        return str(hobbies[0].users[0].addresses)

    @app.route('/eager_nested_unused/')
    def eager_nested_unused():
        # Two-level eager load, neither level consumed -> two warnings.
        hobbies = models.Hobby.query.options(
            sa.orm.joinedload(models.Hobby.users).joinedload(
                models.User.addresses,
            )
        ).all()
        return str(hobbies[0])
@pytest.fixture
def client(app, routes, wrapper):
    # WebTest client; requesting `routes`/`wrapper` ensures both are set up.
    return webtest.TestApp(app)
class TestNPlusOne:
    """Assert which routes do (and do not) produce nplusone log calls.

    The mock `logger` is the NPLUSONE_LOGGER configured on the app, so
    `logger.log` is invoked once per detected issue.
    """

    def test_many_to_one(self, objects, client, logger):
        # Lazy-loading addresses from a multi-row query -> one warning.
        client.get('/many_to_one/')
        assert len(logger.log.call_args_list) == 1
        args = logger.log.call_args[0]
        assert 'User.addresses' in args[1]

    def test_many_to_one_one(self, objects, client, logger):
        # Single-row fetch: no n+1 possible, no warning.
        client.get('/many_to_one_one/')
        assert not logger.log.called

    def test_many_to_one_first(self, objects, client, logger):
        client.get('/many_to_one_first/')
        assert not logger.log.called

    def test_many_to_one_ignore(self, objects, client, logger):
        # wrapper.ignore('lazy_load') suppresses the warning.
        client.get('/many_to_one_ignore/')
        assert not logger.log.called

    def test_many_to_many(self, objects, client, logger):
        client.get('/many_to_many/')
        assert len(logger.log.call_args_list) == 1
        args = logger.log.call_args[0]
        assert 'User.hobbies' in args[1]

    def test_many_to_many_impossible(self, objects, client, logger):
        client.get('/many_to_many_impossible/')
        assert not logger.log.called

    def test_many_to_many_impossible_one(self, objects, client, logger):
        client.get('/many_to_many_impossible_one/')
        assert not logger.log.called

    def test_eager_join(self, objects, client, logger):
        # Eagerly loaded and consumed -> clean.
        client.get('/eager_join/')
        assert not logger.log.called

    def test_eager_subquery(self, objects, client, logger):
        client.get('/eager_subquery/')
        assert not logger.log.called

    def test_eager_join_empty(self, models, objects, client, logger):
        client.get('/eager_join/')
        assert not logger.log.called

    def test_eager_subquery_empty(self, models, objects, client, logger):
        models.User.query.delete()
        client.get('/eager_subquery/')
        assert not logger.log.called

    def test_eager_join_unused(self, objects, client, logger):
        # Eagerly loaded but never read -> one "unused eager load" warning.
        client.get('/eager_join_unused/')
        assert len(logger.log.call_args_list) == 1
        args = logger.log.call_args[0]
        assert 'User.hobbies' in args[1]

    def test_eager_subquery_unused(self, objects, client, logger):
        client.get('/eager_subquery_unused/')
        assert len(logger.log.call_args_list) == 1
        args = logger.log.call_args[0]
        assert 'User.hobbies' in args[1]

    # NOTE(review): these two method names look swapped relative to the URLs
    # they hit (test_eager_nested_unused exercises /eager_nested/ and vice
    # versa) -- the assertions match the URLs, so only the names are off.
    def test_eager_nested_unused(self, app, wrapper, objects, client, logger):
        client.get('/eager_nested/')
        assert not logger.log.called

    def test_eager_nested(self, app, wrapper, objects, client, logger):
        client.get('/eager_nested_unused/')
        assert len(logger.log.call_args_list) == 2
        calls = [call[0] for call in logger.log.call_args_list]
        assert any('Hobby.users' in call[1] for call in calls)
        assert any('User.addresses' in call[1] for call in calls)

    def test_many_to_many_raise(self, app, wrapper, objects, client, logger):
        # NPLUSONE_RAISE escalates warnings to exceptions.
        app.config['NPLUSONE_RAISE'] = True
        with pytest.raises(exceptions.NPlusOneError):
            client.get('/many_to_many/')

    def test_many_to_many_whitelist(self, app, wrapper, objects, client, logger):
        app.config['NPLUSONE_WHITELIST'] = [{'model': 'User'}]
        client.get('/many_to_many/')
        assert not logger.log.called

    def test_many_to_many_whitelist_wildcard(self, app, wrapper, objects, client, logger):
        # fnmatch-style wildcards are honoured in whitelist entries.
        app.config['NPLUSONE_WHITELIST'] = [{'model': 'U*r'}]
        client.get('/many_to_many/')
        assert not logger.log.called

    def test_many_to_many_whitelist_decoy(self, app, wrapper, objects, client, logger):
        # Whitelisting an unrelated model must not suppress the warning.
        app.config['NPLUSONE_WHITELIST'] = [{'model': 'Hobby'}]
        client.get('/many_to_many/')
        assert logger.log.called
| StarcoderdataPython |
3202045 | <reponame>antho214/RayTracing
from .specialtylenses import *
class PN_33_921(AchromatDoubletLens):
    """Edmund Optics #33-921: 75 mm dia. x 100 mm FL achromatic doublet.

    Catalog values in mm: focal lengths fa/fb, surface radii R1-R3, center
    thicknesses tc1/tc2, edge thickness te; n1/n2 are the glass indices.
    """
    def __init__(self):
        # PN for Part number
        super(PN_33_921,self).__init__(fa=100.00,fb=78.32, R1=64.67,R2=-64.67, R3=-343.59,
                                       tc1=26.00, tc2=12.7, te=24.66, n1=1.6700, n2=1.8467, diameter=75,
                                       label="EO #33-921",
                                       url="https://www.edmundoptics.com/p/75mm-dia-x-100mm-fl-vis-0-coated-achromatic-lens/3374/")
class PN_33_922(AchromatDoubletLens):
    """Edmund Optics #33-922: 75 mm dia. x 150 mm FL achromatic doublet."""
    def __init__(self):
        # PN for Part number
        super(PN_33_922,self).__init__(fa=150.00,fb=126.46, R1=92.05,R2=-72.85, R3=-305.87,
                                       tc1=23.2, tc2=23.1, te=36.01, n1=1.6700, n2=1.8467, diameter=75,
                                       label="EO #33-922",
                                       url="https://www.edmundoptics.com/p/75mm-dia-x-150mm-fl-vis-0-coated-achromatic-lens/3376/")
class PN_88_593(AchromatDoubletLens):
    """Edmund Optics #88-593: 75 mm dia. x 200 mm FL achromatic doublet."""
    def __init__(self):
        super(PN_88_593,self).__init__(fa=200.00,fb=187.69, R1=118.81, R2=-96.37, R3=-288.97,
                                       tc1=17.94, tc2=6.00, te=15.42, n1=1.5168, n2=1.6727, diameter=75,
                                       label="EO #88-593",
                                       url="https://www.edmundoptics.com/p/75mm-dia-x-200mm-fl-vis-0deg-coated-achromatic-lens/30844/")
class PN_85_877(AchromatDoubletLens):
    """Edmund Optics #85-877: 6.25 mm dia. x -10 mm FL negative achromat.

    Indices are evaluated at the sodium d-line (587.6 nm) from glass models.
    """
    def __init__(self):
        super(PN_85_877,self).__init__(fa=-10.0,fb=-11.92, R1=-6.55, R2=5.10, R3=89.10,
                                       tc1=1.0, tc2=2.5, te=4.2, n1=N_BAF10.n(0.5876), n2=N_SF10.n(0.5876), diameter=6.25,
                                       label="EO #85-877",
                                       url="https://www.edmundoptics.com/p/625mm-dia-x10mm-fl-vis-nir-coated-negative-achromatic-lens/28478/")
| StarcoderdataPython |
3311439 | import torch
torch.multiprocessing.set_sharing_strategy('file_system')
import logging # noqa
from torch_geometric.data import InMemoryDataset # noqa
import time # noqa
from time import time as now # noqa
import multiprocessing # noqa
import numpy as np # noqa
from .hemibrain_dataset_random import HemibrainDatasetRandom # noqa
from .hemibrain_graph_unmasked import HemibrainGraphUnmasked # noqa
from .hemibrain_graph_masked import HemibrainGraphMasked # noqa
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class HemibrainDatasetRandomInMemory(InMemoryDataset, HemibrainDatasetRandom):
    """Random hemibrain graph dataset fully materialized in memory.

    `process()` fetches all graphs from the database in parallel, collates
    them and caches the result in ``processed_data.pt``; later
    instantiations just load that file.
    """

    def __init__(
            self,
            root,
            config,
            db_name,
            embeddings_collection,
            roi_offset,
            roi_shape,
            length=None,
            save_processed=False):
        # Parent init triggers InMemoryDataset._process() -> process() below.
        HemibrainDatasetRandom.__init__(
            self,
            root=root,
            config=config,
            db_name=db_name,
            embeddings_collection=embeddings_collection,
            roi_offset=roi_offset,
            roi_shape=roi_shape,
            length=length,
            save_processed=save_processed
        )
        # Load the collated tensors written by process().
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self):
        # No raw files: samples come straight from the database.
        return []

    @property
    def processed_file_names(self):
        return ['processed_data.pt']

    def process(self):
        """Fetch all graphs in parallel, collate and cache them to disk."""
        logger.info(
            f'Loading {self.len} graphs with {self.config.num_workers} workers and saving them to {self.root} ...')
        start = time.time()
        # Each worker is re-seeded from OS entropy (np.random.seed with no
        # args) so workers do not share numpy RNG state.
        pool = multiprocessing.Pool(
            processes=self.config.num_workers,
            initializer=np.random.seed,
            initargs=())
        data_mapresult = pool.map_async(
            func=self.get_from_db,
            iterable=range(self.len))
        pool.close()
        pool.join()
        logger.info(f'processed {self.len} in {time.time() - start}s')
        # strange multiprocessing syntax
        start = now()
        data_list = data_mapresult.get()
        logger.info(f'get results from multiprocessing in {now() - start} s')
        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])

    def _process(self):
        # Bypass HemibrainDatasetRandom's override and use the caching
        # implementation from InMemoryDataset.
        InMemoryDataset._process(self)

    def get(self, idx):
        """Return graph *idx*, substituting the previous graph when it is
        larger than config.max_edges.

        NOTE(review): the fallback recurses to (idx - 1) % len; if every
        graph exceeds the limit this never terminates -- confirm upstream
        guarantees at least one small graph.
        """
        data = InMemoryDataset.get(self, idx)
        if data.num_edges > self.config.max_edges:
            logger.warning(
                f'graph {idx} has {data.num_edges} edges, but the limit is set to {self.config.max_edges}.'
                f'\nDuplicating previous graph')
            return self.get((idx - 1) % self.len)
        else:
            return data
| StarcoderdataPython |
3294002 | <reponame>reshng10/Pro
import unittest
from soz_analizi.sekilci import isim_animals
class MyTestCase(unittest.TestCase):
    def test_something(self):
        # NOTE(review): auto-generated placeholder that ALWAYS FAILS
        # (True != False); replace with a real assertion or remove.
        self.assertEqual(True, False)

    def testMethod(self):
        # NOTE(review): the assertion only runs when the animal is present,
        # so a missing 'Adadovşanı' silently passes; assertIn would be a
        # stricter check.
        if (isim_animals.animals.__contains__('Adadovşanı')):
            result = True
            expected = True
            self.assertEqual(result, expected)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4817166 | """Connections module."""
import pickle
import zmq
from .base import ConnectionManager
class TCPConnectionManager(ConnectionManager):
    """
    Manages pool-worker TCP communication over a single ZMQ REP socket.
    """

    def __init__(self, cfg):
        """Bind the REP socket according to *cfg*.

        :param cfg: configuration with ``host`` and ``port`` attributes; a
            ``port`` of 0 requests an OS-assigned (random) port.
        """
        self._context = zmq.Context()
        self._sock = self._context.socket(zmq.REP)
        if cfg.port == 0:
            port_selected = self._sock.bind_to_random_port(
                "tcp://{}".format(cfg.host))
        else:
            self._sock.bind("tcp://{}:{}".format(cfg.host, cfg.port))
            port_selected = cfg.port
        # host:port actually bound -- handed to workers at registration time.
        self._address = '{}:{}'.format(cfg.host, port_selected)

    def register(self, worker):
        """Register a new worker by pointing its transport at this socket."""
        worker.transport.connection = self._sock
        worker.transport.address = self._address

    def accept(self):
        """
        Accepts a new message from worker.

        :return: Message received from worker transport.
        :rtype: ``NoneType`` or
            :py:class:`~testplan.runners.pools.communication.Message`
        """
        try:
            # Non-blocking receive: report "nothing pending" as None instead
            # of blocking the caller.
            return pickle.loads(self._sock.recv(flags=zmq.NOBLOCK))
        except zmq.Again:
            return None

    def close(self):
        """Closes TCP connections."""
        self._sock.close()
        # BUG FIX (resource leak): also terminate the ZMQ context, otherwise
        # its background I/O threads keep running after the socket is closed.
        self._context.term()
| StarcoderdataPython |
1717159 | # -*- coding: utf-8 -*-
"""
Created on 2019-11-26 16:26
@author: a002028
"""
import os
import sys

# Make the package directory importable regardless of the caller's CWD.
# NOTE(review): mutating sys.path in a package __init__ is fragile; prefer
# proper packaging/relative imports -- confirm before changing.
package_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(package_path)

# Package name constant.
name = "algaware"

# Re-export the public submodules.
from algaware import core
from algaware import plot
from algaware import readers | StarcoderdataPython |
107648 | import json
from datetime import datetime
from discord.ext import commands
from .util import send_embed_message
from MongoDB.Connector import Connector
import pathlib
path = pathlib.Path(__file__).parent.absolute()
class Corona(commands.Cog):
    """Discord cog serving COVID-19 statistics from local JSON snapshots."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, description="Given country, it shows the specific cases inside that country, otherwise it shows general information about the virus.")
    async def virus(self, ctx, country: str = None):
        # Per-country stats when a country is given; global totals otherwise.
        if country:
            corona = {}
            with open(path / "CoronaData" / "data.json") as f:
                corona = json.load(f)
            country_data = {}
            # Case-insensitive lookup; for/else fires when no match is found.
            for ctr in corona:
                if country.lower() == ctr["Country"].lower():
                    country_data = ctr
                    break
            else:
                await ctx.send(f"Couldn't find any info about country {country}")
                return

            def convert_to_int(value):
                # Thousands-separated formatting; 'None' when the field is
                # missing, raw value when it is not an integer.
                try:
                    return "{:,}".format(int(country_data[value])) if country_data[value] is not None else 'None'
                except ValueError:
                    return country_data[value]
            # NOTE(review): 'New Cases' bypasses convert_to_int unlike every
            # other field -- possibly unintentional; confirm.
            msg = f"Total Cases : **{convert_to_int('TotalConfirmed')}**\n"
            msg += f"New Cases : **{country_data['NewConfirmed']}**\n"
            msg += f"Total Deaths : **{convert_to_int('TotalDeaths')}**\n"
            msg += f"New Deaths : **{convert_to_int('NewDeaths')}**\n"
            msg += f"Total Recovered : **{convert_to_int('TotalRecovered')}\n**"
            msg += f"New Recovered : **{convert_to_int('NewRecovered')}\n**"
            await send_embed_message(ctx, title=country_data["Country"], content=msg)
        else:
            with open(path / "CoronaData" / "total_inf.json") as f:
                corona = json.load(f)
            msg = ""
            for k in corona:
                if k == 'Date':
                    continue
                msg += f"{k} : **{corona[k]:,}**\n"
            # Parse the timestamp by fixed slicing of 'YYYY-MM-DDTHH:MM:SS'.
            # NOTE(review): datetime.strptime/fromisoformat would be more
            # robust -- confirm the stored format before changing.
            date = datetime(
                int(corona['Date'][:4]),  # year
                int(corona['Date'][5:7]),  # month
                int(corona['Date'][8:10]),  # day
                int(corona['Date'][11:13]),  # hour
                int(corona['Date'][14:16]),  # min
                int(corona['Date'][17:19])  # sec
            )
            last_updated_date = date.strftime("%m-%d-%Y %H:%M:%S")
            await send_embed_message(ctx, title=f"Total Cases",
                                     content=msg, footer_text=f"Last updated {last_updated_date}")

    @commands.command(name="setnewschannel", pass_context=True, description="Sets the current channel as the corona news channel.\nBot will send news about corona virus to this channel after using this command")
    async def set_channel(self, ctx):
        # Requires the Manage Channels permission in the invoking channel.
        if ctx.message.author.permissions_in(ctx.channel).manage_channels:
            MongoDatabase = Connector()
            news_channel_data_path = path / "guild_settings.json"
            data = json.load(open(news_channel_data_path, "r"))
            data["corona_news_channel"][str(ctx.guild.id)] = ctx.channel.id
            # Persist locally and mirror to MongoDB.
            with open(news_channel_data_path, "w") as f:
                json.dump(data, f)
            MongoDatabase.save_guild_settings(data)
            await ctx.send("Succesfully set this channel to be the corona news channel")
        else:
            await ctx.send("You don't have required permissions to do this action.")
def setup(bot):
    # discord.py extension entry point: register the cog with the bot.
    bot.add_cog(Corona(bot))
| StarcoderdataPython |
102937 | <reponame>intgr/django-cms
# Compatibility shims across Django/Python versions.
try:
    # Older Django: force_unicode exists but python_2_unicode_compatible
    # does not, so provide our own implementation of the decorator.
    from django.utils.encoding import force_unicode

    def python_2_unicode_compatible(klass):
        """
        A decorator that defines __unicode__ and __str__ methods under Python 2.
        Under Python 3 it does nothing.

        To support Python 2 and 3 with a single code base, define a __str__ method
        returning text and apply this decorator to the class.
        """
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
        return klass
except ImportError:
    # Newer Django: force_unicode was removed; emulate it and import the
    # stock decorator instead.
    force_unicode = lambda s: str(s)
    from django.utils.encoding import python_2_unicode_compatible  # nopyflakes

try:
    from django.db.models.loading import get_app_paths
except ImportError:
    # Older Django without get_app_paths: rebuild it from get_apps().
    from django.db.models.loading import get_apps
    try:
        from django.utils._os import upath
    except ImportError:
        upath = lambda path: path

    def get_app_paths():
        """
        Returns a list of paths to all installed apps.

        Useful for discovering files at conventional locations inside apps
        (static files, templates, etc.)
        """
        app_paths = []
        for app in get_apps():
            if hasattr(app, '__path__'):  # models/__init__.py package
                app_paths.extend([upath(path) for path in app.__path__])
            else:  # models.py module
                app_paths.append(upath(app.__file__))
        return app_paths
| StarcoderdataPython |
1673450 | <gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
#!/usr/bin/env python3
import glob
import os
from glob import glob
# Regenerate the three *.cmake file lists consumed by Pyro's build:
# library_files, cpython_library_files and frozen_library_files.
SCRIPT_NAME = os.path.basename(__file__)
PYRO_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

# All Python sources shipped in Pyro's own library/ tree.
library_files = set()
os.chdir(f"{PYRO_DIR}/library")
for filename in glob("**/*.py", recursive=True):
    library_files.add(filename)

os.chdir(f"{PYRO_DIR}")
# Modules frozen into the runtime: globbed compiler/collections sources plus
# the explicit list below (paths relative to the repo root).
frozen_library_files = (
    glob("library/compiler/**/*.py", recursive=True)
    + glob("library/collections/*.py")
    + """
library/_builtins.py
library/_bytecode_utils.py
library/_codecs.py
library/_collections.py
library/_compiler.py
library/_compiler_opcode.py
library/_contextvars.py
library/_ctypes.py
library/_frozen_importlib.py
library/_frozen_importlib_external.py
library/_functools.py
library/_imp.py
library/_io.py
library/_json.py
library/_os.py
library/_path.py
library/_signal.py
library/_str_mod.py
library/_thread.py
library/_valgrind.py
library/_warnings.py
library/_weakref.py
library/array.py
library/builtins.py
library/faulthandler.py
library/gc.py
library/inspect.py
library/itertools.py
library/marshal.py
library/mmap.py
library/opcode.py
library/operator.py
library/sys.py
library/types.py
library/typing.py
library/unicodedata.py
third-party/cpython/Lib/__future__.py
third-party/cpython/Lib/_collections_abc.py
third-party/cpython/Lib/_py_abc.py
third-party/cpython/Lib/_weakrefset.py
third-party/cpython/Lib/abc.py
third-party/cpython/Lib/ast.py
third-party/cpython/Lib/codecs.py
third-party/cpython/Lib/collections/abc.py
third-party/cpython/Lib/contextlib.py
third-party/cpython/Lib/copyreg.py
third-party/cpython/Lib/dis.py
third-party/cpython/Lib/enum.py
third-party/cpython/Lib/functools.py
third-party/cpython/Lib/genericpath.py
third-party/cpython/Lib/heapq.py
third-party/cpython/Lib/importlib/__init__.py
third-party/cpython/Lib/importlib/abc.py
third-party/cpython/Lib/importlib/machinery.py
third-party/cpython/Lib/importlib/resources.py
third-party/cpython/Lib/importlib/util.py
third-party/cpython/Lib/io.py
third-party/cpython/Lib/keyword.py
third-party/cpython/Lib/os.py
third-party/cpython/Lib/posixpath.py
third-party/cpython/Lib/re.py
third-party/cpython/Lib/reprlib.py
third-party/cpython/Lib/runpy.py
third-party/cpython/Lib/sre_compile.py
third-party/cpython/Lib/sre_constants.py
third-party/cpython/Lib/sre_parse.py
third-party/cpython/Lib/stat.py
third-party/cpython/Lib/token.py
third-party/cpython/Lib/tokenize.py
third-party/cpython/Lib/warnings.py
third-party/cpython/Lib/weakref.py
third-party/cpython/Lib/zipimport.py
""".strip().split()
)
# Fail fast if the explicit list above has gone stale.
for filename in frozen_library_files:
    assert os.path.exists(filename)

# CPython stdlib files that Pyro does NOT override with its own copy.
cpython_library_files = set()
os.chdir(f"{PYRO_DIR}/third-party/cpython/Lib")
for filename in glob("**/*", recursive=True):
    if os.path.isdir(filename) or "__pycache__/" in filename:
        continue
    if filename in library_files:
        continue
    cpython_library_files.add(filename)

# (output path, cmake variable name, file list) per generated file.
files = [
    (f"{PYRO_DIR}/library_files.cmake", "LIBRARY_FILES", library_files),
    (
        f"{PYRO_DIR}/cpython_library_files.cmake",
        "CPYTHON_LIBRARY_FILES",
        cpython_library_files,
    ),
    (
        f"{PYRO_DIR}/frozen_library_files.cmake",
        "FROZEN_LIBRARY_FILES",
        frozen_library_files,
    ),
]
for filename, define, filelist in files:
    with open(filename, "w") as fp:
        fp.write(
            f"""\
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
# This file was generated by `util/{SCRIPT_NAME}`.
set({define}
"""
        )
        # NOTE(review): the inner loop reuses (shadows) the outer `filename`
        # variable; harmless here because `filename` is not read again after
        # the loop, but worth renaming.
        for filename in sorted(filelist):
            fp.write(filename + "\n")
        fp.write(
            """\
)
"""
        )
| StarcoderdataPython |
3284493 | <filename>match/migrations/0004_systemmessage.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-18 07:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: introduces the SystemMessage model
    # (a per-user notification with content, timestamp and read flag).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('match', '0003_reviewerinvitation'),
    ]

    operations = [
        migrations.CreateModel(
            name='SystemMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('send_time', models.DateTimeField(default=django.utils.timezone.now)),
                ('is_read', models.BooleanField(default=False)),
                ('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='received_system_messages', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
51846 | '''
The cost of a stock on each day is given in an array.
Find the max profit that you can make by buying and selling in those days.
Only 1 stock can be held at a time.
For example:
Array = {100, 180, 260, 310, 40, 535, 695}
The maximum profit can be earned by buying on day 0 and selling on day 3.
Again buy on day 4 and sell on day 6.
If the given array of prices is sorted in decreasing order, then profit cannot be earned at all.
'''
'''
If we are allowed to buy and sell only once, then we can use following algorithm. Maximum difference between two elements. Here we are allowed to buy and sell multiple times.
Following is algorithm for this problem.
1. Find the local minima and store it as starting index. If not exists, return.
2. Find the local maxima and store it as the ending index. If we reach the end, set the end as the ending index.
3. Update the solution (Increment count of buy sell pairs)
4. Repeat the above steps if end is not reached.
Alternate solution:
class Solution {
public int maxProfit(int[] prices) {
int maxprofit = 0;
for (int i = 1; i < prices.length; i++) {
if (prices[i] > prices[i - 1])
maxprofit += prices[i] - prices[i - 1];
}
return maxprofit;
}
}
Explanation - https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/solution/
'''
| StarcoderdataPython |
3385569 | #main script to run for each analysis
import csv

# Read monthly budget data and compute totals, average month-over-month
# change, and the greatest increase/decrease.
# NOTE(review): the input path is under C:\Users\User while the output path
# below is under C:\Users\reema -- one of them is stale; confirm which
# machine this is meant to run on.
with open(r"C:\Users\User\Desktop\python_challenge\Pybank\Budget_Data.csv") as csvfile:
    reader = csv.DictReader(csvfile)
    DatesConsidered = []
    Profit_Loss = []
    Change_data=[]
    previous_amount=0
    total=0
    #next(reader)
    for row in reader:
        # print(row['Date'], row['Profit/Losses'])
        DatesConsidered.append(row['Date'])
        total=total + int(row['Profit/Losses'])
        Profit_Loss.append(row['Profit/Losses'])
    # print (Profit_Loss)
    # Month-over-month change; the first entry is the raw first value
    # (previous_amount starts at 0) and is dropped below.
    for Amount_data in Profit_Loss:
        #print(Profit_Loss[Amount_data])
        Change_data.append(int(Amount_data)- previous_amount)
        previous_amount=int(Amount_data)
    # Drop the first record as it doesnt show any change
    Change_data.pop(0)
    #print (Change_data)
    # +1 offsets Change_data back onto DatesConsidered (it lost its first
    # element relative to the date list).
    max_change=Change_data.index(max(Change_data))
    min_change=Change_data.index(min(Change_data))
    Date_positive_change=DatesConsidered[max_change + 1]
    Date_negative_change=DatesConsidered[min_change + 1]
    # print (max_change)
    # print (min_change)
    # print(Date_positive_change)
    # print(Date_negative_change)
    total_change=sum(Change_data)
    #print (total_change)
    # Deduplicate dates so the month count ignores repeated entries.
    DatesConsidered = set(DatesConsidered)
    #print (DatesConsidered)
    #print (f'No of dates ={len(DatesConsidered)}')
    #print (f'total = {total}')
    Average=0
    Average=total_change/(len(DatesConsidered)-1)
    #print("Printing avg", Average)
    Header1="Financial Analysis"
    Line0="----------------------------"
    Line1= f'Total Months : {len(DatesConsidered)}'
    Line2= f'Total : {total}'
    Line3= f'Average Change : $ {Average} '
    Line4=f'Greatest Increase in Profits : {Date_positive_change} ( $ {max(Change_data)})'
    Line5=f'Greatest Decrease in Profits : {Date_negative_change} ($ {min(Change_data)})'
    print (f'{Header1}')
    print (f'{Line0}')
    print (f'{Line1}')
    print (f'{Line2}')
    print (f'{Line3}')
    print (f'{Line4}')
    print (f'{Line5}')
    # NOTE(review): prefer `with open(...)` for the output file as well.
    file1 = open(r"C:\Users\reema\Desktop\PythonStuff\python-challenge\PyBank\Budget_output_final.csv","w")
    file1.write(f'{Header1}\n')
    file1.write(f'{Line0}\n')
    file1.write(f'{Line1}\n')
    file1.write(f'{Line2}\n')
    file1.write(f'{Line3}\n')
    file1.write(f'{Line4}\n')
    file1.write(f'{Line5}\n')
    file1.close()
| StarcoderdataPython |
1613938 | <gh_stars>0
'''
Created on Nov 4, 2017
@author: kiniap
'''
import csv
import cv2
import numpy as np
import random
import sklearn
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import matplotlib
import matplotlib.pyplot as plt
# Accumulate every CSV row from the simulator driving logs; each row holds
# [center_path, left_path, right_path, steering, ...].
lines = []
'''
Read the various file obtained from training on the simulator
Input: csv files with path to center, left and righ images, and the steering angle command
Output: All the lines from the various csv files
'''
# Original data provided was NOT used in the final model
# with open('./data/driving_log0.csv') as csvfile:
#     reader = csv.reader(csvfile)
#     next(reader)# skip header
#     for line in reader:
#         lines.append(line)
'''
Process data collected while driving CCW on trk1: run1
'''
with open('./data/driving_log_trk1_ccw_r1.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
'''
Process data collected while driving CCW on trk1: run2
'''
with open('./data/driving_log_trk1_ccw_r2.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
'''
Process data collected while driving CW on trk1: run1
'''
with open('./data/driving_log_trk1_cw_r1.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
'''
Define the various parameters of the network
'''
BATCH_SIZE = 128 # batch size of 128 for the generator. Seems to work better than 32
CORRECTION = 0.25 # Steering correction applied to get the left and right steering angles (only center is provided)
EPOCHS = 3 # Number of epochs to run the training for

# Hold out 20% of the recorded samples for validation.
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
print("Number of training samples: ", len(train_samples))
print("Number of validation samples: ", len(validation_samples))
def generator(samples, batch_size=32):
    """Endlessly yield shuffled training batches from driving-log rows.

    For every log row the center, left and right camera frames are loaded,
    converted to RGB and also mirrored, so each row contributes 6 images.
    Left/right steering targets are the center angle +/- CORRECTION.

    :param samples: CSV rows [center_path, left_path, right_path, steering, ...]
    :param batch_size: number of log rows per batch (6x images per yield)
    :yields: shuffled tuple (x_train, y_train) of numpy arrays
    """
    num_samples = len(samples)
    while 1:  # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            camera_images = []
            steering_angles = []
            for batch_sample in batch_samples:
                # --- center camera ---
                file_path = batch_sample[0]
                filename = file_path.split('/')[-1]
                center_image = cv2.imread('./data/IMG/' + filename)
                # cv2.imread returns BGR; the model is trained on RGB.
                center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)
                center_image_flipped = np.fliplr(center_image)
                # BUG FIX: previously read float(line[3]); `line` was the
                # stale module-level CSV loop variable, so EVERY sample got
                # the same steering angle. The angle belongs to this row.
                center_steering = float(batch_sample[3])
                center_steering_flipped = -center_steering
                camera_images.append(center_image)
                camera_images.append(center_image_flipped)
                steering_angles.append(center_steering)
                steering_angles.append(center_steering_flipped)
                # --- left camera: steer harder toward the lane center ---
                file_path = batch_sample[1]
                filename = file_path.split('/')[-1]
                left_image = cv2.imread('./data/IMG/' + filename)
                left_image = cv2.cvtColor(left_image, cv2.COLOR_BGR2RGB)
                left_image_flipped = np.fliplr(left_image)
                left_steering = center_steering + CORRECTION
                left_steering_flipped = -left_steering
                camera_images.append(left_image)
                camera_images.append(left_image_flipped)
                steering_angles.append(left_steering)
                steering_angles.append(left_steering_flipped)
                # --- right camera: mirror correction of the left camera ---
                file_path = batch_sample[2]
                filename = file_path.split('/')[-1]
                right_image = cv2.imread('./data/IMG/' + filename)
                right_image = cv2.cvtColor(right_image, cv2.COLOR_BGR2RGB)
                right_image_flipped = np.fliplr(right_image)
                right_steering = center_steering - CORRECTION
                right_steering_flipped = -right_steering
                camera_images.append(right_image)
                camera_images.append(right_image_flipped)
                steering_angles.append(right_steering)
                steering_angles.append(right_steering_flipped)
            # Make sure we at least have a few images to train on.
            if not camera_images:
                print("\n No camera images in this batch!")
                continue
            x_train = np.array(camera_images)
            y_train = np.array(steering_angles)
            # Column vector expected by the single-output regression head.
            y_train = y_train.reshape(-1, 1)
            yield shuffle(x_train, y_train)
'''
Import the various keras utilities to create and train the model
'''
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras import backend as K
'''
Define the training and validation generator functions
'''
train_generator = generator(train_samples, batch_size=BATCH_SIZE)
validation_generator = generator(validation_samples, batch_size=BATCH_SIZE)
'''
Preprocess the image data
'''
model = Sequential()
# Crop 70 pixel rows from the top (sky) and 25 from the bottom (car hood).
model.add(Cropping2D(cropping=((70,25),(0,0)), input_shape=(160,320,3)))
# Resize to (80, 160) inside the graph so the same preprocessing runs at inference time.
model.add(Lambda(lambda x: K.tf.image.resize_images(x, (80, 160))))
# Normalize pixel values to the range [-0.5, 0.5].
model.add(Lambda(lambda x: (x/255.0)-0.5))
'''
Implement the Lenet architecture : Did not use this for the final model
'''
# model.add(Convolution2D(6, (5,5), activation='relu'))
# model.add(MaxPooling2D())
# model.add(Convolution2D(6, (5,5), activation='relu'))
# model.add(MaxPooling2D())
# model.add(Flatten())
# model.add(Dense(120))
# model.add(Dense(84))
# model.add(Dense(1))
'''
Implement the Nvidia architecture
'''
# Five convolutional layers followed by four fully connected layers,
# ending in a single steering-angle regression output.
model.add(Convolution2D(24, (5,5), strides=(2,2), activation="relu"))
model.add(Convolution2D(36, (5,5), strides=(2,2), activation="relu"))
model.add(Convolution2D(48, (5,5), strides=(2,2), activation="relu"))
model.add(Convolution2D(64, (3,3), activation="relu"))
model.add(Convolution2D(64, (3,3), activation="relu"))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
print(model.summary())
'''
Compile the model with Adam optimizer and use Mean Squared Error as the loss metric
Use a generator to feed data into the model
'''
model.compile(loss='mse', optimizer='adam')
# Each CSV sample yields 6 images (3 cameras x horizontal flip), hence the *6.
history_object = model.fit_generator(train_generator,
                    steps_per_epoch = (len(train_samples)*6)//BATCH_SIZE,
                    validation_data=validation_generator,
                    validation_steps=(len(validation_samples)*6)//BATCH_SIZE,
                    epochs=EPOCHS)
model.save('model_new.h5')
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
| StarcoderdataPython |
1629418 | <gh_stars>0
import numpy as np
from mpi4py import MPI
from SIMP import TO_SIMP, make_Conn_matrix
from PIL import Image
def get_void(nely, nelx):
    """Return a boolean (nely, nelx) mask marking six circular void regions.

    The circle centres sit at fixed fractional positions of the domain and
    the common radius is min(nely, nelx) / 15.  An element (i, j) belongs to
    a void when its 1-based grid point lies strictly inside some circle.
    """
    radius = min(nely, nelx) / 15
    centers = np.array([[1/3, 1/4], [2/3, 1/4], [1/3, 1/2],
                        [2/3, 1/2], [1/3, 3/4], [2/3, 3/4]]) * np.array([[nely, nelx]])
    mask = np.zeros((nely, nelx), dtype=bool)
    for row in range(nely):
        for col in range(nelx):
            # Distance from the 1-based grid point to every circle centre.
            dists = np.sqrt(np.sum((centers - np.array([[row + 1, col + 1]])) ** 2, 1))
            mask[row, col] = radius - np.min(dists) > 0
    return mask
def evaluate(x0, volfrac, void, Iar, cMat):
    """Run one SIMP topology optimization from *x0* and return the compliance.

    All solver parameters are fixed for the 45x90 design domain used in
    this study; only the start design, volume fraction, void mask and the
    connectivity data vary between calls.
    """
    nely, nelx = 45, 90              # fixed design-domain resolution
    penal = 3                        # SIMP penalization exponent
    beta, epsilon_2 = 0.05, 0.25
    max_move = 0.25                  # cap on per-iteration density change
    E0, nu = 1, 0.3                  # base stiffness and Poisson ratio
    _, compliance = TO_SIMP(x0, nelx, nely, volfrac, penal, beta, epsilon_2,
                            max_move, E0, nu, Iar, cMat, False, void,
                            np.zeros((1, nely, nelx)), 0, 0)
    return compliance
def save_as_gs(arr, name):
    """Save *arr* as an 1800x900 grayscale PNG (value 1 -> black, 0 -> white).

    The array is upscaled by nearest-neighbour sampling; the file is written
    to '<name>.png'.
    """
    nely, nelx = arr.shape
    out_w, out_h = 1800, 900
    # Nearest-neighbour source indices for every output row/column.
    rows = (np.arange(out_h) * nely / out_h).astype(int)
    cols = (np.arange(out_w) * nelx / out_w).astype(int)
    upscaled = arr[rows[:, None], cols[None, :]]
    img = Image.fromarray(np.uint8((1 - upscaled) * 255), 'L')
    img.save(name + '.png', format='png')
    return
def save_as_rgb(arr, name):
    """Save *arr* as an 1800x900 RGB PNG to '<name>.png'.

    Colour coding: zero -> white, negative -> blue, positive -> green;
    upscaling is nearest-neighbour.
    """
    nely, nelx = arr.shape
    out_w, out_h = 1800, 900
    # Nearest-neighbour source indices for every output row/column.
    rows = (np.arange(out_h) * nely / out_h).astype(int)
    cols = (np.arange(out_w) * nelx / out_w).astype(int)
    upscaled = arr[rows[:, None], cols[None, :]]
    rgb = np.zeros((out_h, out_w, 3))
    rgb[upscaled == 0] = [255, 255, 255]
    rgb[upscaled < 0] = [0, 0, 200]
    rgb[upscaled > 0] = [100, 250, 0]
    img = Image.fromarray(np.uint8(rgb))
    img.save(name + '.png', format='png')
    return
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
nelx=90
nely=45
volfrac=0.4
void=get_void(nely,nelx)
Iar,cMat=make_Conn_matrix(nelx,nely)
# Load the sampled designs and flatten the first two axes into one sample axis.
X_simp=np.load('../Figure_4/Sample_data/X_simp.npy')
X_simp=X_simp.reshape((X_simp.shape[0]*X_simp.shape[1],X_simp.shape[2],X_simp.shape[3]))
# Binarize densities at 0.5.
X_simp[X_simp<0.5]=0
X_simp[X_simp>=0.5]=1
# Keep only designs whose mean density exactly matches the 0.4 volume fraction.
mean_ok=np.where(np.mean(X_simp,(1,2))==0.4)[0]
X_simp=X_simp[mean_ok]
C=np.load('../Figure_4/Sample_data/C.npy')
C=C.reshape((C.shape[0]*C.shape[1]))
C=C[mean_ok]
# X1: lowest-compliance design, X2: highest-compliance design.
X1=X_simp[np.argmin(C)]
X2=X_simp[np.argmax(C)]
if rank==0:
    save_as_gs(X1,'vis_1_X1')
# Split the difference X2-X1 into two random, disjoint perturbation directions:
# dX1 takes ~7.5% of the changed elements, dX2 the rest (balanced +1/-1 counts).
dX=X2-X1
plus=np.array(np.where(dX>0)).T
minus=np.array(np.where(dX<0)).T
Ip=np.arange(len(plus))
Im=np.arange(len(minus))
np.random.shuffle(Ip)
np.random.shuffle(Im)
dX1=np.zeros_like(X1)
dX2=np.zeros_like(X2)
dX1[plus[Ip[:int(0.075*len(plus))],0],plus[Ip[:int(0.075*len(plus))],1]]=1
dX1[minus[Im[:int(0.075*len(plus))],0],minus[Im[:int(0.075*len(plus))],1]]=-1
dX2[plus[Ip[int(0.075*len(plus)):],0],plus[Ip[int(0.075*len(plus)):],1]]=1
dX2[minus[Im[int(0.075*len(plus)):],0],minus[Im[int(0.075*len(plus)):],1]]=-1
grid_size=101
perrank=int(np.ceil(grid_size**2/size))
comm.Barrier()
# Rank 0's random directions overwrite the locally generated ones on every rank.
dX1=comm.bcast(dX1,root=0)
dX2=comm.bcast(dX2,root=0)
# NOTE(review): the /100 step assumes grid_size == 101 so that A, B span [0, 1];
# confirm before changing grid_size.
A=np.repeat(np.arange(grid_size),grid_size)/100
B=np.tile(np.arange(grid_size),grid_size)/100
# Each rank evaluates its contiguous slice of the (A, B) grid.
A_rank=A[rank*perrank:min((rank+1)*perrank,grid_size**2)]
B_rank=B[rank*perrank:min((rank+1)*perrank,grid_size**2)]
C_rank=np.zeros(perrank)
for i in range(len(A_rank)):
    X_test=X1+dX1*A_rank[i]+dX2*B_rank[i]
    C_rank[i]=evaluate(X_test,0.4,void,Iar,cMat)
if rank==0:
    C=np.zeros(perrank*size)
else:
    C=None
comm.Gather(C_rank,C,root=0)
if rank==0:
    data=np.zeros((grid_size**2,3))
    data[:,0]=A
    data[:,1]=B
    data[:,2]=C[:grid_size**2]
    np.save('2d_projection.npy',data)
    np.save('vis_1_dX1.npy',dX1)
    np.save('vis_1_dX2.npy',dX2)
# NOTE(review): the two calls below run on every rank and read files that rank 0
# writes just above -- non-root ranks may race with those writes; confirm whether
# an `if rank==0` guard is missing here.
save_as_rgb(np.load('vis_1_dX1.npy'),'vis_1_dX1')
save_as_rgb(np.load('vis_1_dX2.npy'),'vis_1_dX2')
4208 | # Copyright 2013 Cloudbase Solutions Srl
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from ceilometer.compute.virt import inspector
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(inspector.InspectorException):
    """Raised for Hyper-V specific inspection failures."""
    pass
class UtilsV2(object):
    """WMI helper for reading Hyper-V VM metrics via root/virtualization/v2.

    NOTE: this is legacy Python 2 code (it uses the `long` builtin) and only
    functions on win32 hosts where the `wmi` module is importable.
    """
    _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
    # WMI class names used for resource and metric lookups.
    _PROC_SETTING = 'Msvm_ProcessorSettingData'
    _SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
    _ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
    _PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
    _STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
    _VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
    _METRICS_ME = 'Msvm_MetricForME'
    _BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
    # Display names of the metric definitions queried below.
    _CPU_METRIC_NAME = 'Aggregated Average CPU Utilization'
    _NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
    _NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
    # Disk metrics are supported from Hyper-V 2012 R2
    _DISK_RD_METRIC_NAME = 'Disk Data Read'
    _DISK_WR_METRIC_NAME = 'Disk Data Written'
    def __init__(self, host='.'):
        """Open WMI connections to *host* ('.' means the local machine)."""
        if sys.platform == 'win32':
            self._init_hyperv_wmi_conn(host)
            self._init_cimv2_wmi_conn(host)
        self._host_cpu_info = None  # lazily cached (MaxClockSpeed, cpu count)
    def _init_hyperv_wmi_conn(self, host):
        """Connect to the Hyper-V virtualization WMI namespace."""
        self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
    def _init_cimv2_wmi_conn(self, host):
        """Connect to the generic cimv2 WMI namespace (host hardware info)."""
        self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
    def get_host_cpu_info(self):
        """Return (max clock speed, number of processors), cached after first call."""
        if not self._host_cpu_info:
            host_cpus = self._conn_cimv2.Win32_Processor()
            self._host_cpu_info = (host_cpus[0].MaxClockSpeed, len(host_cpus))
        return self._host_cpu_info
    def get_all_vms(self):
        """Return (ElementName, Name) tuples for every VM on the host."""
        vms = [(v.ElementName, v.Name) for v in
               self._conn.Msvm_ComputerSystem(['ElementName', 'Name'],
                                              Caption="Virtual Machine")]
        return vms
    def get_cpu_metrics(self, vm_name):
        """Return (cpu used, virtual CPU count, uptime in ms) for *vm_name*."""
        vm = self._lookup_vm(vm_name)
        cpu_sd = self._get_vm_resources(vm, self._PROC_SETTING)[0]
        cpu_metrics_def = self._get_metric_def(self._CPU_METRIC_NAME)
        cpu_metric_aggr = self._get_metrics(vm, cpu_metrics_def)
        cpu_used = 0
        if cpu_metric_aggr:
            cpu_used = long(cpu_metric_aggr[0].MetricValue)
        return (cpu_used,
                int(cpu_sd.VirtualQuantity),
                long(vm.OnTimeInMilliseconds))
    def get_vnic_metrics(self, vm_name):
        """Yield per-vNIC dicts with keys rx_mb, tx_mb, element_name, address."""
        vm = self._lookup_vm(vm_name)
        ports = self._get_vm_resources(vm, self._ETH_PORT_ALLOC)
        vnics = self._get_vm_resources(vm, self._SYNTH_ETH_PORT)
        metric_def_in = self._get_metric_def(self._NET_IN_METRIC_NAME)
        metric_def_out = self._get_metric_def(self._NET_OUT_METRIC_NAME)
        for port in ports:
            # Match the allocation entry with its synthetic vNIC setting data.
            vnic = [v for v in vnics if port.Parent == v.path_()][0]
            metric_value_instances = self._get_metric_value_instances(
                port.associators(wmi_result_class=self._PORT_ACL_SET_DATA),
                self._BASE_METRICS_VALUE)
            metric_values = self._sum_metric_values_by_defs(
                metric_value_instances, [metric_def_in, metric_def_out])
            yield {
                'rx_mb': metric_values[0],
                'tx_mb': metric_values[1],
                'element_name': vnic.ElementName,
                'address': vnic.Address
            }
    def get_disk_metrics(self, vm_name):
        """Yield per-disk dicts with keys read_mb, write_mb, instance_id, host_resource."""
        vm = self._lookup_vm(vm_name)
        metric_def_r = self._get_metric_def(self._DISK_RD_METRIC_NAME)
        metric_def_w = self._get_metric_def(self._DISK_WR_METRIC_NAME)
        disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
        for disk in disks:
            metric_values = self._get_metric_values(
                disk, [metric_def_r, metric_def_w])
            # This is e.g. the VHD file location
            if disk.HostResource:
                host_resource = disk.HostResource[0]
            yield {
                # Values are in megabytes
                'read_mb': metric_values[0],
                'write_mb': metric_values[1],
                'instance_id': disk.InstanceID,
                'host_resource': host_resource
            }
    def _sum_metric_values(self, metrics):
        """Sum the MetricValue of every metric instance as a long."""
        tot_metric_val = 0
        for metric in metrics:
            tot_metric_val += long(metric.MetricValue)
        return tot_metric_val
    def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
        """Return per-definition sums of *element_metrics*, 0 for missing definitions."""
        metric_values = []
        for metric_def in metric_defs:
            if metric_def:
                metrics = self._filter_metrics(element_metrics, metric_def)
                metric_values.append(self._sum_metric_values(metrics))
            else:
                # In case the metric is not defined on this host
                metric_values.append(0)
        return metric_values
    def _get_metric_value_instances(self, elements, result_class):
        """Collect the first *result_class* associator of every element."""
        instances = []
        for el in elements:
            associators = el.associators(wmi_result_class=result_class)
            if associators:
                instances.append(associators[0])
        return instances
    def _get_metric_values(self, element, metric_defs):
        """Sum the metrics associated with *element*, grouped by *metric_defs*."""
        element_metrics = element.associators(
            wmi_association_class=self._METRICS_ME)
        return self._sum_metric_values_by_defs(element_metrics, metric_defs)
    def _lookup_vm(self, vm_name):
        """Return the unique VM named *vm_name* or raise if absent/duplicated."""
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        n = len(vms)
        if n == 0:
            raise inspector.InstanceNotFoundException(
                _('VM %s not found on Hyper-V') % vm_name)
        elif n > 1:
            raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
        else:
            return vms[0]
    def _get_metrics(self, element, metric_def):
        """Return *element*'s metrics matching *metric_def*."""
        return self._filter_metrics(
            element.associators(
                wmi_association_class=self._METRICS_ME), metric_def)
    def _filter_metrics(self, all_metrics, metric_def):
        """Keep only the metrics whose definition id matches *metric_def*."""
        return [v for v in all_metrics if
                v.MetricDefinitionId == metric_def.Id]
    def _get_metric_def(self, metric_def):
        """Return the metric definition named *metric_def*, or None if undefined."""
        metric = self._conn.CIM_BaseMetricDefinition(ElementName=metric_def)
        if metric:
            return metric[0]
    def _get_vm_setting_data(self, vm):
        """Return the realized (non-snapshot) virtual system setting data of *vm*."""
        vm_settings = vm.associators(
            wmi_result_class=self._VS_SETTING_DATA)
        # Avoid snapshots
        return [s for s in vm_settings if
                s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
    def _get_vm_resources(self, vm, resource_class):
        """Return *vm*'s resource instances of type *resource_class*."""
        setting_data = self._get_vm_setting_data(vm)
        return setting_data.associators(wmi_result_class=resource_class)
| StarcoderdataPython |
3302160 | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_utils
from integ_tests.s1aptests.s1ap_utils import S1ApUtil
class TestS1SetupFailureIncorrectTac(unittest.TestCase):
    """S1 Setup Request with incorrect TAC value """
    # NOTE(review): everything below executes at class-definition (import) time,
    # not inside a test_* method, and uses bare `assert` (stripped under -O).
    # Confirm whether this was intended or should be moved into a test method.
    s1ap_utils._s1_util = S1ApUtil()
    print("************************* Enb tester config")
    # Configure the eNB under test with a TAC of 0 (the "incorrect" value).
    req = s1ap_types.FwNbConfigReq_t()
    req.cellId_pr.pres = True
    req.cellId_pr.cell_id = 10
    req.tac_pr.pres = True
    req.tac_pr.tac = 0
    assert (
        s1ap_utils._s1_util.issue_cmd(s1ap_types.tfwCmd.ENB_CONFIG, req) == 0)
    response = s1ap_utils._s1_util.get_response()
    assert (response.msg_type ==
            s1ap_types.tfwCmd.ENB_CONFIG_CONFIRM.value)
    res = response.cast(s1ap_types.FwNbConfigCfm_t)
    assert (res.status == s1ap_types.CfgStatus.CFG_DONE.value)
    # Trigger the S1 Setup and wait for the eNB-side response.
    req = None
    assert (s1ap_utils._s1_util.issue_cmd(
        s1ap_types.tfwCmd.ENB_S1_SETUP_REQ, req) == 0)
    response = s1ap_utils._s1_util.get_response()
    assert (response.msg_type ==
            s1ap_types.tfwCmd.ENB_S1_SETUP_RESP.value)
if __name__ == "__main__":
    # Running the module directly executes the class-level checks via unittest.
    unittest.main()
| StarcoderdataPython |
1674506 | <filename>tests/infura/test_celery.py
from tests.common_tests.celery import (
push_block_range_multiple_thread,
)
def test_infura_push_block_range_multiple_threads(infura_celery_worker,
                                                  infura_settings):
    """Exercise multi-threaded block-range pushing against an Infura backend.

    The fixtures are requested only for their side effects (celery worker
    and Infura settings setup); the shared helper performs the assertions.
    """
    push_block_range_multiple_thread()
| StarcoderdataPython |
2174 | <gh_stars>0
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.entry import Entry
# TODO Add more standard tests
class TestNextSeriesSeasonSeasonsPack(object):
_config = """
templates:
global:
parsing:
series: internal
anchors:
_nss_backfill: &nss_backfill
next_series_seasons:
backfill: yes
_nss_from_start: &nss_from_start
next_series_seasons:
from_start: yes
_nss_backfill_from_start: &nss_backfill_from_start
next_series_seasons:
backfill: yes
from_start: yes
_series_ep_pack: &series_ep_pack
identified_by: ep
tracking: backfill
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_pack: &series_ep_tracking_pack
identified_by: ep
tracking: backfill
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_begin_s02e01: &series_ep_tracking_pack_begin_s02e01
identified_by: ep
tracking: backfill
begin: s02e01
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_begin_s04e01: &series_ep_tracking_pack_begin_s04e01
identified_by: ep
tracking: backfill
begin: s04e01
season_packs:
threshold: 1000
reject_eps: yes
tasks:
inject_series:
series:
settings:
test_series:
season_packs: always
test_series:
- Test Series 1
- Test Series 2
- Test Series 3
- Test Series 4
- Test Series 5
- Test Series 6
- Test Series 7
- Test Series 8
- Test Series 9
- Test Series 10
- Test Series 11
- Test Series 12
- Test Series 13
- Test Series 14
- Test Series 15
- Test Series 16
- Test Series 17
- Test Series 18
- Test Series 19
- Test Series 20
- Test Series 21
- Test Series 22
- Test Series 23
- Test Series 24
- Test Series 25
- Test Series 50
- Test Series 100
test_next_series_seasons_season_pack:
next_series_seasons: yes
series:
- Test Series 1:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_backfill:
<<: *nss_backfill
series:
- Test Series 2:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 3:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_from_start:
<<: *nss_from_start
series:
- Test Series 4:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 5:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 6:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep:
next_series_seasons: yes
series:
- Test Series 7:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_backfill:
<<: *nss_backfill
series:
- Test Series 8:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 9:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start:
<<: *nss_from_start
series:
- Test Series 10:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 11:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 12:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_gap:
next_series_seasons: yes
series:
- Test Series 13:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_backfill:
<<: *nss_backfill
series:
- Test Series 14:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 15:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start:
<<: *nss_from_start
series:
- Test Series 16:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 17:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 18:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap:
next_series_seasons: yes
series:
- Test Series 19:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_backfill:
<<: *nss_backfill
series:
- Test Series 20:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 21:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start:
<<: *nss_from_start
series:
- Test Series 22:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 23:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 24:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_begin_completed:
next_series_seasons: yes
series:
- Test Series 50:
identified_by: ep
begin: S02E01
season_packs:
threshold: 1000
reject_eps: yes
max_reruns: 0
test_next_series_seasons_season_pack_from_start_multirun:
next_series_seasons:
from_start: yes
series:
- Test Series 100:
<<: *series_ep_pack
max_reruns: 0
"""
@pytest.fixture()
    def config(self):
        """Return the raw test config. Season packs aren't supported by guessit yet,
        so the config forces the internal series parser."""
        return self._config
    def inject_series(self, execute_task, release_name):
        """Run the 'inject_series' task with *release_name* injected as a seen
        release (tracking disabled), so later tasks treat it as downloaded."""
        execute_task(
            'inject_series',
            options={'inject': [Entry(title=release_name, url='')], 'disable_tracking': True},
        )
@pytest.mark.parametrize(
"task_name,inject,result_find",
[
('test_next_series_seasons_season_pack', ['Test Series 1 S02'], ['Test Series 1 S03']),
(
'test_next_series_seasons_season_pack_backfill',
['Test Series 2 S02'],
['Test Series 2 S01', 'Test Series 2 S03'],
),
(
'test_next_series_seasons_season_pack_backfill_and_begin',
['Test Series 3 S02'],
['Test Series 3 S03'],
),
(
'test_next_series_seasons_season_pack_from_start',
['Test Series 4 S02'],
['Test Series 4 S03'],
),
(
'test_next_series_seasons_season_pack_from_start_backfill',
['Test Series 5 S02'],
['Test Series 5 S03', 'Test Series 5 S01'],
),
(
'test_next_series_seasons_season_pack_from_start_backfill_and_begin',
['Test Series 6 S02'],
['Test Series 6 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep',
['Test Series 7 S02', 'Test Series 7 S03E01'],
['Test Series 7 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_backfill',
['Test Series 8 S02', 'Test Series 8 S03E01'],
['Test Series 8 S01', 'Test Series 8 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_backfill_and_begin',
['Test Series 9 S02', 'Test Series 9 S03E01'],
['Test Series 9 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start',
['Test Series 10 S02', 'Test Series 10 S03E01'],
['Test Series 10 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start_backfill',
['Test Series 11 S02', 'Test Series 11 S03E01'],
['Test Series 11 S03', 'Test Series 11 S01'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start_backfill_and_begin',
['Test Series 12 S02', 'Test Series 12 S03E01'],
['Test Series 12 S03'],
),
(
'test_next_series_seasons_season_pack_gap',
['Test Series 13 S02', 'Test Series 13 S06'],
['Test Series 13 S07'],
),
(
'test_next_series_seasons_season_pack_gap_backfill',
['Test Series 14 S02', 'Test Series 14 S06'],
[
'Test Series 14 S07',
'Test Series 14 S05',
'Test Series 14 S04',
'Test Series 14 S03',
'Test Series 14 S01',
],
),
(
'test_next_series_seasons_season_pack_gap_backfill_and_begin',
['Test Series 15 S02', 'Test Series 15 S06'],
['Test Series 15 S07', 'Test Series 15 S05', 'Test Series 15 S04'],
),
(
'test_next_series_seasons_season_pack_gap_from_start',
['Test Series 16 S02', 'Test Series 16 S06'],
['Test Series 16 S07'],
),
(
'test_next_series_seasons_season_pack_gap_from_start_backfill',
['Test Series 17 S02', 'Test Series 17 S06'],
[
'Test Series 17 S07',
'Test Series 17 S05',
'Test Series 17 S04',
'Test Series 17 S03',
'Test Series 17 S01',
],
),
(
'test_next_series_seasons_season_pack_gap_from_start_backfill_and_begin',
['Test Series 18 S02', 'Test Series 18 S06'],
['Test Series 18 S07', 'Test Series 18 S05', 'Test Series 18 S04'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap',
['Test Series 19 S02', 'Test Series 19 S06', 'Test Series 19 S07E01'],
['Test Series 19 S07'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_backfill',
['Test Series 20 S02', 'Test Series 20 S06', 'Test Series 20 S07E01'],
[
'Test Series 20 S07',
'Test Series 20 S05',
'Test Series 20 S04',
'Test Series 20 S03',
'Test Series 20 S01',
],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_backfill_and_begin',
['Test Series 21 S02', 'Test Series 21 S06', 'Test Series 21 S07E01'],
['Test Series 21 S07', 'Test Series 21 S05', 'Test Series 21 S04'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start',
['Test Series 22 S02', 'Test Series 22 S03E01', 'Test Series 22 S06'],
['Test Series 22 S07'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill',
['Test Series 23 S02', 'Test Series 23 S03E01', 'Test Series 23 S06'],
[
'Test Series 23 S07',
'Test Series 23 S05',
'Test Series 23 S04',
'Test Series 23 S03',
'Test Series 23 S01',
],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill_and_begin',
['Test Series 24 S02', 'Test Series 24 S03E01', 'Test Series 24 S06'],
['Test Series 24 S07', 'Test Series 24 S05', 'Test Series 24 S04'],
),
(
'test_next_series_seasons_season_pack_begin_completed',
['Test Series 50 S02'],
['Test Series 50 S03'],
),
],
)
    def test_next_series_seasons(self, execute_task, task_name, inject, result_find):
        """Inject the given releases, run *task_name*, and assert that exactly
        the titles in *result_find* are emitted (no more, no fewer)."""
        for entity_id in inject:
            self.inject_series(execute_task, entity_id)
        task = execute_task(task_name)
        for result_title in result_find:
            assert task.find_entry(title=result_title)
        # Ensure no unexpected extra entries were produced.
        assert len(task.all_entries) == len(result_find)
# Tests which require multiple tasks to be executed in order
# Each run_parameter is a tuple of lists: [task name, list of series ID(s) to inject, list of result(s) to find]
@pytest.mark.parametrize(
"run_parameters",
[
(
[
'test_next_series_seasons_season_pack_from_start_multirun',
[],
['Test Series 100 S01'],
],
[
'test_next_series_seasons_season_pack_from_start_multirun',
[],
['Test Series 100 S02'],
],
)
],
)
    def test_next_series_seasons_multirun(self, execute_task, run_parameters):
        """Execute several ordered stages; each stage is a triple of
        [task name, releases to inject, expected emitted titles]."""
        for this_test in run_parameters:
            for entity_id in this_test[1]:
                self.inject_series(execute_task, entity_id)
            task = execute_task(this_test[0])
            for result_title in this_test[2]:
                assert task.find_entry(title=result_title)
            # Each stage must emit exactly its expected titles.
            assert len(task.all_entries) == len(this_test[2])
| StarcoderdataPython |
3240237 | <gh_stars>0
"""Handle the console interface for the ClassifierNetwork package.
This package, while intended to be used with the ModularMailer project, can
be installed and used a standalone application, which this module handles
the interface for. The intent of of this design is so that this package can
be installed by itself without the ModularMailer project as a dependency on
a machine that is optimized for training the network (Linux box w/GPU).
"""
import argparse
import sys
import classifiernetwork.defaults as defaults
SUPPORTED_OBJECTIVES = (
'mean_squared_error',
'mse',
'mean_absolute_error',
'mae',
'mean_absolute_percentage_error',
'mape',
'mean_squared_logarithmic_error',
'msle',
'squared_hinge',
'hinge',
'binary_crossentropy',
'categorical_crossentropy',
'sparse_categorical_crossentropy',
'kullback_leibler_divergence',
'kld',
'poisson',
'cosine_proximity',
)
SUPPORTED_ACTIVATIONS = (
'softmax',
'softplus',
'softsign',
'relu',
'tanh',
'sigmoid',
'hard_sigmoid',
'linear',
)
def _build_training_subparser(train_parser):
    """Create the options for the 'train' subparser.

    Args:
        train_parser: The argparse subparser registered for the 'train'
            command; it is mutated in place (nothing is returned).
    """
    # Positional arguments: the training data and the output file name.
    train_parser.add_argument(
        'input_vectors', type=str,
        help='Path to the numpy array of input vectors (.npy file).'
    )
    train_parser.add_argument(
        'output_vectors', type=str,
        help='path to the numpy array of output vectors (.npy file)'
    )
    train_parser.add_argument(
        'save_name', type=str, help='Save trained network file name.'
    )
    train_parser.add_argument(
        '-o', '--output-directory', type=str,
        help='Directory for output file. Defaults to input_vectors location.'
    )
    # Network compilation options
    compile_group = train_parser.add_argument_group(
        title='Compilation options',
        description='Options for the structure of the network.'
    )
    compile_group.add_argument(
        '-i', '--hidden-size', type=int,
        help='Size of the hidden layer. Defaults to geometric_mean(in, out).'
    )
    compile_group.add_argument(
        '-a', '--activation', type=str,
        default=defaults.ACTIVATION, choices=SUPPORTED_ACTIVATIONS,
        help='Activation function for the hidden layer (see Keras docs).'
    )
    compile_group.add_argument(
        '-p', '--dropout', type=float, default=defaults.DROPOUT,
        help='Fraction of the input units to drop.'
    )
    compile_group.add_argument(
        '-l', '--loss', type=str,
        default=defaults.LOSS, choices=SUPPORTED_OBJECTIVES,
        help='The string identifier of an optimizer (see Keras docs).'
    )
    # Options for the stochastic gradient descent optimizer
    sgd_group = train_parser.add_argument_group(
        title='Stochastic Gradient Descent optimizer (SGD) options',
        description='The network is trained using a SGD optimizer.'
    )
    sgd_group.add_argument(
        '-r', '--learning-rate', type=float, default=defaults.LEARNING_RATE,
        help='Learning rate.'
    )
    sgd_group.add_argument(
        # BUG FIX: the help text was a copy-paste of the epochs help
        # ('Number of epochs to train the network.').
        '-m', '--momentum', type=float, default=defaults.MOMENTUM,
        help='Momentum for the SGD optimizer.'
    )
    sgd_group.add_argument(
        '-d', '--decay', type=float, default=defaults.DECAY,
        help='Learning rate decay over each update.'
    )
    sgd_group.add_argument(
        '-n', '--nesterov', action='store_true',
        help='Apply Nesterov momentum to the SGD optimizer.'
    )
    # Options for training the model
    train_group = train_parser.add_argument_group(
        title='Training options',
        description='Options for how the network is to be trained.'
    )
    train_group.add_argument(
        '-e', '--epochs', type=int, default=defaults.EPOCH,
        help='The number of epochs to train the model.'
    )
    train_group.add_argument(
        '-s', '--validation-split', type=float,
        help='Fraction of the data to use as held-out validation data.'
    )
    train_group.add_argument(
        # BUG FIX: was ('--v', '--verbose'), which registered a long option
        # named '--v' (and dest 'v') instead of the intended short flag '-v'
        # with dest 'verbose'.
        '-v', '--verbose', type=int,
        default=defaults.VERBOSE, choices=(0, 1, 2),
        help='0 for no logging, 1 for progress bar, 2 for line per epoch.'
    )
    train_group.add_argument(
        '-b', '--batch-size', type=int,
        help='Number of samples per gradient update.'
    )
def argument_parser(args):
    """Parse command-line *args* and return the populated argparse namespace.

    Args:
        args: The argument list to parse (typically ``sys.argv[1:]``).

    Returns:
        argparse.Namespace: The parsed arguments, with ``command`` set to the
        chosen subcommand.
    """
    cli_parser = argparse.ArgumentParser(
        description='Trains neural networks from labeled input data.'
    )
    # A subcommand is mandatory; currently only 'train' is available.
    command_parsers = cli_parser.add_subparsers(dest='command')
    command_parsers.required = True
    trainer = command_parsers.add_parser(
        'train', help='Train a neural network from the given input.'
    )
    _build_training_subparser(trainer)
    return cli_parser.parse_args(args)
def main():
    """Entry point for the console script usage of this package.

    Parses the command line (which validates the subcommand and options)
    and returns an error code.

    Returns:
        int: Error return code (0 on success).
    """
    argument_parser(sys.argv[1:])
    return 0
| StarcoderdataPython |
3213642 | """Just a prototype for REST-ful."""
from .apis import Resource
class Ping(Resource):
    """Health-check resource: answers a ping with a pong."""
    def get(self):
        """Handle GET by returning the canonical ping response."""
        return {"returned": "pong"}
| StarcoderdataPython |
3258585 | <reponame>decentfox/gapp-login<gh_stars>1-10
from authlib_gino.fastapi_session.gino_app import load_entry_point
from authlib_gino.fastapi_session.models import Identity
from gino import Gino
db = load_entry_point("db", Gino)
class WeChatIdentity(Identity):
    """Identity record extended with WeChat-specific credentials and profile data."""
    # Union ID -- presumably shared across apps of the same WeChat open
    # platform account; TODO confirm.
    wechat_unionid = db.StringProperty()
    wechat_session_key = db.StringProperty()
    wechat_refresh_token = db.StringProperty()
    wechat_user_info = db.ObjectProperty()
    @db.declared_attr
    def wechat_unionid_idx(cls):
        """Partial index on the union ID, restricted to WeChat identity providers."""
        return db.Index(
            "identities_wechat_unionid_idx",
            cls.wechat_unionid,
            postgresql_where=(db.func.starts_with(cls.idp, "WECHAT")),
        )
| StarcoderdataPython |
1717863 | #!/usr/bin/env python
"""
Asset types S3 Group class
Copyright 2020-2021 Leboncoin
Licensed under the Apache License, Version 2.0
Written by <NAME> (<EMAIL>)
"""
# Standard library imports
import logging
from .asset_type import AssetType
# Debug
# from pdb import set_trace as st
LOGGER = logging.getLogger('aws-tower')
class S3Group(AssetType):
    """
    S3Group Asset Type: a collection of S3 bucket assets handled as one unit.
    """
    def __init__(self, name: str):
        super().__init__('S3 buckets', name)
        self.list = []
    def audit(self, security_config):
        """
        Redefinition of audit: audit every bucket and accumulate its issues.
        """
        for bucket in self.list:
            bucket.audit(security_config)
            self.security_issues = self.security_issues + bucket.security_issues
    def get_type(self):
        """
        Redefinition of get_type
        """
        return 'S3'
    def report(self, report, brief=False):
        """
        Add this group to *report* with only the relevant information per bucket.
        """
        if brief:
            group_report = self.report_brief()
        else:
            group_report = {
                bucket.name: bucket.report_brief()
                for bucket in self.list
                if bucket.report_brief()
            }
        report[self.name] = group_report
        return report
    def report_brief(self):
        """
        Return the report in one line
        """
        return ''.join(
            f'[{bucket.name}] {bucket.report_brief()},' for bucket in self.list)
    def finding_description(self, finding_title):
        """
        Return a description of the finding
        """
        # The bucket name is embedded in the title as '[name]'.
        bucket_name = finding_title.split('[')[1].split(']')[0]
        for bucket in self.list:
            if bucket.name == bucket_name:
                return bucket.finding_description(bucket_name)
        return 'S3 bucket not found...'
| StarcoderdataPython |
1688827 | <reponame>medewitt/pantab
__version__ = "1.1.1"
from ._reader import frame_from_hyper, frames_from_hyper
from ._tester import test
from ._writer import frame_to_hyper, frames_to_hyper
__all__ = [
"__version__",
"frame_from_hyper",
"frames_from_hyper",
"frame_to_hyper",
"frames_to_hyper",
"test",
]
| StarcoderdataPython |
3241233 | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import os
import posixpath
from functools import singledispatch
from pyiron_atomistics import Atoms
from pyiron_atomistics.atomistics.master.murnaghan import Murnaghan
from pyiron_base.interfaces.has_groups import HasGroups
from pyiron_gui.wrapper.widgets import ObjectWidget, AtomsWidget, MurnaghanWidget
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "Sep 30, 2021"
# Factory: wrap a pyiron object in the wrapper class registered for its type.
@singledispatch
def PyironWrapper(py_obj, project, rel_path=""):
    # Generic fallback for any object without a registered specialization.
    return BaseWrapper(py_obj, project, rel_path=rel_path)
@PyironWrapper.register
def _(py_obj: Atoms, project, rel_path=""):
    # Atoms structures get the AtomsWidget-backed wrapper.
    return AtomsWrapper(py_obj, project, rel_path=rel_path)
@PyironWrapper.register
def _(py_obj: Murnaghan, project, rel_path=""):
    # Murnaghan jobs get the MurnaghanWidget-backed wrapper.
    return MurnaghanWrapper(py_obj, project, rel_path=rel_path)
class BaseWrapper(HasGroups):
    """Simple wrapper for pyiron objects which extends for basic pyiron functionality (list_nodes ...)"""

    def __init__(self, pyi_obj, project, rel_path=""):
        # pyi_obj: the wrapped pyiron object; project: fallback project used
        # when the wrapped object does not carry one; rel_path: location of
        # the wrapped object relative to that project.
        self._wrapped_object = pyi_obj
        self._project = project
        self._rel_path = rel_path
        self._name = None

    @property
    def name(self):
        return self._name

    @property
    def project(self):
        # Prefer the wrapped object's own project when it has one.
        if hasattr(self._wrapped_object, 'project'):
            return self._wrapped_object.project
        return self._project

    @property
    def path(self):
        if hasattr(self._wrapped_object, 'path'):
            return self._wrapped_object.path
        if hasattr(self.project, 'path'):
            return posixpath.join(self.project.path, self._rel_path)
        raise AttributeError

    @staticmethod
    def _empty_list():
        # Fallback callable mimicking list_nodes/list_groups when the wrapped
        # object provides neither.  Fix: ``__getattr__`` previously returned
        # the attribute ``self._empty_list`` which was not defined on this
        # class (and is not visible anywhere else in this file), so the
        # fallback raised AttributeError instead of behaving like an
        # empty-list callable.
        return []

    def __getitem__(self, item):
        try:
            return self._wrapped_object[item]
        except (IndexError, KeyError, TypeError):
            # Fall back to a project lookup relative to this object's path.
            rel_path = os.path.relpath(posixpath.join(self.path, item), self._project.path)
            if rel_path == '.':
                return self._project
            return self._project[rel_path]

    def __getattr__(self, item):
        # list_nodes/list_groups degrade gracefully to an empty-list callable;
        # everything else is delegated to the wrapped object.
        if item in ['list_nodes', 'list_groups']:
            try:
                return getattr(self._wrapped_object, item)
            except AttributeError:
                return self._empty_list
        return getattr(self._wrapped_object, item)

    def _list_groups(self):
        if hasattr(self._wrapped_object, "list_groups"):
            return self._wrapped_object.list_groups()
        else:
            return []

    def _list_nodes(self):
        if hasattr(self._wrapped_object, "list_nodes"):
            return self._wrapped_object.list_nodes()
        else:
            return []

    def __repr__(self):
        return repr(self._wrapped_object)

    @property
    def gui(self):
        return ObjectWidget(self).gui
class AtomsWrapper(BaseWrapper):
    """Wrapper for Atoms objects; its GUI is provided by AtomsWidget."""
    def __init__(self, pyi_obj, project, rel_path=""):
        super().__init__(pyi_obj, project, rel_path=rel_path)
        self._name = 'structure'  # exposed through the ``name`` property
    @property
    def gui(self):
        return AtomsWidget(self).gui
class MurnaghanWrapper(BaseWrapper):
    """Wrapper for Murnaghan jobs; its GUI is provided by MurnaghanWidget."""
    def __init__(self, pyi_obj, project, rel_path=""):
        super().__init__(pyi_obj, project, rel_path=rel_path)
        self._name = 'murnaghan'  # exposed through the ``name`` property
    @property
    def gui(self):
        return MurnaghanWidget(self).gui
| StarcoderdataPython |
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of *s* without
        repeating characters (classic sliding-window scan).

        :type s: str
        :rtype: int
        """
        if len(s) <= 1:
            return len(s)
        best = 0
        window_start = 0
        last_pos = {}  # character -> index of its most recent occurrence
        for idx, ch in enumerate(s):
            # A repeat inside the current window forces the window to start
            # just past the previous occurrence of ``ch``.
            if ch in last_pos and last_pos[ch] >= window_start:
                window_start = last_pos[ch] + 1
            last_pos[ch] = idx
            current = idx - window_start + 1
            if current > best:
                best = current
        return best
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui',
# licensing of 'main.ui' applies.
#
# Created: Fri Jul 17 14:35:18 2020
# by: pyside2-uic running on PySide2 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_main(object):
    """Auto-generated (pyside2-uic) UI definition for the "main" widget.

    Layout: a header QLabel on top of two stacked QPushButtons inside a
    vertical layout.  Do not edit by hand beyond comments -- regenerated
    from main.ui.
    """
    def setupUi(self, main):
        """Create and configure all child widgets of *main*."""
        main.setObjectName("main")
        main.resize(580, 492)
        main.setStyleSheet("QWidget{\n"
"    border: 0px solid grey;\n"
"    background-color: rgb(255, 131, 6);\n"
"}\n"
"\n"
"QMenu {\n"
"    background-color: white;\n"
"    border: 1px solid #4495D1;\n"
"    padding: 1px;\n"
"}")
        # Outer vertical layout with a fixed spacer above the header label.
        self.verticalLayout = QtWidgets.QVBoxLayout(main)
        self.verticalLayout.setSpacing(26)
        self.verticalLayout.setContentsMargins(20, 0, 20, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        spacerItem = QtWidgets.QSpacerItem(23, 14, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.verticalLayout.addItem(spacerItem)
        # Header label ("Main Menu").
        self.label = QtWidgets.QLabel(main)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        self.label.setMinimumSize(QtCore.QSize(0, 150))
        font = QtGui.QFont()
        font.setFamily("MS Serif")
        font.setPointSize(20)
        font.setWeight(50)
        font.setUnderline(False)
        font.setStrikeOut(False)
        font.setBold(False)
        self.label.setFont(font)
        self.label.setStyleSheet("background-color: rgb(255, 84, 16);\n"
"color: rgb(253, 255, 255)")
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        # Inner layout holding the two action buttons.
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setSpacing(10)
        self.verticalLayout_2.setContentsMargins(-1, -1, -1, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.pushButton = QtWidgets.QPushButton(main)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
        self.pushButton.setSizePolicy(sizePolicy)
        self.pushButton.setMinimumSize(QtCore.QSize(0, 70))
        font = QtGui.QFont()
        font.setFamily("Sitka Small")
        font.setPointSize(11)
        self.pushButton.setFont(font)
        self.pushButton.setCursor(QtCore.Qt.ArrowCursor)
        self.pushButton.setStyleSheet("QPushButton{\n"
"    border: 1px solid grey;\n"
"    background-color: rgb(130, 81, 234);\n"
"    color: white;\n"
"}\n"
"QPushButton:hover{\n"
"    background-color: #9651EA;\n"
"    border: 1px solid grey;\n"
"    color: white;\n"
"}\n"
"QPushButton:pressed{\n"
"    background-color: #AA38EA;\n"
"    border: 1px solid grey;\n"
"    color: white;\n"
"}")
        self.pushButton.setObjectName("pushButton")
        self.verticalLayout_2.addWidget(self.pushButton)
        self.pushButton_2 = QtWidgets.QPushButton(main)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
        self.pushButton_2.setSizePolicy(sizePolicy)
        self.pushButton_2.setMinimumSize(QtCore.QSize(0, 70))
        font = QtGui.QFont()
        font.setFamily("Sitka Small")
        font.setPointSize(11)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setCursor(QtCore.Qt.ArrowCursor)
        self.pushButton_2.setStyleSheet("QPushButton{\n"
"    border: 1px solid grey;\n"
"    background-color: rgb(130, 81, 234);\n"
"    color: white;\n"
"}\n"
"QPushButton:hover{\n"
"    background-color: #9651EA;\n"
"    border: 1px solid grey;\n"
"    color: white;\n"
"}\n"
"QPushButton:pressed{\n"
"    background-color: #AA38EA;\n"
"    border: 1px solid grey;\n"
"    color: white;\n"
"}")
        self.pushButton_2.setObjectName("pushButton_2")
        self.verticalLayout_2.addWidget(self.pushButton_2)
        self.verticalLayout.addLayout(self.verticalLayout_2)
        self.retranslateUi(main)
        QtCore.QMetaObject.connectSlotsByName(main)
    def retranslateUi(self, main):
        """Install the user-visible strings (title, label, button captions)."""
        main.setWindowTitle(QtWidgets.QApplication.translate("main", "main", None, -1))
        self.label.setText(QtWidgets.QApplication.translate("main", "Main Menu", None, -1))
        self.pushButton.setText(QtWidgets.QApplication.translate("main", "Текст в QR", None, -1))
        self.pushButton_2.setText(QtWidgets.QApplication.translate("main", "QR в текст", None, -1))
| StarcoderdataPython |
133865 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Output/OutLoss.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Output/OutLoss
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Output.OutLoss.get_loss import get_loss
except ImportError as error:
get_loss = error
try:
from ..Methods.Output.OutLoss.get_loss_dist import get_loss_dist
except ImportError as error:
get_loss_dist = error
from ._check import InitUnKnowClassError
from .MeshSolution import MeshSolution
class OutLoss(FrozenClass):
    """Gather the loss module outputs

    NOTE: this class is auto-generated from the pyleecan CSV class
    definitions (see file header) -- hand edits will be overwritten.
    """
    VERSION = 1
    # Check ImportError to remove unnecessary dependencies in unused method
    # cf Methods.Output.OutLoss.get_loss
    if isinstance(get_loss, ImportError):
        get_loss = property(
            fget=lambda x: raise_(
                ImportError("Can't use OutLoss method get_loss: " + str(get_loss))
            )
        )
    else:
        get_loss = get_loss
    # cf Methods.Output.OutLoss.get_loss_dist
    if isinstance(get_loss_dist, ImportError):
        get_loss_dist = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use OutLoss method get_loss_dist: " + str(get_loss_dist)
                )
            )
        )
    else:
        get_loss_dist = get_loss_dist
    # save and copy methods are available in all object
    save = save
    copy = copy
    # get_logger method is available in all object
    get_logger = get_logger
    def __init__(
        self,
        loss_list=None,
        meshsol_list=-1,
        loss_index=-1,
        logger_name="Pyleecan.Loss",
        init_dict=None,
        init_str=None,
    ):
        """Constructor of the class. Can be use in three ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
            for pyleecan type, -1 will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with property names as keys
        - __init__ (init_str = s) s must be a string
            s is the file path to load
        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""
        if init_str is not None:  # Load from a file
            init_dict = load_init_dict(init_str)[1]
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "loss_list" in list(init_dict.keys()):
                loss_list = init_dict["loss_list"]
            if "meshsol_list" in list(init_dict.keys()):
                meshsol_list = init_dict["meshsol_list"]
            if "loss_index" in list(init_dict.keys()):
                loss_index = init_dict["loss_index"]
            if "logger_name" in list(init_dict.keys()):
                logger_name = init_dict["logger_name"]
        # Set the properties (value check and convertion are done in setter)
        self.parent = None
        self.loss_list = loss_list
        self.meshsol_list = meshsol_list
        self.loss_index = loss_index
        self.logger_name = logger_name
        # The class is frozen, for now it's impossible to add new properties
        self._freeze()
    def __str__(self):
        """Convert this object in a readeable string (for print)"""
        OutLoss_str = ""
        if self.parent is None:
            OutLoss_str += "parent = None " + linesep
        else:
            OutLoss_str += "parent = " + str(type(self.parent)) + " object" + linesep
        OutLoss_str += "loss_list = " + str(self.loss_list) + linesep + linesep
        if len(self.meshsol_list) == 0:
            OutLoss_str += "meshsol_list = []" + linesep
        for ii in range(len(self.meshsol_list)):
            tmp = (
                self.meshsol_list[ii].__str__().replace(linesep, linesep + "\t")
                + linesep
            )
            OutLoss_str += "meshsol_list[" + str(ii) + "] =" + tmp + linesep + linesep
        OutLoss_str += "loss_index = " + str(self.loss_index) + linesep
        OutLoss_str += 'logger_name = "' + str(self.logger_name) + '"' + linesep
        return OutLoss_str
    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        if type(other) != type(self):
            return False
        if other.loss_list != self.loss_list:
            return False
        if other.meshsol_list != self.meshsol_list:
            return False
        if other.loss_index != self.loss_index:
            return False
        if other.logger_name != self.logger_name:
            return False
        return True
    def compare(self, other, name="self", ignore_list=None):
        """Compare two objects and return list of differences"""
        if ignore_list is None:
            ignore_list = list()
        if type(other) != type(self):
            return ["type(" + name + ")"]
        diff_list = list()
        if (other.loss_list is None and self.loss_list is not None) or (
            other.loss_list is not None and self.loss_list is None
        ):
            diff_list.append(name + ".loss_list None mismatch")
        elif self.loss_list is None:
            pass
        elif len(other.loss_list) != len(self.loss_list):
            diff_list.append("len(" + name + ".loss_list)")
        else:
            for ii in range(len(other.loss_list)):
                diff_list.extend(
                    self.loss_list[ii].compare(
                        other.loss_list[ii], name=name + ".loss_list[" + str(ii) + "]"
                    )
                )
        if (other.meshsol_list is None and self.meshsol_list is not None) or (
            other.meshsol_list is not None and self.meshsol_list is None
        ):
            diff_list.append(name + ".meshsol_list None mismatch")
        elif self.meshsol_list is None:
            pass
        elif len(other.meshsol_list) != len(self.meshsol_list):
            diff_list.append("len(" + name + ".meshsol_list)")
        else:
            for ii in range(len(other.meshsol_list)):
                diff_list.extend(
                    self.meshsol_list[ii].compare(
                        other.meshsol_list[ii],
                        name=name + ".meshsol_list[" + str(ii) + "]",
                    )
                )
        if other._loss_index != self._loss_index:
            diff_list.append(name + ".loss_index")
        if other._logger_name != self._logger_name:
            diff_list.append(name + ".logger_name")
        # Filter ignore differences
        diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
        return diff_list
    def __sizeof__(self):
        """Return the size in memory of the object (including all subobject)"""
        S = 0  # Full size of the object
        if self.loss_list is not None:
            for value in self.loss_list:
                S += getsizeof(value)
        if self.meshsol_list is not None:
            for value in self.meshsol_list:
                S += getsizeof(value)
        if self.loss_index is not None:
            for key, value in self.loss_index.items():
                S += getsizeof(value) + getsizeof(key)
        S += getsizeof(self.logger_name)
        return S
    def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
        """
        Convert this object in a json serializable dict (can be use in __init__).
        type_handle_ndarray: int
            How to handle ndarray (0: tolist, 1: copy, 2: nothing)
        keep_function : bool
            True to keep the function object, else return str
        Optional keyword input parameter is for internal use only
        and may prevent json serializability.
        """
        OutLoss_dict = dict()
        if self.loss_list is None:
            OutLoss_dict["loss_list"] = None
        else:
            OutLoss_dict["loss_list"] = list()
            for obj in self.loss_list:
                if obj is not None:
                    OutLoss_dict["loss_list"].append(
                        obj.as_dict(
                            type_handle_ndarray=type_handle_ndarray,
                            keep_function=keep_function,
                            **kwargs
                        )
                    )
                else:
                    OutLoss_dict["loss_list"].append(None)
        if self.meshsol_list is None:
            OutLoss_dict["meshsol_list"] = None
        else:
            OutLoss_dict["meshsol_list"] = list()
            for obj in self.meshsol_list:
                if obj is not None:
                    OutLoss_dict["meshsol_list"].append(
                        obj.as_dict(
                            type_handle_ndarray=type_handle_ndarray,
                            keep_function=keep_function,
                            **kwargs
                        )
                    )
                else:
                    OutLoss_dict["meshsol_list"].append(None)
        OutLoss_dict["loss_index"] = (
            self.loss_index.copy() if self.loss_index is not None else None
        )
        OutLoss_dict["logger_name"] = self.logger_name
        # The class name is added to the dict for deserialisation purpose
        OutLoss_dict["__class__"] = "OutLoss"
        return OutLoss_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        self.loss_list = None
        self.meshsol_list = None
        self.loss_index = None
        self.logger_name = None
    def _get_loss_list(self):
        """getter of loss_list"""
        if self._loss_list is not None:
            for obj in self._loss_list:
                if obj is not None:
                    obj.parent = self
        return self._loss_list
    def _set_loss_list(self, value):
        """setter of loss_list"""
        if type(value) is list:
            # Dict entries are deserialized back into SciDataTool objects
            for ii, obj in enumerate(value):
                if type(obj) is dict:
                    class_obj = import_class(
                        "SciDataTool.Classes", obj.get("__class__"), "loss_list"
                    )
                    value[ii] = class_obj(init_dict=obj)
                if value[ii] is not None:
                    value[ii].parent = self
        if value == -1:
            value = list()
        check_var("loss_list", value, "[DataND]")
        self._loss_list = value
    loss_list = property(
        fget=_get_loss_list,
        fset=_set_loss_list,
        doc=u"""Internal list of loss data
    :Type: [SciDataTool.Classes.DataND.DataND]
    """,
    )
    def _get_meshsol_list(self):
        """getter of meshsol_list"""
        if self._meshsol_list is not None:
            for obj in self._meshsol_list:
                if obj is not None:
                    obj.parent = self
        return self._meshsol_list
    def _set_meshsol_list(self, value):
        """setter of meshsol_list"""
        if type(value) is list:
            # Dict entries are deserialized back into pyleecan objects
            for ii, obj in enumerate(value):
                if type(obj) is dict:
                    class_obj = import_class(
                        "pyleecan.Classes", obj.get("__class__"), "meshsol_list"
                    )
                    value[ii] = class_obj(init_dict=obj)
                if value[ii] is not None:
                    value[ii].parent = self
        if value == -1:
            value = list()
        check_var("meshsol_list", value, "[MeshSolution]")
        self._meshsol_list = value
    meshsol_list = property(
        fget=_get_meshsol_list,
        fset=_set_meshsol_list,
        doc=u"""Internal list of loss meshsolutions
    :Type: [MeshSolution]
    """,
    )
    def _get_loss_index(self):
        """getter of loss_index"""
        return self._loss_index
    def _set_loss_index(self, value):
        """setter of loss_index"""
        if type(value) is int and value == -1:
            value = dict()
        check_var("loss_index", value, "dict")
        self._loss_index = value
    loss_index = property(
        fget=_get_loss_index,
        fset=_set_loss_index,
        doc=u"""Internal dict to index losses
    :Type: dict
    """,
    )
    def _get_logger_name(self):
        """getter of logger_name"""
        return self._logger_name
    def _set_logger_name(self, value):
        """setter of logger_name"""
        check_var("logger_name", value, "str")
        self._logger_name = value
    logger_name = property(
        fget=_get_logger_name,
        fset=_set_logger_name,
        doc=u"""Name of the logger to use
    :Type: str
    """,
    )
from tornado.log import app_log
class ValidationArgs(object):
    """
    Arguments from the validation framework used by validators
    """
    def __init__(self, idl, schema, table_name, row,
                 p_table_name, p_row, is_new):
        # General arguments
        self.idl = idl
        self.schema = schema
        self.is_new = is_new
        # Arguments specific to parent/child
        # NOTE(review): the four p_resource_* attributes exist only when a
        # parent table name is supplied -- validators handling parentless
        # resources must not access them (AttributeError otherwise).
        if p_table_name is not None:
            self.p_resource_table = p_table_name
            self.p_resource_schema = schema.ovs_tables[p_table_name]
            self.p_resource_idl_table = idl.tables[p_table_name]
            self.p_resource_row = p_row
        # Resource under validation: name, schema table, IDL table and row.
        self.resource_table = table_name
        self.resource_schema = schema.ovs_tables[table_name]
        self.resource_idl_table = idl.tables[table_name]
        self.resource_row = row
class BaseValidator(object):
    """
    Base class for validators, serving as the hook and registration
    mechanism: derived classes are registered as validators.

    resource: table/resource name the validator is registered for and
              looked up by.  Subclasses must set a real value.
    """
    resource = ""

    def type(self):
        # Name of the concrete validator class, used in log messages.
        return type(self).__name__

    def validate_modification(self, validation_args):
        # Default no-op hook; subclasses override to validate row changes.
        app_log.debug("validate_modification not implemented for " + self.type())

    def validate_deletion(self, validation_args):
        # Default no-op hook; subclasses override to validate row deletion.
        app_log.debug("validate_deletion not implemented for " + self.type())
| StarcoderdataPython |
import pytest
from barista.models import Match
def test_both_trigger_and_triggers():
    """Match must reject configs defining both ``trigger`` and ``triggers``."""
    with pytest.raises(ValueError):
        Match.parse_obj(
            {
                "replace": "asd",
                "trigger": "asd",
                "triggers": ["asd", "abc"],
            }
        )
def test_neither_trigger_or_triggers():
    """Match must reject configs with neither ``trigger`` nor ``triggers``."""
    with pytest.raises(ValueError):
        Match.parse_obj({"replace": "asd"})
def test_trigger():
    """A single ``trigger`` alongside ``replace`` is accepted."""
    Match.parse_obj(
        {
            "replace": "asd",
            "trigger": "ads",
        }
    )
def test_triggers():
    """A ``triggers`` list alongside ``replace`` is accepted."""
    Match.parse_obj(
        {
            "replace": "asd",
            "triggers": ["ads", "ads2"],
        }
    )
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InvalidOrder
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
import json
import sys
from datetime import datetime
class tprexchange(Exchange):
    def describe(self):
        """Return the static exchange metadata (ccxt describe schema):
        capability flags, timeframes, endpoint routing, credentials and
        error mapping, merged over the base Exchange defaults."""
        return self.deep_extend(super(tprexchange, self).describe(), {
            'id': 'tprexchange',
            'name': 'TPR Exchange',
            # 'countries': ['US'],
            # 'rateLimit': 500,
            'version': 'v1',
            'certified': False,
            # Unified-method capability map consumed by ccxt users.
            'has': {
                'loadMarkets': True,
                'cancelAllOrders': False,
                'cancelOrder': True,
                'cancelOrders': False,
                'CORS': False,
                'createDepositAddress': False,
                'createLimitOrder': False,
                'createMarketOrder': False,
                'createOrder': True,
                'deposit': False,
                'editOrder': 'emulated',
                'fetchBalance': True,
                'fetchBidsAsks': False,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': False,
                'fetchDeposits': False,
                'fetchFundingFees': False,
                'fetchL2OrderBook': False,
                'fetchLedger': False,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrderBooks': False,
                'fetchOrders': True,
                'fetchOrderTrades': False,
                'fetchStatus': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': False,
                'fetchTrades': True,
                'fetchTradingFee': False,
                'fetchTradingFees': False,
                'fetchTradingLimits': False,
                'fetchTransactions': False,
                'fetchWithdrawals': False,
                'privateAPI': True,
                'publicAPI': False,
                'signIn': True,
                'withdraw': False,
                'getMarketPrice': True,
            },
            # Values are frame widths in minutes (see fetch_ohlcv).
            'timeframes': {
                '1m': '1',
                '1h': '60',
                '1d': '1440',
                '1w': '10080',
                '1mn': '43200',
            },
            'urls': {
                'logo': '',
                'api': '{hostname}',
                'www': '',
                'doc': '',
                'fees': '',
                'referral': '',
            },
            'api': {
                'private': {
                    'get': [
                    ],
                    'post': [
                        'ucenter/api-login',
                        'ucenter/member/balance',
                        'market/symbol-thumb',
                        'market/coins-info',
                        'market/symbol-info',
                        'exchange/order/add',
                        'exchange/order/find',
                        'exchange/order/all',
                        'exchange/order/apicancel',
                        'exchange/order/trades',
                        'exchange/order/my-trades',
                        'exchange/exchange-coin/base-symbol',
                    ],
                    'delete': [
                    ],
                },
                'feed': {
                    'get': [
                    ],
                },
            },
            'fees': {
                'trading': {
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': False,
            },
            'precisionMode': SIGNIFICANT_DIGITS,
            'options': {
                'createMarketBuyOrderRequiresPrice': False,
            },
            'exceptions': {
                'exact': {
                    'Invalid cost': InvalidOrder,  # {"message":"Invalid cost","_links":{"self":{"href":"/orders","templated":false}}}
                    'Invalid order ID': InvalidOrder,  # {"message":"Invalid order ID","_links":{"self":{"href":"/orders/4a151805-d594-4a96-9d64-e3984f2441f7","templated":false}}}
                    'Invalid market !': BadSymbol,  # {"message":"Invalid market !","_links":{"self":{"href":"/markets/300/order-book","templated":false}}}
                },
                'broad': {
                    'Failed to convert argument': BadRequest,
                },
            },
        })
    def parse_ticker(self, response):
        """Fold a raw order list into a single ccxt-style ticker dict.

        Returns [] when *response* is empty.  high/low span all orders;
        bid/ask come from open buy/sell orders; vwap and close come from
        COMPLETED orders; base/quoteVolume cover the last 24 hours.
        NOTE(review): several fields are the literal string 'None' rather
        than the None object ccxt normally uses -- confirm intended.
        """
        if len(response) == 0:
            return []
        symbol = response[0].get('symbol')
        high = 0
        bidVolume = 0
        askVolume = 0
        vwap = 0
        vwapCost = 0
        vwapVolume = 0
        open_ = 'None'
        close = 0
        last = close
        previousClose = 'None'
        change = 'None'
        percentage = 'None'
        average = 'None'
        baseVolume = 0
        quoteVolume = 0
        time = 0
        # Rolling 24h window (86400 s) for the volume aggregates.
        lastDayTime = int((datetime.now().timestamp() - 86400) * 1000)
        currentTimestamp = int(datetime.now().timestamp() * 1000)
        currentDatetime = str(datetime.fromtimestamp(currentTimestamp * 0.001))
        low = response[0].get('price')
        bid = 0
        # Sentinel: lowered by every open sell order; reset to 0 below if none.
        ask = sys.maxsize
        openSellOrdersCount = 0
        for order in response:
            price = order.get('price')
            amount = order.get('amount')
            timestamp = order.get('timestamp')
            if high < price:
                high = price
            if low > price:
                low = price
            if order.get('status') == 'open':
                if order.get('side') == 'buy':
                    if bid < price:
                        bid = price
                    if bidVolume < amount:
                        bidVolume = amount
            if order.get('status') == 'open':
                if order.get('side') == 'sell':
                    openSellOrdersCount += 1
                    if ask > price:
                        ask = price
                    if askVolume < amount:
                        askVolume = amount
            if order.get('info').get('status') == 'COMPLETED':
                vwapCost += price * amount
                vwapVolume += amount
                # Most recent completed trade defines the closing price.
                if time < timestamp:
                    time = timestamp
                    close = price
                if timestamp > lastDayTime:
                    quoteVolume += amount
                    baseVolume += price
        if vwapVolume != 0:
            vwap = vwapCost / vwapVolume
        if openSellOrdersCount == 0:
            ask = 0
        last = close
        result = {
            'symbol': symbol,
            'info': response,
            'timestamp': currentTimestamp,
            'datetime': currentDatetime,
            'high': high,
            'low': low,
            'bid': bid,
            'bidVolume': bidVolume,
            'ask': ask,
            'askVolume': askVolume,
            'vwap': vwap,
            'open': open_,
            'close': close,
            'last': last,
            'previousClose': previousClose,
            'change': change,
            'percentage': percentage,
            'average': average,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
        }
        return result
    def fetch_ticker(self, symbol, since=None, limit=None):
        """Fetch the order history for *symbol* and fold it into one ticker
        dict via parse_ticker (empty list when there are no orders)."""
        response = self.fetch_orders(symbol, since, limit)
        # Response example:
        # {
        #     'symbol': 'BTC/USDT',
        #     'info': [...],
        #     'timestamp': 1615386851976,
        #     'datetime': '2021-03-10 16:34:11.976000',
        #     'high': 50.0,
        #     'low': 1.0,
        #     'bid': 30.0,
        #     'bidVolume': 15.0,
        #     'ask': 40.0,
        #     'askVolume': 25.0,
        #     'vwap': 11.0,
        #     'open': 'None',
        #     'close': 20.0,
        #     'last': 20.0,
        #     'previousClose': 'None',
        #     'change': 'None',
        #     'percentage': 'None',
        #     'average': 'None',
        #     'baseVolume': 60.0,
        #     'quoteVolume': 30.0
        # }
        return self.parse_ticker(response)
def fetch_tickers(self, since=None, limit=None):
# Response example:
# [
# {
# 'symbol': 'BTC/USDT',
# 'info': [...],
# 'timestamp': 1615386851976,
# 'datetime': '2021-03-10 16:34:11.976000',
# 'high': 50.0,
# 'low': 1.0,
# 'bid': 30.0,
# 'bidVolume': 15.0,
# 'ask': 40.0,
# 'askVolume': 25.0,
# 'vwap': 11.0,
# 'open': 'None',
# 'close': 20.0,
# 'last': 20.0,
# 'previousClose': 'None',
# 'change': 'None',
# 'percentage': 'None',
# 'average': 'None',
# 'baseVolume': 60.0,
# 'quoteVolume': 30.0
# },
# ...
# ]
result = []
symbols = self.fetch_markets()
for symblol in symbols:
response = self.fetch_orders(symblol.get('symbol'), since, limit)
ticker = self.parse_ticker(response)
if len(ticker) != 0:
result.append(ticker)
return result
def fetch_order_book(self, symbol, limit, since=0):
# Response example:
# {
# 'bids':
# [
# [20.0, 10.0, 'E161538482263642'], // [price, amount, orderId]
# [30.0, 15.0, 'E161538482271646']
# ],
# 'asks':
# [
# [40.0, 20.0, 'E161538482278825'],
# [50.0, 25.0, 'E161538482286085']
# ],
# 'timestamp': 1615390711695,
# 'datetime': '2021-03-10 17:38:31.695000',
# 'nonce': 1615390711695
# }
orders = self.fetch_open_orders(symbol, since, limit)
bids = []
asks = []
for order in orders:
temp = []
temp.append(order.get('price'))
temp.append(order.get('amount'))
temp.append(order.get('id'))
if order.get('side') == 'buy':
bids.append(temp)
else:
asks.append(temp)
currentTimestamp = int(datetime.now().timestamp() * 1000)
currentDatetime = str(datetime.fromtimestamp(currentTimestamp * 0.001))
result = {
'bids': bids,
'asks': asks,
'timestamp': currentTimestamp,
'datetime': currentDatetime,
'nonce': currentTimestamp,
}
return result
    def parse_markets(self, response):
        """Normalize the raw symbol-info payload into ccxt market dicts
        (id/symbol/base/quote, precision, limits, fees; raw entry in 'info')."""
        listData = []
        for value in response:
            tmp = {
                "id": value.get("coinSymbol"),
                "symbol": value.get("symbol"),
                "base": value.get("coinSymbol"),
                "quote": value.get("baseSymbol"),
                "baseId": value.get("coinSymbol"),
                "quoteId": value.get("baseSymbol"),
                "type": value.get("publishType"),
                "active": value.get("enable"),
                "precision": {
                    "amount": value.get("coinScale"),
                    "price": value.get("baseCoinScale"),
                },
                "limits": {
                    "amount": {"min": value.get("minVolume"), "max": value.get("maxVolume")},
                    "price": {"min": value.get("minSellPrice"), "max": value.get("maxBuyPrice")},
                    "cost": {"min": value.get("minVolume") * value.get("minSellPrice"), "max": value.get("maxVolume") * value.get("maxBuyPrice")},
                },
                # Single exchange fee applied to both sides.
                "taker": value.get("fee"),
                "maker": value.get("fee"),
                "info": value,
            }
            listData.append(tmp)
        return listData
    def add_frame(self, timeFrameStart, timeFrameEnd, timeframe, highestPrice, lowestPrice, amount, result, openPrice, closePrice):
        """Append one OHLCV frame [ts, open, high, low, close, volume] to *result*.

        NOTE(review): ``timeFrameEnd`` and ``timeframe`` are accepted but
        never used -- kept only for signature compatibility with the
        existing parse_ohlcv call sites.
        """
        frame = []
        frame.append(timeFrameStart)
        frame.append(openPrice)
        frame.append(highestPrice)
        frame.append(lowestPrice)
        frame.append(closePrice)
        frame.append(amount)
        result.append(frame)
    def parse_ohlcv(self, response, since, timeframe):
        """Bucket completed orders into fixed-width OHLCV frames.

        *response* is the paged order listing ({'content': [...]}),
        *since* the start timestamp in ms, *timeframe* the frame width in
        seconds.  Returns [[ts, open, high, low, close, volume], ...].
        """
        highestPrice = 0
        lowestPrice = sys.maxsize  # sentinel: unchanged value marks an empty frame
        price = 0
        amount = 0
        timeFrameStart = since
        timeFrameEnd = int((since * 0.001 + timeframe) * 1000)
        result = []
        i = 0
        orders = response.get('content')
        isOpenPrice = True
        openPrice = 0
        closePrice = 0
        while i < len(orders):
            if isOpenPrice == True:
                openPrice = orders[i].get('price')
                isOpenPrice = False
            time = orders[i].get('time')
            if time >= timeFrameStart and time <= timeFrameEnd:
                # Order belongs to the current frame: fold it in.
                price = orders[i].get('price')
                closePrice = price
                if highestPrice < price:
                    highestPrice = price
                if lowestPrice > price:
                    lowestPrice = price
                amount += orders[i].get('amount')
                i += 1
                if i == len(orders):
                    # Last order consumed: flush the final frame.
                    self.add_frame(timeFrameStart, timeFrameEnd, timeframe, highestPrice, lowestPrice, amount, result, openPrice, closePrice)
            else:
                # Order lies beyond the frame: emit the frame and advance it.
                if lowestPrice == sys.maxsize:
                    # Empty frame: emit zeros and step back so the current
                    # order is re-examined against the next frame.
                    lowestPrice = 0
                    openPrice = 0
                    closePrice = 0
                    i -= 1
                # NOTE(review): for a NON-empty frame ``i`` is not stepped
                # back, so the boundary order appears to be skipped by the
                # unconditional ``i += 1`` below -- confirm intended.
                self.add_frame(timeFrameStart, timeFrameEnd, timeframe, highestPrice, lowestPrice, amount, result, openPrice, closePrice)
                timeFrameStart = timeFrameEnd + 1
                timeFrameEnd = int((timeFrameEnd * 0.001 + timeframe) * 1000)
                amount = 0
                highestPrice = 0
                lowestPrice = sys.maxsize
                isOpenPrice = True
                i += 1
        return result
    # timeframe variants:
    # 1m (one minute);
    # 1h (one hour);
    # 1d (one day - 24 hours)
    # 1w (one week - 7 days)
    # 1mn (one month - 30 days)
    def fetch_ohlcv(self, symbol, timeframe=None, since=0, limit=None, params={}):
        """Fetch completed orders and aggregate them into OHLCV candles.

        Returns {'error': 'Incorrect timeframe'} for unknown timeframes.
        NOTE(review): mutable default ``params={}`` is mutated in place
        ('status'/'pageNo' keys) and persists across calls -- confirm.
        """
        # Response example:
        # [
        #     [
        #         1504541580000, // UTC timestamp in milliseconds, integer
        #         4235.4, // (O)pen price, float
        #         4240.6, // (H)ighest price, float
        #         4230.0, // (L)owest price, float
        #         4230.7, // (C)losing price, float
        #         37.72941911 // (V)olume (in terms of the base currency), float
        #     ],
        #     ...
        # ]
        inputDataCheck = False
        for frame in self.timeframes:
            if frame == timeframe:
                inputDataCheck = True
                break
        if inputDataCheck == False:
            return {'error': 'Incorrect timeframe'}
        # Timeframe table stores minutes; parse_ohlcv wants seconds.
        tFrame = int(self.timeframes.get(timeframe)) * 60
        default_order_amount_limit = 100
        params['status'] = 'COMPLETED'
        if 'page' in params:
            params['pageNo'] = self.safe_string(params, 'page')
        else:
            params['pageNo'] = 0
        if since is None:
            since = 0
        if limit is None:
            limit = default_order_amount_limit
        request = {
            'symbol': symbol,
            'since': since,
            'pageSize': limit,
        }
        fullRequest = self.extend(request, params)
        response = self.privatePostExchangeOrderAll(fullRequest)
        return self.parse_ohlcv(response, since, tFrame)
    def fetch_markets(self, symbol=''):
        """Fetch market metadata from the symbol-info endpoint and normalize
        it with parse_markets.  Empty *symbol* requests all markets."""
        request = {
            'symbol': symbol,
        }
        response = self.privatePostMarketSymbolInfo(request)
        return self.parse_markets(response)
# RETURN EXAMPLE:
# [
# {
# 'id': 'BTC',
# 'symbol': 'BTC/USDT',
# 'base': 'BTC',
# 'quote': 'USDT',
# 'baseId': 'BTC',
# 'quoteId': 'USDT',
# 'type': 'NONE',
# 'active': 1,
# 'precision': { 'amount': 2, 'price': 2 },
# 'limits':
# {
# 'amount': { 'min': 0.0, 'max': 0.0 },
# 'price': { 'min': 0.0, 'max': 0.0 },
# 'cost': { 'min': 0.0, 'max': 0.0 }
# },
# 'taker': 0.001,
# 'maker': 0.001,
# 'info': {backend response}
# },
# ...
# ]
    def load_markets(self, reload=False, symbol=''):
        """ccxt-style market loader with an extra ``symbol`` filter
        (backward compatible: default '' loads all markets).  Cached
        markets are reused unless *reload* is truthy."""
        if not reload:
            if self.markets:
                if not self.markets_by_id:
                    return self.set_markets(self.markets)
                return self.markets
        currencies = None
        if self.has['fetchCurrencies']:
            currencies = self.fetch_currencies()
        markets = self.fetch_markets(symbol)
        return self.set_markets(markets, currencies)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request: URL, HTTP method, body and the
        x-auth-sign / x-auth-token headers expected by the backend."""
        # Check existence of authentication token.
        # Just use an empty one in case the application is not signed in yet.
        authToken = ''
        if 'token' in self.options:
            authToken = self.options['token']
        # Get URL
        url = self.implode_params(self.urls['api'], {'hostname': self.hostname}) + '/' + path
        # Calculate body and content type depending on method type: GET or POST
        keys = list(params.keys())
        keysLength = len(keys)
        # In case body is still not assigned just make it an empty string
        if body is None:
            body = ''
        # Prepare line for hashing
        # This hash sum is checked on backend side to verify API user
        # POST params should not be added as body
        query = method + ' /' + path + ' ' + self.urlencode(params) + ' ' + authToken + '\n' + body
        signed = self.hmac(self.encode(query), self.encode(self.secret))
        contentType = None
        if method == 'POST':
            contentType = 'application/x-www-form-urlencoded'
            if keysLength > 0:
                body = self.urlencode(params)
        else:
            # GET: parameters travel in the query string instead.
            if keysLength > 0:
                url += '?' + self.urlencode(params)
        headers = {
            'x-auth-sign': signed,
            'x-auth-token': authToken,
        }
        if authToken != '':
            headers['access-auth-token'] = authToken
        if contentType is not None:
            headers['Content-Type'] = contentType
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def sign_in(self, params={}):
params = {
'key': self.key,
'token': self.token,
}
response = self.privatePostUcenterApiLogin(params)
loginData = response['data']
self.options['token'] = self.safe_string(loginData, 'token')
memberId = self.safe_string(loginData, 'id')
return memberId
def fetch_status(self):
# Responce examples:
# {'status': 'ok'}
# or
# {'status': 'shutdown', 'reason': 'ExchangeNotAvailable'}
# or
# {'status': 'shutdown', 'reason': 'Unknown reason'}
result = False
try:
response = self.privatePostExchangeExchangeCoinBaseSymbol()
for field in response.items():
if field[0] == 'message':
if field[1] == 'SUCCESS':
result = True
if result is True:
return {"status": "ok"}
else:
return {"status": "shutdown", "reason": "ExchangeNotAvailable"}
except:
reason = str(sys.exc_info()[0])
if reason.find('ExchangeNotAvailable') != -1:
return {"status": "shutdown", "reason": "ExchangeNotAvailable"}
else:
return {"status": "shutdown", "reason": "Unknown reason"}
def parse_currencies(self, response):
listData = []
for value in response:
tmp = {
'id': value.get('name'),
'code': value.get('name').upper(),
'name': value.get('name'),
'active': bool(value.get('status')),
'fee': 0.005,
'precision': 0,
'limits':
{
'amount':
{
'min': 'None',
'max': 'None',
},
'price':
{
'min': 'None',
'max': 'None',
},
'cost':
{
'min': 'None',
'max': 'None',
},
'withdraw':
{
'min': value.get('minWithdrawAmount'),
'max': value.get('maxWithdrawAmount'),
},
},
'info': value
}
listData.append(tmp)
return listData
def fetch_currencies(self):
# Responce example
#[
# {
# 'id': 'BTC',
# 'code': 'BTC',
# 'name': 'BTC',
# 'active': True,
# 'fee': 0.001,
# 'precision': 0,
# 'limits': // TPR exchange has no restrictions
# {
# 'amount':
# {
# 'min': 'None',
# 'max': 'None'
# },
# 'price':
# {
# 'min': 'None',
# 'max': 'None'
# },
# 'cost':
# {
# 'min': 'None',
# 'max': 'None'
# },
# 'withdraw':
# {
# 'min': 1.0,
# 'max': 5000.0
# }
# },
# 'info': { },
# },
# ...
#]
try:
response = self.privatePostMarketCoinsInfo()
return self.parse_currencies(response)
except:
reason = str(sys.exc_info()[0])
if reason.find('ExchangeNotAvailable') != -1:
return {"Error": "ExchangeNotAvailable"}
else:
return {"Error": "Unknown reason"}
def fetch_order(self, id, symbol=None, params={}):
request = {
'orderId': id,
}
response = self.privatePostExchangeOrderFind(request)
return self.parse_order(response)
def parse_order(self, order, market=None):
# {
# 'orderId':'E161183624377614',
# 'memberId':2,
# 'type':'LIMIT_PRICE',
# 'amount':1000.0,
# 'symbol':'BCH/USDT',
# 'tradedAmount':1000.0,
# 'turnover':1080.0,
# 'coinSymbol':'BCH',
# 'baseSymbol':'USDT',
# 'status':'COMPLETED',
# 'latestTradeTimestamp':1611836256242,
# 'direction':'SELL',
# 'price':1.0,
# 'time':1611836243776,
# 'completedTime':1611836256242,
# },
if not order:
return None
type = 'market'
if order['type'] == 'LIMIT_PRICE':
type = 'limit'
side = order['direction'].lower()
remaining = order['amount'] - order['tradedAmount']
status = order['status']
if status == 'COMPLETED':
status = 'closed'
elif status == 'TRADING' or status == 'PAUSED' or status == 'RESERVED':
status = 'open'
else:
status = 'canceled'
cost = order['tradedAmount'] * order['price']
result = {
'info': order,
'id': order['orderId'],
'clientOrderId': order['memberId'],
'timestamp': order['time'],
'datetime': self.iso8601(order['time']),
'latestTradeTimestamp': order['latestTradeTimestamp'],
'symbol': order['symbol'],
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': order['price'],
'stopPrice': None,
'cost': cost,
'average': None,
'amount': order['amount'],
'filled': order['tradedAmount'],
'remaining': remaining,
'status': status,
'fee': None,
'trades': None,
}
return result
def create_order(self, symbol, type, side, amount, price=None, params={}):
params['symbol'] = symbol
params['price'] = price
params['amount'] = amount
if side == 'buy':
params['direction'] = 'BUY'
else:
params['direction'] = 'SELL'
if type == 'market':
params['type'] = 'MARKET_PRICE'
else:
params['type'] = 'LIMIT_PRICE'
params['useDiscount'] = '0'
response = self.privatePostExchangeOrderAdd(params)
orderId = self.safe_string(response, 'data')
return self.fetch_order(orderId)
def cancel_order(self, id, symbol=None, params={}):
request = {
'orderId': id,
}
response = self.privatePostExchangeOrderApicancel(self.extend(request, params))
return self.parse_order(response['data'])
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
# Request structure
# {
# 'symbol': Parameter from method arguments
# 'since': Timestamp of first order in list in Unix epoch format
# 'limit': Response list size
# 'memberId': May be set in params. May be not set
# 'status': one of TRADING COMPLETED CANCELED OVERTIMED. May be set in params
# 'page': for pagination. In self case limit is size of every page. May be set in params
# }
default_order_amount_limit = 10000
if 'page' in params:
params['pageNo'] = self.safe_string(params, 'page')
else:
params['pageNo'] = 0
if symbol is None:
symbol = ''
if since is None:
since = 0
if limit is None:
limit = default_order_amount_limit
request = {
'symbol': symbol,
'since': since,
'pageSize': limit,
}
fullRequest = self.extend(request, params)
response = self.privatePostExchangeOrderAll(fullRequest)
# {
# 'content': [
# {
# 'orderId':'E161183624377614',
# 'memberId':2,
# 'type':'LIMIT_PRICE',
# 'amount':1000.0,
# 'symbol':'BCH/USDT',
# 'tradedAmount':1000.0,
# 'turnover':1080.0,
# 'coinSymbol':'BCH',
# 'baseSymbol':'USDT',
# 'status':'COMPLETED',
# 'direction':'SELL',
# 'price':1.0,
# 'time':1611836243776,
# 'completedTime':1611836256242,
# },
# ...
# ],
# 'totalElements':41,
# 'totalPages':3,
# 'last':False,
# 'size':20,
# 'number':1,
# 'first':False,
# 'numberOfElements':20,
# 'sort': [
# {
# 'direction':'DESC',
# 'property':'time',
# 'ignoreCase':False,
# 'nullHandling':'NATIVE',
# 'ascending':False,
# 'descending':True,
# }
# ]
# }
return self.parse_orders(response['content'])
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# Request structure
# {
# 'symbol': Parameter from method arguments
# 'since': Timestamp of first order in list in Unix epoch format
# 'limit': Response list size
# 'memberId': May be set in params. May be not set
# 'status': one of TRADING COMPLETED CANCELED OVERTIMED. May be set in params
# 'page': for pagination. In self case limit is size of every page. May be set in params
# }
default_order_amount_limit = 20
if 'page' in params:
params['pageNo'] = self.safe_string(params, 'page')
else:
params['pageNo'] = 0
if symbol is None:
symbol = ''
if since is None:
since = 0
if limit is None:
limit = default_order_amount_limit
request = {
'symbol': symbol,
'since': since,
'pageSize': limit,
}
fullRequest = self.extend(request, params)
response = self.privatePostExchangeOrderAll(fullRequest)
# {
# 'content': [
# {
# 'orderId':'E161183624377614',
# 'memberId':2,
# 'type':'LIMIT_PRICE',
# 'amount':1000.0,
# 'symbol':'BCH/USDT',
# 'tradedAmount':1000.0,
# 'turnover':1080.0,
# 'coinSymbol':'BCH',
# 'baseSymbol':'USDT',
# 'status':'COMPLETED',
# 'direction':'SELL',
# 'price':1.0,
# 'time':1611836243776,
# 'completedTime':1611836256242,
# },
# ...
# ],
# 'totalElements':41,
# 'totalPages':3,
# 'last':False,
# 'size':20,
# 'number':1,
# 'first':False,
# 'numberOfElements':20,
# 'sort': [
# {
# 'direction':'DESC',
# 'property':'time',
# 'ignoreCase':False,
# 'nullHandling':'NATIVE',
# 'ascending':False,
# 'descending':True,
# }
# ]
# }
return self.parse_orders(response['content'])
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# Request structure
# {
# 'symbol': Parameter from method arguments
# 'since': Timestamp of first order in list in Unix epoch format
# 'limit': Response list size
# 'memberId': May be set in params. May be not set
# 'status': one of TRADING COMPLETED CANCELED OVERTIMED. May be set in params
# 'page': for pagination. In self case limit is size of every page. May be set in params
# }
default_order_amount_limit = 20
params['status'] = 'CANCELED'
if 'page' in params:
params['pageNo'] = self.safe_string(params, 'page')
else:
params['pageNo'] = 0
if symbol is None:
symbol = ''
if since is None:
since = 0
if limit is None:
limit = default_order_amount_limit
request = {
'symbol': symbol,
'since': since,
'pageSize': limit,
}
fullRequest = self.extend(request, params)
response = self.privatePostExchangeOrderAll(fullRequest)
# {
# 'content': [
# {
# 'orderId':'E161183624377614',
# 'memberId':2,
# 'type':'LIMIT_PRICE',
# 'amount':1000.0,
# 'symbol':'BCH/USDT',
# 'tradedAmount':1000.0,
# 'turnover':1080.0,
# 'coinSymbol':'BCH',
# 'baseSymbol':'USDT',
# 'status':'COMPLETED',
# 'direction':'SELL',
# 'price':1.0,
# 'time':1611836243776,
# 'completedTime':1611836256242,
# },
# ...
# ],
# 'totalElements':41,
# 'totalPages':3,
# 'last':False,
# 'size':20,
# 'number':1,
# 'first':False,
# 'numberOfElements':20,
# 'sort': [
# {
# 'direction':'DESC',
# 'property':'time',
# 'ignoreCase':False,
# 'nullHandling':'NATIVE',
# 'ascending':False,
# 'descending':True,
# }
# ]
# }
return self.parse_orders(response['content'])
    # If call without params the function returns balance of current user
    def fetch_balance(self, uid='-1', params={}):
        """Fetch account balances for *uid* (-1 = current user).

        NOTE(review): *params* is discarded and rebuilt from *uid*, and on
        failure the caught exception object is *returned* rather than
        raised — callers must type-check the result.  Both look like
        candidates for cleanup but are kept for compatibility.
        """
        params = {
            'uid': uid
        }
        try:
            response = self.privatePostUcenterMemberBalance(params)
        except Exception as e:
            return e
        return self.parse_balance(response)
def parse_balance(self, response):
data = json.loads(json.dumps(response))
if data['message'] == 'SUCCESS':
result = { "free":{}, "used":{}, "total":{}}
for row in data['data']['balances']:
result['free'].update({row['coinName']:row['free']})
result['used'].update({row['coinName']:row['used']})
result['total'].update({row['coinName']:row['total']})
result.update({row['coinName']:{'free':row['free'], 'used':row['used'], 'total':row['total']}})
return result
# Returns int or None
def get_market_price(self, symbol):
response = self.privatePostMarketSymbolThumb()
for i in response:
if i.get('symbol') == symbol:
return i.get('close')
def fetch_trades(self, orderId, since, pageNo=None, pageSize=None):
# Responce example:
# [
# {
# 'info': { backend response },
# 'id': 'E161460499516968',
# 'timestamp': 1614605187661,
# 'datetime': '2021-03-01 15:26:27.661000',
# 'symbol': 'BTC/USDT',
# 'order': 'E161460499516968',
# 'type': 'LIMIT_PRICE',
# 'side': 'SELL',
# 'takerOrMaker': 'None', (Have no this information inside TPR exchange)
# 'price': 1.0,
# 'amount': 1.0,
# 'cost': 1.0,
# 'fee':
# {
# 'cost': 0.005,
# 'currency': 'BTC',
# 'rate': 'None' (Have no this information inside TPR exchange)
# }
# }
# ]
if pageNo is None:
pageNo = 0
if pageSize is None:
pageSize = 100
request = { 'orderId': orderId,
'since': since,
'pageNo': pageNo,
'pageSize': pageSize }
return self.parse_trade(self.privatePostExchangeOrderTrades(request))
def parse_trade(self, response):
trades = []
content = response.get('content')
for exchangeTrade in content:
timestamp = exchangeTrade.get('time')
datetime_ = str(datetime.fromtimestamp(int(timestamp) * 0.001))
price = exchangeTrade.get('price')
amount = exchangeTrade.get('amount')
cost = price * amount
tmp = {
'info': exchangeTrade,
'id': exchangeTrade.get('orderId'),
'timestamp': timestamp,
'datetime': datetime_,
'symbol': exchangeTrade.get('symbol'),
'order': exchangeTrade.get('orderId'),
'type': exchangeTrade.get('type'),
'side': exchangeTrade.get('direction'),
'takerOrMaker': 'None',
'price': price,
'amount': amount,
'cost': cost,
'fee':
{
'cost': exchangeTrade.get('fee'),
'currency': exchangeTrade.get('coinSymbol'),
'rate': 'None',
}
}
trades.append(tmp)
return trades
def parse_my_trades(self, response):
listData = []
for value in response:
ExchangeOrder = response.get(value)
id_ = ExchangeOrder.get('orderId')
timestamp = ExchangeOrder.get('time')
datetime_ = str(datetime.fromtimestamp(int(timestamp) * 0.001))
price = ExchangeOrder.get('price')
amount = ExchangeOrder.get('amount')
cost = price * amount
tmp = {
'info': response.get(value),
'id': id_,
'timestamp': timestamp,
'datetime': datetime_,
'symbol': ExchangeOrder.get('symbol'),
'order': id_,
'type': ExchangeOrder.get('type'),
'side': ExchangeOrder.get('direction'),
'takerOrMaker': 'None',
'price': price,
'amount': amount,
'cost': cost,
'fee':
{
'cost': ExchangeOrder.get('fee'),
'currency': ExchangeOrder.get('coinSymbol'),
'rate': 'None',
}
}
listData.append(tmp)
return listData
def fetch_my_trades(self, pageNo=None, pageSize=None):
# Responce example:
# [
# {
# 'info': { backend response },
# 'id': 'E161460499516968',
# 'timestamp': 1614605187661,
# 'datetime': '2021-03-01 15:26:27.661000',
# 'symbol': 'BTC/USDT',
# 'order': 'E161460499516968',
# 'type': 'LIMIT_PRICE',
# 'side': 'SELL',
# 'takerOrMaker': 'None', (Have no this information inside TPR exchange)
# 'price': 1.0,
# 'amount': 1.0,
# 'cost': 1.0,
# 'fee':
# {
# 'cost': 0.001,
# 'currency': 'BTC',
# 'rate': 'None' (Have no this information inside TPR exchange)
# }
# },
# { ... },
# ]
if pageNo is None:
pageNo = 0
if pageSize is None:
pageSize = 100
request = { 'orderId': '',
'pageNo': pageNo,
'pageSize': pageSize }
return self.parse_my_trades(self.privatePostExchangeOrderMyTrades(request))
    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map backend error payloads onto ccxt exceptions.

        Returns None (no error) for clean responses; otherwise raises the
        exception matched from self.exceptions, or a generic ExchangeError.
        """
        if response is None:
            return  # fallback to default error handler
        if httpCode == 200:
            if 'code' in response:
                if response['code'] == 0:
                    return
            else:
                return
        # NOTE: an HTTP 200 payload with a non-zero 'code' falls through to
        # the message-based error mapping below.
        # Non-200 payloads look like:
        # {
        #     "message": "Error text in case when HTTP code is not 200",
        #     ...
        # }
        message = self.safe_string(response, 'message')
        if message is not None:
            feedback = self.id + ' ' + body
            self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
            raise ExchangeError(feedback)  # unknown message
| StarcoderdataPython |
'''interface with serlib.cparser module
We'll use platform defined C-types int (np.intc) and long long int
(np.ulonglong) for maximal portability because python C-api does not
support fixed width C types: https://docs.python.org/3/c-api/long.html
For numpy reference see
https://numpy.org/devdocs/user/basics.types.html
For platform reference see
https://en.cppreference.com/w/cpp/language/types
On 32 bit data model ILP32 (Win32, 32-bit Linux, OSX) and 64 bit
datamodel LP64 (Linux, OSX) and LLP64 (Windows):
int is 32 bit
long long is 64 bit
Py_ssize_t is signed integer of the same bitwidth as ssize_t
Py_ssize_t seems to be equivalent to np.intp
"Integer used for indexing, typically the same as ssize_t"
per https://numpy.org/devdocs/user/basics.types.html
'''
from typing import List
import numpy as np
import serlib.cparser
# buffer limit to exchange with C code to parse S-expression
# TODO: need to find how to determine BUFFER_SIZE_LIMIT dynamically
BUFFER_SIZE_LIMIT = 100000000
def hash_bytestring(bytestring: bytes) -> int:
    """Hash *bytestring* with the C extension's string hasher.

    Thin wrapper around :func:`serlib.cparser.hash_string`.
    """
    return serlib.cparser.hash_string(bytestring)
# TODO: REMOVE
# def list_of_string(input_s: str, parser=None) -> List[str]:
# ''' from s-expression string "(expr_1 expr_2 ... expr_k)"
# returns a list of strings ["expr_1", "expr_2", ..., "expr_k"]
# '''
# if parser is None:
# p = SExpParser()
# input_b = input_s.encode()
# res, loc = p.postfix_of_bytestring(input_b), p.last_location()
# n_args = -res[-1]
# if not n_args >= 0:
# raise ValueError("the input s-exprression {input_s} is not an s-expr list (may be an atom?)")
# else:
# args = []
# for i in range(n_args):
# _, loc = p.postfix_of_bytestring(input_b, [i]), p.last_location()
# args.append(input_b[loc].decode())
# return args
class SExpParser:
    """Parsing helpers for s-expressions (e.g. goals obtained from
    coq-serapi).

    Wraps the C extension ``serlib.cparser`` and keeps a persistent token
    dictionary so that repeated parses share token ids.
    """

    def __init__(self):
        # Pre-allocated hash table shared with the C parser.
        buffer_size = BUFFER_SIZE_LIMIT
        self.hash_list = np.zeros(buffer_size, dtype=np.ulonglong)
        # token bytes -> 1-based token id
        self.dict = {}
        # token id -> token bytes (index 0 is a reserved empty entry)
        self.inv_dict = [b'']

    def postfix_of_sexp(self, string, address=None):
        """
        return a postfix representation in np.array[int] of the input string
        containing the subtree s-expression at the address
        """
        return self.postfix_of_bytestring(string.encode('utf8'), address)

    def postfix_of_bytestring(self, bytestring, address=None):
        """
        return a postfix representation in np.array[int] of the input
        s-expression bytestring at the tree address, registering any new
        tokens in the persistent dictionary (formerly parse_bytestring)
        """
        if address is None:
            address = []
        np_address = np.array(address, dtype=np.intc)
        self._start_pos, self._end_pos, post_fix, np_add_dict = serlib.cparser.parse(
            bytestring, np_address, self.hash_list, len(self.dict))
        # np_add_dict carries (start, end) byte offsets of newly seen tokens.
        for i in range(np_add_dict.shape[0] // 2):
            start = np_add_dict[2 * i]
            end = np_add_dict[2 * i + 1]
            word = bytestring[start:end]
            word_hash = hash_bytestring(word)
            self.hash_list[len(self.dict)] = word_hash
            self.dict[word] = len(self.dict) + 1
            self.inv_dict.append(word)
        return post_fix

    def parse_bytestring_new(self, bytestring, address=None):
        """Like postfix_of_bytestring, but extracts the subtree at *address*
        from the annotated postfix encoding."""
        # BUG FIX: the default used to be the mutable literal []; a None
        # sentinel avoids the shared-mutable-default pitfall (backward
        # compatible: None behaves exactly like []).
        if address is None:
            address = []
        postfix = self.postfix_of_bytestring(bytestring, [])
        ann = serlib.cparser.annotate(postfix)
        start, end = serlib.cparser.subtree(postfix, ann, np.array(address, dtype=np.intc))
        return postfix[start:end]

    def last_location(self):
        """Slice of the input covered by the most recent parse."""
        return slice(self._start_pos, self._end_pos)

    def hash_dict(self):
        """Mapping of token bytes -> hash value for all known tokens."""
        return {key: self.hash_list[value - 1] for key, value in self.dict.items()}

    def to_sexp_legacy(self, encoding_list):
        """Decode a postfix encoding back into its s-expression string.

        Always separates siblings with a single space; returns None for an
        empty encoding.
        """
        stack = []
        for value in encoding_list:
            if value > 0:
                # Positive values are token ids.
                new_element = self.inv_dict[value]
            elif value == 0:
                # Zero encodes the empty list.
                new_element = b'()'
            else:
                # A negative value closes a list of -value children.
                new_element = b'(' + b' '.join(stack[value:]) + b')'
                del(stack[value:])
            stack.append(new_element)
        if stack:
            return stack[0].decode('utf8')
        else:
            return None

    def to_sexp(self, encoding_list):
        """Decode a postfix encoding back into its s-expression string,
        omitting spaces that are redundant next to parentheses or quotes.

        Returns None for an empty encoding.
        """
        stack = []
        for value in encoding_list:
            if value > 0:
                new_element = self.inv_dict[value]
            elif value == 0:
                new_element = b'()'
            else:
                value_ = len(stack) + value
                new_element = b'(' + stack[value_]
                for element in stack[value_ + 1:]:
                    # Insert a space only when neither side already provides
                    # a delimiter (closing ')' / '"' vs opening '(' / '"').
                    if not (new_element[-1] in [ord(')'), ord('"')] or element[0] in [ord('('), ord('"')]):
                        new_element += b' '
                    new_element += element
                new_element += b')'
                del(stack[value:])
            stack.append(new_element)
        if stack:
            return stack[0].decode('utf8')
        else:
            return None
def check_inverse(parser: SExpParser, bytestring: bytes) -> bool:
    """True iff encoding *bytestring*, decoding it, and re-encoding the
    decoded form reproduces the original postfix encoding."""
    encoded = parser.postfix_of_bytestring(bytestring)
    decoded = parser.to_sexp(encoded)
    reencoded = parser.postfix_of_bytestring(decoded)
    return (encoded == reencoded).all()
def encode(string, address):
    """Encode the s-expression *string* restricted to the subtree *address*.

    Walks the string once, tracking the child count of every open paren in
    *open_pars*.  Tokens (maximal runs of non-paren characters, whitespace
    included) and closing parens that lie on the given tree *address* (a
    list of child indices, matched as a prefix) are emitted; a closing
    paren is emitted as the negated child count of the list it closes,
    matching the parser's postfix encoding.
    """
    levels = len(address)
    open_pars = []
    output = []
    pos = 0
    while pos < len(string):
        if string[pos] == '(':
            open_pars.append(0)
            pos += 1
        elif string[pos] == ')':
            last_element = open_pars.pop()
            if open_pars[:levels] == address:
                output.append(-last_element)
            if open_pars:
                open_pars[-1] += 1
            pos += 1
        else:
            token = ''
            while pos < len(string) and string[pos] not in '()':
                token += string[pos]
                pos += 1
            # BUG FIX: a leftover debug print(token, open_pars) was removed
            # here; it polluted stdout on every token.
            if open_pars[:levels] == address:
                output.append(token)
            open_pars[-1] += 1
    return output
| StarcoderdataPython |
1686404 | <reponame>Stunnerr/vkwave
import typing
import pydantic
from enum import Enum
class MessagesSendPeerIdsData(pydantic.BaseModel):
    # Target peer identifier for the messages.send API call;
    # required (Field(...)) and validated as int by pydantic.
    peer_id: int = pydantic.Field(
        ..., description="",
    )
| StarcoderdataPython |
"""Unit test package for instrumentdatabaseapi."""
| StarcoderdataPython |
def grade(x):
    """Map a numeric score onto a letter-grade message.

    Bands: 90-100 -> A, 80-89 -> B, 70-79 -> C, 60-69 -> D;
    anything outside those bands yields 'not a correct value'.
    """
    if 90 <= x < 101:
        return 'Your Grade is A'
    if 80 <= x <= 89:
        return 'Your Grade is B'
    if 70 <= x <= 79:
        return 'Your Grade is C'
    if 60 <= x <= 69:
        return 'Your Grade is D'
    return 'not a correct value'
# Interactive driver.  NOTE: this is Python 2 code (`print` statements and
# eval-style input()); under Python 2, input() eval()s the typed text, so a
# typed number arrives as an int — raw_input + int() would be safer.
y = ['Score and Grade']
for i in range (0,10):
    a = input('Enter a score between 60 and 100. ->')
    x = ('Score: '+ str(a)+ ';'+ grade(a))
    y.append(x)
    # NOTE(review): this manual increment is redundant inside a for loop.
    i+=1
print '\n'.join(y)
print 'End of the program. Bye!'
| StarcoderdataPython |
90463 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
from pandas_mate.tests import create_test_df
from pandas_mate import util
data_file_path = os.path.join(os.path.dirname(__file__), "data.csv")
def setup_module(module):
    """Pytest hook: materialise the 1M-row CSV fixture once per module."""
    if os.path.exists(data_file_path):
        return
    create_test_df(1000000).to_csv(data_file_path, index=False)
def test_read_csv_arg_preprocess():
    """A 5 MB memory budget over the 1M-row fixture must force chunked
    iteration with a chunk size bounded by 100k rows."""
    iterator, chunksize = util.read_csv_arg_preprocess(
        data_file_path, 5*1000*1000)
    assert iterator is True
    assert chunksize <= 100000  # observed value: 83334
def test_ascii_table():
    """Smoke-test ascii_table on a tiny frame.

    NOTE(review): the result is never asserted on — this only verifies
    that the call does not raise.
    """
    df = create_test_df(3)
    s = util.ascii_table(df)
if __name__ == "__main__":
    # Run this module's tests directly.  The redundant local `import os`
    # was removed: os is already imported at the top of the file.
    pytest.main([os.path.basename(__file__), "--tb=native", "-s", ])
| StarcoderdataPython |
3281909 | <reponame>zhengxiaowai/sshm<filename>sshr/cli.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import readline
import click
import json
from six.moves import input as raw_input
from collections import defaultdict
from clients import get_client, init_client, get_supported_platform
from utils import prompt_line, mkdir
# Root click command group; the subcommands below register themselves on it.
# (Kept docstring-free so the generated --help text is unchanged.)
@click.group()
def cli():
    pass
# `sshr init --platform <name>`: initialise the storage backend client for
# one of the supported platforms.
@cli.command()
@click.option('--platform', required=True, type=click.Choice(get_supported_platform()))
def init(platform):
    init_client(platform)
# `sshr add`: interactively collect an SSH host definition and store it
# (plus the optional identity file) through the configured storage client.
@cli.command()
def add():
    client = get_client()
    # defaultdict(str) lets the prompts show '' the first time around.
    ssh_config = defaultdict(str)
    while True:
        # Re-prompt every field, pre-filling each with the previous answer,
        # until the user confirms the JSON summary below.
        ssh_config['hostname'] = prompt_line(
            'HostName[{}]: ',
            ssh_config['hostname'],
            (ssh_config['hostname'],))
        ssh_config['host'] = prompt_line(
            'Host[{}]: ',
            ssh_config['host'],
            (ssh_config['host'],))
        ssh_config['port'] = prompt_line(
            'Port[{}]: ',
            ssh_config['port'],
            (ssh_config['port'],))
        ssh_config['user'] = prompt_line(
            'User[{}]: ',
            ssh_config['user'],
            (ssh_config['user'],))
        ssh_config['identityfile'] = prompt_line(
            'IdentityFile[{}]: ',
            ssh_config['identityfile'],
            (ssh_config['identityfile'],))
        ok = raw_input(json.dumps(ssh_config, indent=4) +
                       '\nare you sure? [Y/N]: ')
        ok = ok.strip()
        if ok.lower() == 'y':
            break
        else:
            continue
    hostname = ssh_config['hostname']
    identityfile = ssh_config['identityfile']
    if identityfile:
        # Upload the identity file's contents as <hostname>.cert.
        with open(identityfile) as f:
            cert_content = f.read()
        cert_filename = '{}.cert'.format(hostname)
        client.upload(cert_content, cert_filename)
    # Upload the config itself as <hostname>.json.
    config_filename = '{}.json'.format(hostname)
    ssh_config = json.dumps(ssh_config, indent=4)
    client.upload(ssh_config, config_filename)
# `sshr list`: print every stored host name (one per *.json entry).
# NOTE: the function name shadows the builtin `list` on purpose — click
# derives the command name from it, so it must stay.
@cli.command()
def list():
    client = get_client()
    for filename in client.list(''):
        if not filename.endswith('.json'):
            continue
        click.echo(os.path.splitext(filename)[0])
# `sshr info HOSTNAME`: print the stored JSON config for the host.
@cli.command()
@click.argument('hostname')
def info(hostname):
    client = get_client()
    click.echo(client.download('{}.json'.format(hostname)))
# `sshr delete HOSTNAME`: remove both stored artifacts for the host —
# its JSON config and its certificate.
@cli.command()
@click.argument('hostname')
def delete(hostname):
    client = get_client()
    for suffix in ('.json', '.cert'):
        client.delete(hostname + suffix)
# `sshr connect HOSTNAME`: download the stored config (and certificate, if
# any), launch an interactive ssh session, then clean up the local cert.
@cli.command()
@click.argument('hostname')
def connect(hostname):
    client = get_client()
    ssh_json = '{}.json'.format(hostname)
    content = client.download(ssh_json)
    ssh_config = json.loads(content)
    local_ssh_cert_path = ''
    if ssh_config.get('identityfile', None):
        # Materialise the stored certificate under ~/.sshr_certs with 0600
        # permissions so ssh accepts it as an identity file.
        ssh_cert_path = '{}.cert'.format(hostname)
        ssh_cert_filename = '{}.cert'.format(hostname)
        home_env = os.getenv('HOME', None)
        local_ssh_cert_dir = os.path.join(home_env, '.sshr_certs')
        mkdir(local_ssh_cert_dir)
        local_ssh_cert_path = os.path.join(
            local_ssh_cert_dir, ssh_cert_filename)
        content = client.download(ssh_cert_path)
        with open(local_ssh_cert_path, 'w') as f:
            f.write(str(content))
        os.chmod(local_ssh_cert_path, 0o600)
    user = ssh_config['user']
    host = ssh_config['host']
    port = ssh_config['port']
    ssh_command = ''
    if local_ssh_cert_path:
        ssh_command = 'ssh {}@{} -p {} -i {}'.format(
            user, host, port, local_ssh_cert_path)
    else:
        ssh_command = 'ssh {}@{} -p {}'.format(
            user, host, port)
    # SECURITY NOTE(review): the command string is built from stored config
    # values and run through the shell via os.system — a malicious config
    # could inject shell commands.  subprocess.call(['ssh', ...]) with a
    # list argument would avoid the shell entirely.
    # mac Sierra(10.12.2) ssh-add -K
    os.system(ssh_command)
    if local_ssh_cert_path:
        # Best effort: do not leave the private key material on disk.
        os.remove(local_ssh_cert_path)
if local_ssh_cert_path:
os.remove(local_ssh_cert_path)
if __name__ == '__main__':
    # Entry point when the module is executed directly.
    cli()
| StarcoderdataPython |
3225295 | <filename>Project2/test_main.py
import random
from unittest import TestCase
import datetime
import matplotlib.pyplot as plt
import numpy as np
from main import gaussian_elimination, compute_tomograph
class Test(TestCase):
    # NOTE(review): a second `class Test` defined below in this module
    # rebinds the name, so unittest discovery only sees the later class —
    # rename one of them to run both.
    def test_gaussian_elimination(self):
        """Elimination must keep A x = b solvable for the known x and
        produce an upper-triangular matrix."""
        size = random.randint(0, 100)  # size 0 is possible: degenerate but allclose handles it
        A = np.random.rand(size, size)
        x = np.random.rand(size)
        b = np.dot(A, x)
        A_elim, b_elim = gaussian_elimination(A, b, True)
        self.assertTrue(np.allclose(np.linalg.solve(A_elim, b_elim), x))  # Check if system is still solvable
        self.assertTrue(np.allclose(A_elim, np.triu(A_elim)))  # Check if matrix is upper triangular
class Test(TestCase):
    # NOTE(review): this redefinition shadows the `class Test` above —
    # only this class is collected by test discovery.
    def test_compute_tomograph(self):
        """Compute a tomographic image and display it.

        NOTE(review): there are no assertions here, and plt.show() blocks
        until the window is closed — unsuitable for headless CI runs.
        """
        t = datetime.datetime.now()
        print("Start time: " + str(t.hour) + ":" + str(t.minute) + ":" + str(t.second))
        # Compute tomographic image (reduced resolution for speed).
        n_shots = 64  # 128
        n_rays = 64  # 128
        n_grid = 32  # 64
        tim = compute_tomograph(n_shots, n_rays, n_grid)
        t = datetime.datetime.now()
        print("End time: " + str(t.hour) + ":" + str(t.minute) + ":" + str(t.second))
        # Visualize image
        plt.imshow(tim, cmap='gist_yarg', extent=[-1.0, 1.0, -1.0, 1.0],
                   origin='lower', interpolation='nearest')
        plt.gca().set_xticks([-1, 0, 1])
        plt.gca().set_yticks([-1, 0, 1])
        plt.gca().set_title('%dx%d' % (n_grid, n_grid))
        plt.show()
| StarcoderdataPython |
"""
Builds wheel files for the dependencies of an app, specified in requirements.txt, into the wheels/
folder of the app repo, and updates the app's JSON config specifying any generated wheels as pip
dependencies.
NOTE: If running this script with the --repair_wheels flag, make sure the script is executed from
a manylinux2014_x86_64 container https://github.com/pypa/manylinux
"""
import argparse
import json
import logging
import os
import pathlib
import random
import re
import shutil
import string
import subprocess
import sys
from collections import namedtuple
# Target platform tag used by auditwheel when repairing platform wheels.
PLATFORM = 'manylinux2014_x86_64'
# Subdirectory (relative to the wheels dir) where auditwheel writes output.
REPAIRED_WHEELS_REL_PATH = 'repaired-wheels'
# Parses a wheel filename into distribution / python-version / platform tags.
WHEEL_PATTERN = re.compile(
    r'^(?P<distribution>([A-Z0-9][A-Z0-9._-]*[A-Z0-9]))-([0-9]+\.?)+-'
    r'(?P<python_version>[A-Z0-9]+\.?[A-Z0-9]+)-.+-'
    r'(?P<platform>.+)\.whl$',
    re.IGNORECASE)
# A built wheel file and its parsed name components.
Wheel = namedtuple('Wheel', ['file_name', 'distribution', 'python_version', 'platform'])
# One wheel entry as it appears in the app JSON (module + relative path).
AppJsonWheelEntry = namedtuple('AppJsonWheel', ['module', 'input_file'])
# Keys for py2 / py3 pip dependencies in the app JSON.
PIP_DEPENDENCIES = 'pip_dependencies'
PIP3_DEPENDENCIES = 'pip3_dependencies'
# wheels/ subdirectories and the matching wheel python tags.
PY2_WHEELS_DIR = PY2_TAG = 'py2'
PY3_WHEELS_DIR = PY3_TAG = 'py3'
SHARED_WHEELS_DIR = 'shared'
PY2_PY3_TAG = '{}.{}'.format(PY2_TAG, PY3_TAG)
# CPython ABI tag patterns, e.g. cp27 / cp310.
CP2_TAG_PATTEN = re.compile(r'cp2\d?')
CP3_TAG_PATTERN = re.compile(r'cp3\d{0,2}')
# An app JSON config: source file name plus its parsed content.
AppJson = namedtuple('AppJson', ['file_name', 'content'])
APP_JSON_INDENT = 4
def _load_app_json(app_dir):
    """Locate and parse the single app JSON config in *app_dir*.

    Postman collection exports are ignored.  Raises ValueError when the
    directory does not contain exactly one candidate JSON file.
    """
    candidates = [
        name for name in os.listdir(app_dir)
        if name.endswith('.json') and not name.endswith('.postman_collection.json')
    ]
    if len(candidates) != 1:
        message = 'Expected a single json file in {} but got {}'.format(app_dir, candidates)
        logging.error(message)
        raise ValueError(message)
    config_name = candidates[0]
    with open(os.path.join(app_dir, config_name)) as handle:
        return AppJson(config_name, json.load(handle))
def _repair_wheels(wheels_to_check, all_wheels, wheels_dir):
    """
    Uses auditwheel to 1) check for platform wheels depending on external binary dependencies
    and 2) bundle external binary dependencies into the platform wheels if necessary. Repaired
    wheels are placed in a sub dir of wheels_dir by auditwheel, which we then use to replace
    the original wheels at the root level of wheels_dir.  *all_wheels* is
    updated in place: removed originals are dropped, repaired wheels added.
    https://github.com/pypa/auditwheel
    """
    # Probe for auditwheel; without it repairs are skipped entirely.
    if subprocess.run(['auditwheel', '-V']).returncode != 0:
        logging.warning('auditwheel is not installed or is not supported on the given platform. '
                        'Skipping wheel repairs.')
        return
    repaired_wheels_dir = os.path.join(wheels_dir, REPAIRED_WHEELS_REL_PATH)
    for whl in wheels_to_check:
        logging.info('Checking %s', whl)
        whl_path = os.path.join(wheels_dir, whl.file_name)
        # `auditwheel show` fails on pure-python wheels — those need no repair.
        if subprocess.run(['auditwheel', 'show', whl_path]).returncode != 0:
            logging.info('Skipping non-platform wheel %s', whl)
        else:
            repair_result = subprocess.run(['auditwheel', 'repair', whl_path,
                '--plat', PLATFORM, '-w', repaired_wheels_dir], capture_output=True)
            if repair_result.returncode != 0:
                logging.warning('Failed to repair platform wheel %s', whl)
                continue
            # original wheel will be replaced by repaired wheels written to repaired-wheels/
            os.remove(whl_path)
            all_wheels.remove(whl)
    if os.path.exists(repaired_wheels_dir):
        # Promote every repaired wheel to the root of wheels_dir and
        # register it in all_wheels with its re-parsed name tags.
        for whl in os.listdir(repaired_wheels_dir):
            shutil.copyfile(os.path.join(repaired_wheels_dir, whl), os.path.join(wheels_dir, whl))
            match = WHEEL_PATTERN.match(whl)
            all_wheels.add(Wheel(
                whl, match.group('distribution'), match.group('python_version'), match.group('platform')))
        shutil.rmtree(repaired_wheels_dir)
def _remove_platform_wheels(all_built_wheels, new_wheels_dir, existing_app_json_wheel_entries):
    """
    Removes all platform wheels in :param: all_built_wheels from :param: new_wheels_dir
    If there's an existing wheel specified in the app json for a dependency that we just built
    a platform wheel for, then we'll assume the existing wheel is compatible for Phantom and
    return it to indicate that the wheel should not be deleted.
    """
    existing_by_module = {entry.module: entry for entry in existing_app_json_wheel_entries}
    entries_to_keep = []
    # Iterate over a snapshot: the set is mutated during the loop.
    for built in list(all_built_wheels):
        if built.platform == 'any':
            continue
        logging.info('Removing platform wheel %s', built.file_name)
        all_built_wheels.remove(built)
        os.remove(os.path.join(new_wheels_dir, built.file_name))
        # Keep a pre-existing app-json wheel for this dependency, if any,
        # so it is not deleted later.
        existing = existing_by_module.get(built.distribution)
        if existing is not None:
            logging.info('Existing wheel for %s to be retained: %s',
                         built.distribution, existing.input_file)
            entries_to_keep.append(
                AppJsonWheelEntry(built.distribution, existing.input_file))
    return entries_to_keep
def _update_app_json(app_json, pip_dependencies_key, wheel_entries, app_dir):
    """
    Updates the app's JSON config to specify that the wheels under the
    repo's wheel/ folder be installed as dependencies.
    https://docs.splunk.com/Documentation/Phantom/4.10.7/DevelopApps/Metadata#Specifying_pip_dependencies
    """
    sorted_entries = sorted(wheel_entries, key=lambda entry: entry.module)
    app_json.content[pip_dependencies_key] = {
        'wheel': [
            {'module': entry.module, 'input_file': entry.input_file}
            for entry in sorted_entries
        ],
    }
    with open(os.path.join(app_dir, app_json.file_name), 'w') as handle:
        json.dump(app_json.content, handle, indent=APP_JSON_INDENT)
        handle.write('\n')
def _parse_pip_dependency_wheels(app_json, pip_dependency_key):
    """Return the AppJsonWheelEntry list declared under *pip_dependency_key*."""
    declared = app_json.content.get(pip_dependency_key, {'wheel': []})
    entries = []
    for wheel in declared.get('wheel', []):
        entries.append(AppJsonWheelEntry(wheel['module'], wheel['input_file']))
    return entries
def _copy_new_wheels(new_wheels, new_wheels_dir, app_dir, app_json, pip_dependencies_key):
    """
    Copies new wheels to the wheels/ directory of the app dir, routing each
    wheel into wheels/py2, wheels/py3 or wheels/shared (by its python tag)
    when the app declares both pip dependency keys, or flat into wheels/
    otherwise.  Returns the app-dir-relative paths of everything written.
    """
    new_wheel_paths = []
    def copy_wheel(wheel_name, dst_path):
        # Copy one wheel and record its app-relative destination path.
        src_fp = os.path.join(new_wheels_dir, wheel_name)
        new_wheel_paths.append(os.path.join('wheels', dst_path))
        logging.info('Writing %s --> %s', wheel_name, new_wheel_paths[-1])
        shutil.copyfile(src_fp, os.path.join(app_dir, new_wheel_paths[-1]))
    # Make sure to write the new wheels under appropriate wheels/(py2|py3|shared) sub paths
    # when the app supports both pip_dependencies/pip3_dependencies
    other_key = PIP_DEPENDENCIES if pip_dependencies_key == PIP3_DEPENDENCIES else PIP3_DEPENDENCIES
    if other_key in app_json.content:
        for path in (PY2_WHEELS_DIR, PY3_WHEELS_DIR, SHARED_WHEELS_DIR):
            pathlib.Path(os.path.join(app_dir, 'wheels', path)).mkdir(parents=True, exist_ok=True)
        for whl in new_wheels:
            # Route by python tag: py2.py3 -> shared, py2/cp2x -> py2,
            # py3/cp3xx -> py3; anything else is a malformed tag.
            if whl.python_version == PY2_PY3_TAG:
                sub_path = os.path.join(SHARED_WHEELS_DIR, whl.file_name)
            elif whl.python_version == PY2_TAG or CP2_TAG_PATTEN.match(whl.python_version):
                sub_path = os.path.join(PY2_WHEELS_DIR, whl.file_name)
            elif whl.python_version == PY3_TAG or CP3_TAG_PATTERN.match(whl.python_version):
                sub_path = os.path.join(PY3_WHEELS_DIR, whl.file_name)
            else:
                raise ValueError('{} has an unexpected python version tag: {}'.format(
                    whl.file_name, whl.python_version))
            copy_wheel(whl.file_name, sub_path)
    else:
        for whl in new_wheels:
            copy_wheel(whl.file_name, whl.file_name)
    return new_wheel_paths
def _remove_unreferenced_wheel_paths(app_dir, existing_wheel_paths, new_wheel_paths, wheel_entries_for_other_py_version):
"""
Removes wheels from the app directory that will no longer be referenced by in app JSON.
"""
all_referenced_wheel_paths = set(new_wheel_paths + [w.input_file for w in wheel_entries_for_other_py_version])
for path in existing_wheel_paths:
if path not in all_referenced_wheel_paths:
logging.info('Removing unreferenced wheel under path %s', path)
path = os.path.join(app_dir, path)
if not os.path.exists(path):
logging.warning('%s does not exist!', path)
continue
os.remove(os.path.join(app_dir, path))
def main(args):
    """
    Main entrypoint.

    Builds wheels for the app's requirements.txt into a scratch directory,
    optionally repairs platform wheels with auditwheel, copies the results
    into the app's wheels/ folder, prunes wheels that are no longer
    referenced, and rewrites the pip-dependency section of the app JSON.
    """
    app_dir, pip_path, repair_wheels, pip_dependencies_key = \
        args.app_dir, args.pip_path, args.repair_wheels, args.pip_dependencies_key
    wheels_dir, requirements_file = '{}/wheels'.format(app_dir), '{}/requirements.txt'.format(app_dir)
    pathlib.Path(wheels_dir).mkdir(exist_ok=True)
    logging.info('Building wheels for %s from %s into %s',
                 pip_dependencies_key, requirements_file, wheels_dir)
    # Scratch directory (random 10-digit name) inside the app dir; always
    # removed in the finally block below.
    temp_dir = os.path.join(app_dir, ''.join(random.choices(string.digits, k=10)))
    os.mkdir(temp_dir)
    try:
        # '-f wheels_dir' lets pip reuse wheels already packaged with the app
        # instead of rebuilding them.
        build_result = subprocess.run([pip_path, 'wheel',
                                       '-f', wheels_dir,
                                       '-w', temp_dir,
                                       '-r', requirements_file], capture_output=True)
        if build_result.stdout:
            logging.info(build_result.stdout.decode())
        if build_result.stderr:
            logging.warning(build_result.stderr.decode())
        if build_result.returncode != 0:
            logging.error('Failed to build wheels from requirements.txt. '
                          'This typically occurs when you have a version conflict in requirements.txt or '
                          'you depend on a library requiring external development libraries (eg, python-ldap). '
                          'In the former case, please resolve any version conflicts before re-running this script. '
                          'In the latter case, please manually build the library in a manylinux https://github.com/pypa/manylinux '
                          'container, making sure to first install any required development libraries. If you are unable '
                          'to build a required dependency for your app, please raise an issue in the app repo for further assistance.')
            # NOTE(review): returns None here, so the process still exits with
            # status 0 on a failed build (see the __main__ guard).
            return
        # Some apps may have different dependencies for Python2 and Python3, and
        # we don't want to override the wheels for the Python version we aren't building for
        app_json = _load_app_json(app_dir)
        if pip_dependencies_key == PIP3_DEPENDENCIES:
            existing_app_json_wheel_entries = _parse_pip_dependency_wheels(app_json, PIP3_DEPENDENCIES)
        else:  # pip_dependencies_key == 'pip_dependencies
            existing_app_json_wheel_entries = _parse_pip_dependency_wheels(app_json, PIP_DEPENDENCIES)
        existing_wheel_paths = set(w.input_file for w in existing_app_json_wheel_entries)
        wheel_file_names = set(os.listdir(temp_dir))
        # Parse wheel metadata (distribution, python tag, platform tag) out of
        # the freshly built file names.
        all_built_wheels = set(Wheel(m.group(), m.group('distribution'), m.group('python_version'), m.group('platform'))
                               for m in (WHEEL_PATTERN.match(f) for f in wheel_file_names))
        updated_app_json_wheel_entries = []
        if repair_wheels:
            logging.info('Repairing new platform wheels...')
            # Only repair wheels that are not already shipped with the app.
            wheels_to_repair, existing_wheel_file_names = [], set(os.path.basename(p) for p in existing_wheel_paths)
            for wheel in all_built_wheels:
                if wheel.file_name not in existing_wheel_file_names:
                    wheels_to_repair.append(wheel)
            _repair_wheels(wheels_to_repair, all_built_wheels, temp_dir)
        else:
            logging.warning('New platform wheels will not be repaired but removed.')
            # Remove any platform wheels for dependencies that we just built, but check for any
            # existing wheels for these given dependencies - we won't replace them
            existing_platform_wheel_entries = _remove_platform_wheels(
                all_built_wheels, temp_dir, existing_app_json_wheel_entries)
            # Ensure the entries in the app JSON for the existing wheels don't get overwritten
            updated_app_json_wheel_entries.extend(existing_platform_wheel_entries)
            existing_wheel_paths -= set(w.input_file for w in existing_platform_wheel_entries)
        # Add the newly built wheels and remove the wheels no longer needed from the wheels folder
        new_wheel_paths = _copy_new_wheels(all_built_wheels, temp_dir, app_dir, app_json, pip_dependencies_key)
        wheels_for_other_py_version = _parse_pip_dependency_wheels(app_json, PIP_DEPENDENCIES) \
            if pip_dependencies_key == PIP3_DEPENDENCIES else _parse_pip_dependency_wheels(app_json, PIP3_DEPENDENCIES)
        _remove_unreferenced_wheel_paths(app_dir=app_dir,
                                         new_wheel_paths=new_wheel_paths,
                                         existing_wheel_paths=existing_wheel_paths,
                                         wheel_entries_for_other_py_version=wheels_for_other_py_version)
        logging.info('Updating app json with latest dependencies...')
        # NOTE(review): this zip pairs each built wheel with a copied path and
        # assumes _copy_new_wheels preserved the iteration order of
        # all_built_wheels (it iterates the same set) - confirm when changing
        # either function.
        for pair in zip(all_built_wheels, new_wheel_paths):
            updated_app_json_wheel_entries.append(
                AppJsonWheelEntry(pair[0].distribution, pair[1]))
        _update_app_json(app_json, pip_dependencies_key, updated_app_json_wheel_entries, app_dir)
    except:
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # the error is logged and the scratch dir is still cleaned up below.
        logging.exception('Unexpected error')
    finally:
        shutil.rmtree(temp_dir)
def parse_args():
    """Parse the command line arguments for the wheel-build script."""
    # Collapse the module docstring into a one-line --help description.
    doc_lines = __doc__.strip().splitlines()
    description = ' '.join(line.strip() for line in doc_lines)
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('app_dir', help='Path to the target app directory')
    parser.add_argument('pip_path', help='Path to the pip installation to use')
    parser.add_argument('pip_dependencies_key',
                        choices=[PIP_DEPENDENCIES, PIP3_DEPENDENCIES],
                        help='Key in the app JSON specifying pip dependencies')
    parser.add_argument('--repair_wheels', action='store_true',
                        help='Whether to repair platform wheels with auditwheel')
    return parser.parse_args()
if __name__ == '__main__':
    # Emit INFO-level progress messages when run as a script.
    logging.getLogger().setLevel(logging.INFO)
    # NOTE(review): main() returns None on both success and failure, so the
    # process exit code is always 0 even when the wheel build fails.
    sys.exit(main(parse_args()))
| StarcoderdataPython |
1673104 |
#MousePressed
def setup():
    """Processing setup hook: create a 240x120 canvas with a 30px stroke."""
    # size/strokeWeight are Processing (Python mode) builtins, not stdlib.
    size(240, 120)
    strokeWeight(30)
def draw():
    """Processing draw loop: second line's color follows the pressed mouse button."""
    background(204)
    stroke(102)
    line(40, 0, 70, height)
    if mousePressed:
        if mouseButton == LEFT:  # Paint the line white if the left button is pressed
            stroke(255)
        else:
            # Any other button paints it black.
            stroke(0)
        line(0, 70, width, 50)
| StarcoderdataPython |
4808651 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File : element_tree_xml_parser
@Author : 周恒-z50003220
@Email : <EMAIL>
@Create : 2019/8/12-10:21
@Python :Python 3.7.3
@IDE : PyCharm
@Version : 0.0.1
@Change_log
2019/8/12-10:21 created
"""
import re
from collections.abc import Iterable
from xml.etree.ElementTree import Element, ElementTree, fromstring
class ReadOnlyXMLParserWithEncoding:
    """Read-only XML parser that decodes the input file with an explicit encoding.

    ElementTree normally sniffs the encoding from the XML declaration; this
    helper instead decodes the whole file itself (e.g. from GBK) and parses
    the resulting text.
    """

    # ElementTree renders namespaced tags as '{namespace}tag'; this captures
    # the '{namespace}' prefix needed for find()/findall() queries.
    _NS_PREFIX_PATTERN = re.compile(r'{.*}')

    def __init__(self, file_path: str, encoding: str = 'utf-8'):
        """
        read only xml parser for read from < gbk > or other encoding method
        :param file_path: the target xml file read from
        :param encoding: the encoding method of input xml file, may write on the xml head
                            => <?xml version="1.0" encoding="GBK"?> or not, default encoding is < UTF-8 >
        :exception FileNotFoundError, < LookupError: unknown encoding >, < ValueError: UnicodeDecodeError >
        """
        with open(file_path, 'r', encoding=encoding) as f:
            self.__tree = ElementTree(element=fromstring(f.read()))
        self.__root = self.__tree.getroot()
        # Robustness fix: a document without a default namespace made the
        # original `re.match(r'{.*}', tag).group(0)` raise AttributeError;
        # fall back to an empty prefix instead.
        match = self._NS_PREFIX_PATTERN.match(self.__root.tag)
        self.__find_prefix = match.group(0) if match else ''

    def get_tree(self) -> ElementTree:
        return self.__tree

    def get_root(self) -> Element:
        return self.__root

    def get_find_prefix(self) -> str:
        return self.__find_prefix
class TPSXMLParser(ReadOnlyXMLParserWithEncoding):
    """Parser for TPS performance-measurement XML files (default encoding GBK).

    On construction it eagerly extracts the file header metadata into
    get_basic_info() and every <measInfo> record into get_all_measInfo().
    """

    def __init__(self, file_path: str, encoding: str = 'GBK'):
        """
        specially for TPS xml parse => default encoding in < GBK >
        what is most important is the < self.__all_measInfo_dict > contains all inner < measInfo > data
        :param file_path: the xml file path read file from
        :param encoding: encoding method, default is < GBK >
        """
        super().__init__(file_path, encoding)
        self.__prefix = self.get_find_prefix()
        self.__root = self.get_root()
        # Header metadata filled by __fill_basic_info below.
        self.__xml_basic_info = {
            'fileFormatVersion': '',
            'vendorName': '',
            'fileSender': '',
            'beginTime': '',
            'endTime': '',
            'userLabel': ''
        }
        self.__all_measInfo_list = []
        # trigger for xml parser basic info
        self.__fill_basic_info()
        # trigger for xml parser data info
        self.__fill_all_measInfo()

    def get_basic_info(self) -> dict:
        # Returns the mutable header dict; callers should treat it read-only.
        return self.__xml_basic_info

    def get_all_measInfo(self) -> Iterable:
        # Generator over the pre-parsed measInfo dicts.
        yield from self.__all_measInfo_list

    def __fill_basic_info(self):
        """
        fill basic information of TPS xml file, put it into self.__xml_basic_info
        including:
            namespace query prefix
            xml root node
            tps xml => < fileSender > in < fileHeader >
            tps xml recode < beginTime > < endTime >
            tps measInfo of < userLabel > in < managedElement >
        :return:
        """
        # NOTE(review): assumes the document always contains fileHeader,
        # measData/managedElement and fileFooter/measCollec nodes; a file
        # missing any of them raises AttributeError on the .find() result.
        prefix = self.__prefix
        root = self.__root
        fileHeader = root.find(prefix + 'fileHeader')
        # fileHeader attribs => < fileFormatVersion > and < vendorName >
        self.__xml_basic_info['fileFormatVersion'] = fileHeader.attrib.get('fileFormatVersion')
        self.__xml_basic_info['vendorName'] = fileHeader.attrib.get('vendorName')
        # fileHeader sub nodes => < fileSender > and < measCollec >
        fileHeader_file_sender_type = fileHeader.find(prefix + 'fileSender')
        self.__xml_basic_info['fileSender'] = fileHeader_file_sender_type.attrib.get('elementType')
        fileHeader_measCollec = fileHeader.find(prefix + 'measCollec')
        self.__xml_basic_info['beginTime'] = fileHeader_measCollec.attrib.get('beginTime')
        # file data info < measData > sub node => < managedElement >
        managedElement = root.find(prefix + 'measData').find(prefix + 'managedElement')
        self.__xml_basic_info['userLabel'] = managedElement.attrib.get('userLabel')
        # fileFooter sub node => < measCollec >
        fileFooter_measCollec = root.find(prefix + 'fileFooter').find(prefix + 'measCollec')
        self.__xml_basic_info['endTime'] = fileFooter_measCollec.attrib.get('endTime')

    def __fill_all_measInfo(self):
        """
        fill all measurement data of TPS xml file => many data in < measInfo > node => put it into self.__all_measInfo_list
        for every measurement data => including
            measInfoId: single
            duration: single
            measTypes: muti
            measValue: muti
        :return:
        """
        prefix = self.__prefix
        root = self.__root
        all_measInfo = root.find(prefix + 'measData').findall(prefix + 'measInfo')
        for single_measInfo in all_measInfo:
            # append first info key => the measInfo Id, such as => <measInfo measInfoId="1526726781">
            measInfo_dict = {
                'measInfoId': single_measInfo.attrib.get('measInfoId') or -1,
                'duration': single_measInfo.find(prefix + 'granPeriod').attrib.get('duration'),
                'measTypes': single_measInfo.find(prefix + 'measTypes').text.split(),
                'measValue_list': []
            }
            measValue_list = []
            all_measValue = single_measInfo.findall(prefix + 'measValue')
            for single_measValue in all_measValue:
                single_measValue_dict = {'measObjLdn': None, 'measResults': None, 'suspect': None}
                single_measValue_dict['measObjLdn'] = single_measValue.attrib.get('measObjLdn')
                single_measValue_dict['measResults'] = single_measValue.find(prefix + 'measResults').text.split()
                single_measValue_dict['suspect'] = single_measValue.find(prefix + 'suspect').text
                measValue_list.append(single_measValue_dict)
            measInfo_dict['measValue_list'] = measValue_list
            self.__all_measInfo_list.append(measInfo_dict)
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a sample TPS file and report the parser's
    # memory footprint. Depends on a project-local utility module, so it only
    # runs inside the TrafficPredictService repository.
    from TrafficPredictService.util.running_measure import get_obj_size_bytes, log_run_time_second_print

    @log_run_time_second_print
    def parser_test() -> TPSXMLParser:
        parser = TPSXMLParser('A20190605.0000+0800-0015+0800_98.16.15.253.xml', 'GBK')
        basic_info = parser.get_basic_info()
        all_measInfo = parser.get_all_measInfo()
        return parser

    print(get_obj_size_bytes(parser_test()) / 1000000, 'MB')
| StarcoderdataPython |
194106 | <filename>src/OpenSSL/__init__.py
# Copyright (C) <NAME>
# See LICENSE for details.
"""
pyOpenSSL - A simple wrapper around the OpenSSL library
"""
from OpenSSL import SSL, crypto
from OpenSSL.version import (
__author__,
__copyright__,
__email__,
__license__,
__summary__,
__title__,
__uri__,
__version__,
)
# Public API re-exported by the package; keep in sync with the imports above.
__all__ = [
    "SSL",
    "crypto",
    "__author__",
    "__copyright__",
    "__email__",
    "__license__",
    "__summary__",
    "__title__",
    "__uri__",
    "__version__",
]
| StarcoderdataPython |
25992 | <gh_stars>0
# Copyright (C) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import voluptuous
import yaml
from nodepool import config as nodepool_config
from nodepool import provider_manager
from nodepool import tests
class TestShadeIntegration(tests.IntegrationTestCase):
    """Integration tests for nodepool's os-client-config (clouds.yaml) handling."""

    def _cleanup_cloud_config(self):
        # Remove the temporary clouds.yaml written by _use_cloud_config.
        os.remove(self.clouds_path)

    def _use_cloud_config(self, config):
        """Write *config* to a temp clouds.yaml and point os-client-config at it."""
        config_dir = fixtures.TempDir()
        self.useFixture(config_dir)
        self.clouds_path = os.path.join(config_dir.path, 'clouds.yaml')
        # Monkey-patch os-client-config so it only sees our temp file.
        self.useFixture(fixtures.MonkeyPatch(
            'os_client_config.config.CONFIG_FILES',
            [self.clouds_path]))
        with open(self.clouds_path, 'w') as h:
            yaml.safe_dump(config, h)
        self.addCleanup(self._cleanup_cloud_config)

    def test_nodepool_provider_config_bad(self):
        """A provider config without a cloud entry must fail schema validation."""
        # nodepool doesn't support clouds.yaml-less config anymore
        # Assert that we get a nodepool error and not an os-client-config
        # error.
        self.assertRaises(
            voluptuous.MultipleInvalid,
            self.setup_config, 'integration_noocc.yaml')

    def test_nodepool_occ_config(self):
        """The provider manager must authenticate with the clouds.yaml entry."""
        configfile = self.setup_config('integration_occ.yaml')
        auth_data = {'username': 'os_real',
                     'project_name': 'os_real',
                     'password': '<PASSWORD>',
                     'auth_url': 'os_real'}
        occ_config = {'clouds': {'real-cloud': {'auth': auth_data}}}
        self._use_cloud_config(occ_config)
        config = nodepool_config.loadConfig(configfile)
        self.assertIn('real-provider', config.providers)
        pm = provider_manager.get_provider(
            config.providers['real-provider'], use_taskmanager=False)
        pm.start()
        self.assertEqual(pm._client.auth, auth_data)

    def test_nodepool_occ_config_reload(self):
        """updateConfig must pick up credential changes in clouds.yaml."""
        configfile = self.setup_config('integration_occ.yaml')
        auth_data = {'username': 'os_real',
                     'project_name': 'os_real',
                     'password': '<PASSWORD>',
                     'auth_url': 'os_real'}
        occ_config = {'clouds': {'real-cloud': {'auth': auth_data}}}
        self._use_cloud_config(occ_config)
        pool = self.useNodepool(configfile, watermark_sleep=1)
        pool.updateConfig()
        provider_manager = pool.config.provider_managers['real-provider']
        self.assertEqual(provider_manager._client.auth, auth_data)
        # update the config
        auth_data['password'] = '<PASSWORD>'
        os.remove(self.clouds_path)
        with open(self.clouds_path, 'w') as h:
            yaml.safe_dump(occ_config, h)
        pool.updateConfig()
        provider_manager = pool.config.provider_managers['real-provider']
        self.assertEqual(provider_manager._client.auth, auth_data)
| StarcoderdataPython |
1798533 | import torch
import torch.utils.data as Data
SCR_LEN = 5  # maximum length of the encoder input (name likely a typo for SRC_LEN)
TGT_LEN = 6  # maximum length of the decoder input
def create_data():
    """Build the toy de->en corpus plus its vocabularies.

    Special tokens: S marks the decoder input start, E marks the decoder
    output end, and P pads sentences (always index 0).

    Returns:
        ([sentences, src_vocab, tgt_vocab], src_vocab_size, tgt_vocab_size, idx2word)
    """
    # Each entry is [encoder_input, decoder_input, decoder_output].
    sentences = [
        ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'],
        ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E'],
    ]
    # Padding token P gets index 0 in both vocabularies.
    src_tokens = ['P', 'ich', 'mochte', 'ein', 'bier', 'cola']
    tgt_tokens = ['P', 'i', 'want', 'a', 'beer', 'coke', 'S', 'E', '.']
    src_vocab = {word: idx for idx, word in enumerate(src_tokens)}
    tgt_vocab = {word: idx for idx, word in enumerate(tgt_tokens)}
    idx2word = dict(enumerate(tgt_vocab))
    return [sentences, src_vocab, tgt_vocab], len(src_vocab), len(tgt_vocab), idx2word
def make_data(sentences, src_vocab, tgt_vocab):
    """Convert token sentences into LongTensors of vocabulary indices.

    Returns (enc_inputs, dec_inputs, dec_outputs), each of shape
    (num_sentences, sentence_length).
    """
    def encode(text, vocab):
        # Map each whitespace-separated token to its vocabulary index.
        return [vocab[token] for token in text.split()]

    enc_inputs = [encode(enc, src_vocab) for enc, _, _ in sentences]
    dec_inputs = [encode(dec_in, tgt_vocab) for _, dec_in, _ in sentences]
    dec_outputs = [encode(dec_out, tgt_vocab) for _, _, dec_out in sentences]
    return (torch.LongTensor(enc_inputs),
            torch.LongTensor(dec_inputs),
            torch.LongTensor(dec_outputs))
class MyDataSet(Data.Dataset):
    """Wrap encoder/decoder tensors as an indexable torch Dataset."""

    def __init__(self, enc_inputs, dec_inputs, dec_outputs):
        super().__init__()
        self.enc_inputs = enc_inputs
        self.dec_inputs = dec_inputs
        self.dec_outputs = dec_outputs

    def __len__(self):
        # Number of samples = first dimension of the encoder batch.
        return self.enc_inputs.shape[0]

    def __getitem__(self, idx):
        # One training triple: (encoder input, decoder input, decoder target).
        return self.enc_inputs[idx], self.dec_inputs[idx], self.dec_outputs[idx]
# if __name__ == '__main__':
# inputs, src_vocab_size, tgt_vocab_size, idx2word = create_data()
# enc_inputs, dec_inputs, dec_outputs = make_data(*inputs)
#
# data_loader = Data.DataLoader(dataset=MyDataSet(enc_inputs, dec_inputs, dec_outputs),
# batch_size=2,
# shuffle=True)
#
# for data in enumerate(data_loader):
# print(data)
| StarcoderdataPython |
1630497 | # Necessary imports. Provides library functions to ease writing tests.
from lib import prebuild, testcase, SUBMITTY_TUTORIAL_DIR
import subprocess
import os
import glob
import shutil
############################################################################
# COPY THE ASSIGNMENT FROM THE SAMPLE ASSIGNMENTS DIRECTORIES
SAMPLE_ASSIGNMENT_CONFIG = SUBMITTY_TUTORIAL_DIR + "/examples/12_system_calls/config"
SAMPLE_SUBMISSIONS = SUBMITTY_TUTORIAL_DIR + "/examples/12_system_calls/submissions/"
@prebuild
def initialize(test):
    """Set up the test case: recreate a clean data/ dir and copy the sample config.

    Runs once before the autograding test cases (via @prebuild).
    """
    try:
        os.mkdir(os.path.join(test.testcase_path, "assignment_config"))
    except OSError:
        # Directory already exists from a previous run; that's fine.
        pass
    try:
        data_path = os.path.join(test.testcase_path, "data")
        if os.path.isdir(data_path):
            shutil.rmtree(data_path)
        os.mkdir(data_path)
    except OSError:
        pass
    subprocess.call(["cp",
                     os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "config.json"),
                     os.path.join(test.testcase_path, "assignment_config")])
############################################################################
def cleanup(test):
    """Remove generated artifacts (C sources, test dirs, results) from data/."""
    data_dir = os.path.join(test.testcase_path, "data")
    # (rm flag, glob pattern) pairs; test* directories need a recursive remove.
    for flag, pattern in (("-f", "*c"), ("-rf", "test*"), ("-f", "results*")):
        matches = glob.glob(os.path.join(data_dir, pattern))
        subprocess.call(["rm", flag] + matches)
@testcase
def no_fork(test):
    """Grade the no_fork.c submission and diff grades/outputs against expected files."""
    cleanup(test)
    subprocess.call(["cp",os.path.join(SAMPLE_SUBMISSIONS, "no_fork.c"),
                     os.path.join(test.testcase_path, "data")])
    test.run_compile()
    test.run_run()
    test.run_validator()
    # '-b' ignores whitespace differences in the grade diff.
    test.diff("grade.txt","no_fork_grade.txt","-b")
    test.json_diff("results.json","no_fork_results.json")
    test.diff("test02/STDOUT.txt","no_fork_STDOUT.txt")
    test.diff("test03/STDOUT.txt","no_fork_STDOUT.txt")
    test.empty_file("test02/STDERR.txt")
    test.empty_file("test03/STDERR.txt")
    test.empty_file("test02/execute_logfile.txt")
    test.empty_file("test03/execute_logfile.txt")
@testcase
def serial_fork(test):
    """Grade the serial_fork.c submission (STDOUT diffs disabled: fork output is nondeterministic)."""
    cleanup(test)
    subprocess.call(["cp",os.path.join(SAMPLE_SUBMISSIONS, "serial_fork.c"),
                     os.path.join(test.testcase_path, "data")])
    test.run_compile()
    test.run_run()
    test.run_validator()
    test.diff("grade.txt","serial_fork_grade.txt","-b")
    test.json_diff("results.json","serial_fork_results.json")
    #test.diff("test02/STDOUT.txt","serial_fork_10_STDOUT.txt")
    #test.diff("test03/STDOUT.txt","serial_fork_30_STDOUT.txt")
    test.empty_file("test02/STDERR.txt")
    test.empty_file("test03/STDERR.txt")
    test.empty_file("test02/execute_logfile.txt")
    test.empty_file("test03/execute_logfile.txt")
@testcase
def parallel_fork(test):
    """Grade the parallel_fork.c submission (STDOUT diffs disabled: fork output is nondeterministic)."""
    cleanup(test)
    subprocess.call(["cp",os.path.join(SAMPLE_SUBMISSIONS, "parallel_fork.c"),
                     os.path.join(test.testcase_path, "data")])
    test.run_compile()
    test.run_run()
    test.run_validator()
    test.diff("grade.txt","parallel_fork_grade.txt","-b")
    test.json_diff("results.json","parallel_fork_results.json")
    #test.diff("test02/STDOUT.txt","parallel_fork_10_STDOUT.txt")
    #test.diff("test03/STDOUT.txt","parallel_fork_30_STDOUT.txt")
    test.empty_file("test02/STDERR.txt")
    test.empty_file("test03/STDERR.txt")
    test.empty_file("test02/execute_logfile.txt")
    test.empty_file("test03/execute_logfile.txt")
@testcase
def tree_fork(test):
    """Grade the tree_fork.c submission (STDOUT diffs disabled: fork output is nondeterministic)."""
    cleanup(test)
    subprocess.call(["cp",os.path.join(SAMPLE_SUBMISSIONS, "tree_fork.c"),
                     os.path.join(test.testcase_path, "data")])
    test.run_compile()
    test.run_run()
    test.run_validator()
    test.diff("grade.txt","tree_fork_grade.txt","-b")
    test.json_diff("results.json","tree_fork_results.json")
    #test.diff("test02/STDOUT.txt","tree_fork_10_STDOUT.txt")
    #test.diff("test03/STDOUT.txt","tree_fork_30_STDOUT.txt")
    test.empty_file("test02/STDERR.txt")
    test.empty_file("test03/STDERR.txt")
    test.empty_file("test02/execute_logfile.txt")
    test.empty_file("test03/execute_logfile.txt")
#@testcase
#def fork_bomb_print(test):
# cleanup(test)
# subprocess.call(["cp",os.path.join(SAMPLE_SUBMISSIONS, "fork_bomb_print.c"),
# os.path.join(test.testcase_path, "data")])
# test.run_compile()
# test.run_run()
# test.run_validator()
# test.diff("grade.txt","fork_bomb_print_grade.txt","-b")
# test.json_diff("results.json","fork_bomb_print_results.json")
# #test.diff("test02/STDOUT.txt","fork_bomb_print_10_STDOUT.txt")
# #test.diff("test03/STDOUT.txt","fork_bomb_print_30_STDOUT.txt")
# test.empty_file("test02/STDERR.txt")
# test.empty_file("test03/STDERR.txt")
# test.empty_file("test02/execute_logfile.txt")
# test.empty_file("test03/execute_logfile.txt")
| StarcoderdataPython |
12044 | #!/usr/bin/env python3
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
import pickle
import shutil
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
TB_DIR = os.path.join(os.getcwd(), "gan-tb")
SPRITE_IMAGE_FILENAME = os.path.join(TB_DIR, "sprite.png")
def save_tb_embeddings(embeddings_filename):
    """Export GAN embeddings to a TensorBoard projector log directory.

    Loads a pickle containing 'images' and 'zs', writes a sprite sheet of all
    samples, and checkpoints the z vectors as an embedding variable for the
    TensorBoard projector.

    NOTE(review): pickle.load executes arbitrary code - only open trusted
    embedding files. Requires TensorFlow 1.x (uses tf.contrib / tf.Session).
    """
    # NOTE(review): this handle is never closed; consider a with-statement.
    f = open(embeddings_filename, 'rb')
    embeddings = pickle.load(f)
    images = embeddings['images']
    zs = embeddings['zs']
    # overwrite Tensorboard log dir if necessary
    if os.path.exists(TB_DIR):
        shutil.rmtree(TB_DIR)
    os.makedirs(TB_DIR)
    # create grid image
    img_width, img_height = save_sprite_image(images)
    with tf.device('cpu:0'):
        # create embedding var
        embedding_var = tf.Variable(initial_value=zs)
        # save projector config
        summary_writer = tf.summary.FileWriter(TB_DIR)
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name
        embedding.sprite.image_path = SPRITE_IMAGE_FILENAME
        embedding.sprite.single_image_dim.extend([img_width, img_height])
        projector.visualize_embeddings(summary_writer, config)
        # save embeddings
        sess = tf.Session()
        sess.run(embedding_var.initializer)
        saver = tf.train.Saver([embedding_var])
        saver.save(sess, os.path.join(TB_DIR, 'model.ckpt'))
def save_sprite_image(images):
    """Tile *images* into one sprite sheet and save it to SPRITE_IMAGE_FILENAME.

    :param images: array of shape (n, height, width, channels), values
        convertible to uint8.
    :return: (img_width, img_height) of a single tile, as required by the
        TensorBoard projector sprite config.
    """
    n_embeddings = images.shape[0]
    grid_cols = int(np.sqrt(n_embeddings))
    grid_rows = int(np.ceil(float(n_embeddings) / grid_cols))
    img_height, img_width, img_channels = images[0].shape
    # Use zeros (not np.empty) so any unused trailing grid cells come out
    # black instead of exposing uninitialized memory in the saved sprite.
    grid_image = np.zeros((img_height * grid_rows, img_width * grid_cols, img_channels))
    for i, image in enumerate(images):
        # BUG FIX: '/' is true division in Python 3, which made `row` (and
        # therefore `y`) a float and broke the slice assignment below; use
        # integer floor division.
        row = i // grid_cols
        col = i % grid_cols
        x = img_width * col
        y = img_height * row
        grid_image[y:y + img_height, x:x + img_width] = image
    grid_image = PIL.Image.fromarray(grid_image.astype('uint8'))
    grid_image.save(SPRITE_IMAGE_FILENAME)
    return img_width, img_height
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
    # Positional arguments
    parser.add_argument(
        'embeddings_file',
        help='Embeddings pickle file')
    args = vars(parser.parse_args())
    try:
        save_tb_embeddings(
            args['embeddings_file'],
        )
    except Exception as e:
        # BUG FIX: Python 3 exceptions have no `.message` attribute, so the
        # original error reporting itself raised AttributeError; format the
        # exception object directly instead.
        print('%s: %s' % (type(e).__name__, e))
        raise
| StarcoderdataPython |
def print_line():
    """Print a horizontal separator line 60 characters wide."""
    separator = "-" * 60
    print(separator)
def print_full_header(build_step_name):
    """Print a separator line followed by the build step banner."""
    print_line()
    print(f" Build Step /// {build_step_name}")
def print_footer():
    """Print the closing separator line."""
    print_line()
def log(level, data):
    """Print *data* prefixed with its severity *level*."""
    print(f"{level}: {data}")
| StarcoderdataPython |
3308317 | # Importar librerías
import numpy as np
from matplotlib import pyplot as plt
# Definir e incluir nuevas funciones al cuaderno
def _buscar_intervalos(fun, ini, fin):
""" Método para buscar intervalos en los que ocurra cambio de signo.
## Parámetros:
fun (function): función para analizar.
ini (int): inicio del análisis.
fin (int): limite final del análisis.
## Devoluciones:
I (list): Lista de tuplas con los intervalos donde hay un cero.
"""
i = ini - 1 # Variable para contar iteraciones
I = [] # Variable para almacenar los intervalos
while i < fin:
i += 1
if fun(i) == 0:
I.append((i, i))
elif fun(i) * fun(i+1) < 0:
I.append((i, i+1))
else:
pass
return I
# Función de trabajo
def F(x):
    """Evaluate f(x) = e^(3x - 12) + x*cos(3x) - x^2 + 7.15."""
    return np.exp(3 * x - 12) + x * np.cos(3 * x) - x ** 2 + 7.15
def pos(inicio, final, porcentaje):
    """Return the point *porcentaje* percent of the way from *inicio* to *final*."""
    return inicio + (final - inicio) * porcentaje / 100
# Study interval
a = -4
b = 6
Fa = F(a)
Fb = F(b)
x = np.linspace(a, b, 1024)
Intervalos = _buscar_intervalos(F, a, b)
# Plot the function with the detected root intervals annotated
plt.axhline(y = 0, color="gray")
plt.plot(x, F(x))
plt.text(pos(a, b, 5), pos(Fa, Fb, 95), f"Intervalos = {Intervalos}")
plt.grid()
plt.show()
# Bisection method
# ----------------
Tolerancia = 1e-4
Iteraciones = 20
# Bracketing limits taken from the first detected interval
Xi=Intervalos[0][0]
Xs=Intervalos[0][1]
# Iterate: halve the bracket until it is narrower than the tolerance
for No in range(Iteraciones):
    if abs(Xs-Xi) < Tolerancia: break
    Xm = (Xs+Xi)/2
    ea = abs(Xs-Xi)/2
    # Plot the current bracket (red) and midpoint (blue) each iteration
    plt.axhline(y = 0, color="gray")
    plt.plot(x, F(x))
    plt.axvline(x = Xs, color="#F00")
    plt.axvline(x = Xm, color="#00F")
    plt.axvline(x = Xi, color="#F00")
    plt.text(pos(a, b, 40), pos(Fa, Fb, 90), f"Xm = {Xm}\nea = {ea}")
    plt.grid()
    plt.show()
    # Keep the half-interval whose endpoints have opposite signs
    if F(Xi)*F(Xm) < 0:
        Xs = Xm
    else:
        Xi = Xm
| StarcoderdataPython |
3380361 | <reponame>FerdinandKlingenberg/TestAvSentinel-2Python
# -*- coding: utf-8 -*-
import rasterio
import cv2
#import numpy as np
# Output path for the 8-bit composite produced below.
outfile = r'C:\Users\Ferdinand\Documents\imageEnhance_to24bit\Resultater\rasterio\GDAL_Composite8bitWithOpenCV.tif'
# Paths to the Sentinel-2 band files (B04=red, B03=green, B02=blue)
b4 = r'C:\Users\Ferdinand\Documents\imageEnhance_to24bit\Originalscener\B04.jp2'
b3 = r'C:\Users\Ferdinand\Documents\imageEnhance_to24bit\Originalscener\B03.jp2'
b2 = r'C:\Users\Ferdinand\Documents\imageEnhance_to24bit\Originalscener\B02.jp2'
gdalLaget = r'C:\Users\Ferdinand\Documents\imageEnhance_to24bit\Resultater\OTB\exportOTB_Composite8bit.tif'
# open the bands (I can't believe how easy is this with rasterio!)
# NOTE(review): RED/GREEN/BLUE are read but never used below; only the
# pre-built composite COMP is converted.
with rasterio.open(b4) as red:
    RED = red.read()
with rasterio.open(b3) as green:
    GREEN = green.read()
with rasterio.open(b2) as blue:
    BLUE = blue.read()
with rasterio.open(gdalLaget) as comp:
    COMP = comp.read()
# Leftover NDVI experiment kept for reference (not executed):
#ndvi = (NIR-RED)/(NIR+RED)
#ndvi = (NIR.astype(float) - RED.astype(float)) / (NIR+RED)
#print(ndvi.min(), ndvi.max()) The problem is alredy here
# Rescale the 16-bit composite to 8 bits with OpenCV (255/65535 scale factor).
outputImg8U = cv2.convertScaleAbs(COMP, alpha=(255.0/65535.0))
# Reuse the composite's georeferencing metadata, switching to GTiff/uint8.
profile = comp.meta
profile.update(driver='GTiff')
#profile.update(dtype=rasterio.float32)
profile.update(dtype=rasterio.uint8)
#profile.update(count=3)
with rasterio.open(outfile, 'w', **profile) as dst:
    # dst.write(outputImg8U)
    dst.write(outputImg8U.astype(rasterio.uint8))
    # dst.write(ndvi.astype(rasterio.float32))
| StarcoderdataPython |
3342263 | import boto3
def describe_default_vpc(client):
    """Return the VPC id of the account's default VPC, or '' if there is none.

    :param client: a boto3 EC2 client (anything exposing describe_vpcs).
    """
    response = client.describe_vpcs(
        Filters=[{
            'Name': 'isDefault',
            'Values': [
                'true',
            ]
        }],
    )
    # BUG FIX: an account without a default VPC returns {'Vpcs': []}; the
    # original `response.get('Vpcs', [{}])[0]` then raised IndexError because
    # the default only applied when the key was missing entirely.
    vpcs = response.get('Vpcs') or [{}]
    return vpcs[0].get('VpcId', '')
| StarcoderdataPython |
105795 | import json
import os
from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.http import HttpResponse, JsonResponse
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from .models import (
MediaFile,
ImagePrediction,
AudioPrediction,
VideoPrediction,
VideoLabel,
AudioLabel,
Subtitle,
)
# Create your views here.
def index(request):
    """Plain-text landing page for the labels app."""
    message = "Hello, world. You're at the labels index."
    return HttpResponse(message)
def predictions(request, prediction_id: int):
    """Placeholder detail view for a single prediction."""
    return HttpResponse("response for prediction %s" % prediction_id)
def files_index(request):
    """List every MediaFile using the media_files/index.html template."""
    media_files = MediaFile.objects.all()
    return render(request, "media_files/index.html", {"file_list": media_files})
def files_show(request, file_id: int):
    """Render the detail page for one MediaFile.

    Collects ground-truth labels (label rows without an attached prediction),
    model predictions for image/video/audio, and subtitles, and passes them to
    the template both as querysets and as a JSON-serializable dict.
    """
    file = get_object_or_404(MediaFile, pk=file_id)
    # image_labels = file.imagelabel_set.all()
    # imageprediction_set.all() will not work here because of the way the predictions sub-class labels
    # Ground-truth labels are label rows with no prediction subclass attached.
    video_labels = VideoLabel.objects.filter(
        media_file__id=file_id, videoprediction__isnull=True
    )
    audio_labels = AudioLabel.objects.filter(
        media_file__id=file_id, audioprediction__isnull=True
    )
    image_predictions = ImagePrediction.objects.filter(media_file__id=file_id)
    video_predictions = VideoPrediction.objects.filter(media_file__id=file_id)
    audio_predictions = AudioPrediction.objects.filter(media_file__id=file_id)
    subtitles = Subtitle.objects.filter(media_file__id=file_id)
    data = {
        "title": file.name,
        "subtitles": [s for s in subtitles],
        # Video labels carry a bounding box; audio labels carry a duration.
        "labels": [
            {
                "classifier": l.classification.name,
                "time": l.time,
                "x": l.x,
                "y": l.y,
                "width": l.width,
                "height": l.height,
            }
            for l in video_labels
        ]
        + [
            {
                "classifier": l.classification.name,
                "time": l.time,
                "duration": l.duration,
            }
            for l in audio_labels
        ],
        # NOTE(review): the image-prediction dicts below have no "time" key,
        # so sorted(..., key=lambda p: p["time"]) raises KeyError whenever any
        # image predictions exist for this file - confirm intended.
        "predictions": sorted(
            [
                {
                    "confidence": p.confidence,
                    "classifier": p.classification.name,
                    "model": str(p.model_version),
                    "x": p.x,
                    "y": p.y,
                    "width": p.width,
                    "height": p.height,
                }
                for p in image_predictions
            ]
            + [
                {
                    "confidence": p.confidence,
                    "classifier": p.classification.name,
                    "model": str(p.model_version),
                    "time": p.time,
                    "x": p.x,
                    "y": p.y,
                    "width": p.width,
                    "height": p.height,
                }
                for p in video_predictions
            ]
            + [
                {
                    "confidence": p.confidence,
                    "classifier": p.classification.name,
                    "model": str(p.model_version),
                    "time": p.time,
                    "duration": p.duration,
                }
                for p in audio_predictions
            ],
            key=lambda p: p["time"],
        ),
    }
    if file.url:
        data["sourceUrl"] = file.url
    return render(
        request,
        "media_files/show.html",
        {
            "file": file,
            "image_predictions": image_predictions,
            "audio_predictions": audio_predictions,
            "video_predictions": video_predictions,
            "data": data,
            "json_data": json.dumps(data),
        },
    )
def files_compare(request, file_id: int):
    """Render a side-by-side comparison of ground-truth labels and the
    predictions of every model version for one media file."""
    file = get_object_or_404(MediaFile, pk=file_id)
    # Ground truths: labels without an associated prediction row, grouped
    # by their timecode.
    ground_truth_timecodes = {}
    for label in list(
        VideoLabel.objects.filter(media_file__id=file_id, videoprediction__isnull=True)
    ) + list(
        AudioLabel.objects.filter(media_file__id=file_id, audioprediction__isnull=True)
    ):
        timecode = label.timecode()
        if timecode not in ground_truth_timecodes:
            ground_truth_timecodes[timecode] = []
        # Audio labels are distinguished by having a "duration" attribute.
        label_data = {
            "type": "audio" if hasattr(label, "duration") else "video",
            "file": label.file,
            "tag": label.classification.name,
        }
        if hasattr(label, "duration"):
            label_data["duration"] = label.duration
        else:
            label_data["box"] = {
                "x1": label.x,
                "y1": label.y,
                "x2": label.x + label.width,
                "y2": label.y + label.height,
            }
        ground_truth_timecodes[timecode].append(label_data)
    # Predictions, grouped per model version.
    video_predictions = VideoPrediction.objects.filter(media_file__id=file_id)
    audio_predictions = AudioPrediction.objects.filter(media_file__id=file_id)
    model_versions = list(
        set(
            [p.model_version for p in video_predictions]
            + [p.model_version for p in audio_predictions]
        )
    )
    predictions_data = []
    for model_version in model_versions:
        is_video_model = (
            VideoPrediction.objects.filter(
                media_file__id=file_id, model_version__id=model_version.id
            ).count()
            > 0
        )
        timecodes = {}
        for prediction in list(
            VideoPrediction.objects.filter(
                media_file__id=file_id, model_version__id=model_version.id
            )
        ) + list(
            AudioPrediction.objects.filter(
                media_file__id=file_id, model_version__id=model_version.id
            )
        ):
            timecode = prediction.timecode()
            if timecode not in timecodes:
                timecodes[timecode] = []
            prediction_dict = {
                "file": prediction.file,
                "tag": prediction.classification.name,
                "score": prediction.confidence,
            }
            if hasattr(prediction, "duration"):
                prediction_dict["duration"] = prediction.duration
            else:
                # Fix: a trailing comma previously wrapped this dict in a
                # 1-tuple, inconsistent with the ground-truth "box" format.
                prediction_dict["box"] = {
                    "x1": prediction.x,
                    "x2": prediction.width + prediction.x,
                    "y1": prediction.y,
                    "y2": prediction.height + prediction.y,
                }
            timecodes[timecode].append(prediction_dict)
        inference_data = {
            "title": file.name,
            "url": file.url,
            "modelType": model_version.model.name,
            "modelVersion": model_version.version,
            "modelDescription": model_version.description,
            "modelClass": "video" if is_video_model else "audio",
            "timecodes": timecodes,
        }
        predictions_data.append(inference_data)
    return render(
        request,
        "media_files/compare.html",
        {
            # Escape backslashes so the template can embed the JSON verbatim.
            "data": json.dumps(
                {
                    "groundTruth": {
                        "title": file.name,
                        "url": file.url,
                        "timecodes": ground_truth_timecodes,
                    },
                    "predictions": predictions_data,
                },
                indent=2,
            ).replace("\\", "\\\\")
        },
    )
# return JsonResponse(
# {
# "groundTruth": {
# "title": file.name,
# "url": file.url,
# "timecodes": ground_truth_timecodes,
# },
# "predictions": predictions_data,
# }
# )
| StarcoderdataPython |
1694582 | # MIT License
#
# Copyright (c) 2020 Gcom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# verRegister.py
#
# This module define a object that deal with Revisions
import asyncio
import django.db.utils
import re
import gitlab
import json
import manager.master.configs as cfg
from concurrent.futures import ProcessPoolExecutor
from asgiref.sync import sync_to_async
from django.http import HttpRequest
from manager.basic.info import Info
from typing import Any, Optional, Dict, cast
from manager.basic.type import Error
from manager.basic.mmanager import ModuleDaemon
from manager.models import Revisions, make_sure_mysql_usable
revSyncner = None
M_NAME = "RevSyncner"
class RevSync(ModuleDaemon):
    """Daemon that mirrors GitLab commit history into the ``Revisions`` table.

    On startup (:meth:`begin`) the table is rebuilt from the remote project;
    afterwards merge-request webhook requests queued via :meth:`revNewPush`
    are persisted asynchronously by the :meth:`run` loop.
    """

    def __init__(self) -> None:
        global M_NAME
        ModuleDaemon.__init__(self, M_NAME)
        self._stop = False
        # This queue is filled with new revisions merged into the repository
        # after RevSync started; RevSync stores each of them in the model so
        # the revision database remains up to date.
        self.revQueue = asyncio.Queue(10)  # type: asyncio.Queue

    async def begin(self) -> None:
        """Initialize the revision database from the remote repository."""
        await self.revDBInit()

    async def cleanup(self) -> None:
        """Nothing to release."""
        return None

    def needStop(self) -> bool:
        """Whether the daemon has been asked to stop."""
        return self._stop

    @staticmethod
    def _connectToGitlab() -> Optional[gitlab.Gitlab]:
        """Authenticate against GitLab; return None when not configured."""
        assert(cfg.config is not None)
        cfgs = cfg.config
        url = cfgs.getConfig('GitlabUrl')
        token = cfgs.getConfig('PrivateToken')
        if url == "" or token == "":
            # Fix: previously returned the `Error` type itself, which
            # contradicts the Optional[gitlab.Gitlab] return contract.
            return None
        ref = gitlab.Gitlab(url, token)
        ref.auth()
        return ref

    @staticmethod
    def _retrive_revisions() -> Optional[Any]:
        """Fetch the full commit list of the configured project, or None."""
        assert(cfg.config is not None)
        ref = RevSync._connectToGitlab()
        projId = cfg.config.getConfig("Project_ID")
        if ref is None:
            return None
        return ref.projects.get(projId).commits.list(all=True)

    # Fix: decorated as @staticmethod — the function has no `self` parameter
    # and is invoked as RevSync.revTransfer(rev, tz).
    @staticmethod
    async def revTransfer(rev, tz):
        """Store one GitLab commit as a ``Revisions`` row and return it.

        ``tz`` is kept for interface compatibility but is currently unused.
        """
        revision = Revisions(
            sn=rev.id, author=rev.author_name,
            comment=rev.message, dateTime=rev.committed_date)
        await sync_to_async(revision.save)()
        return revision

    # format of offset is "+08:00"
    @staticmethod
    def timeFormat(timeStr: str, offset: str) -> str:
        """Return ``timeStr`` unchanged.

        NOTE(review): the reformatting code below is unreachable due to the
        early return — confirm whether it should be re-enabled or removed.
        """
        return timeStr

        pattern = "([0-9]*-[0-9]*-[0-9]*T[0-9]*:[0-9]*:[0-9]*)"
        m = re.search(pattern, timeStr)
        if m is None:
            return ""
        formatDate = m.group()
        formatDate = formatDate.replace("T", " ")
        return formatDate + offset

    async def revDBInit(self) -> bool:
        """Rebuild the Revisions table from the remote repository.

        :raises VERSION_DB_INIT_FAILED: when the commit list cannot be fetched.
        :return: True on success, False on a database schema error.
        """
        print("RevDB Init...")
        loop = asyncio.get_running_loop()
        e = ProcessPoolExecutor()
        # Fetching the commit list can take a while; run it off the loop.
        revisions = await loop.run_in_executor(e, self._retrive_revisions)
        if revisions is None:
            raise VERSION_DB_INIT_FAILED()
        # Remove old datas of revisions cause these data may be out of date:
        # the repository may have been rebased, so its structure can be very
        # different from the data in the database. Drop everything and load
        # from the server again.
        import traceback
        import sys
        try:
            await sync_to_async(Revisions.objects.all().delete)()
            # Fill revisions just retrieved into the model.
            config = cast(Info, cfg.config)
            tz = config.getConfig('TimeZone')
            for rev in revisions:
                await RevSync.revTransfer(rev, tz)
            print("Done")
            sys.stdout.flush()
        except django.db.utils.ProgrammingError:
            traceback.print_exc()
            return False
        except Exception:
            # NOTE(review): other exceptions are only logged and init is
            # still reported as successful — confirm this is intended.
            traceback.print_exc()
        print("RevDB Init success.")
        return True

    def revNewPush(self, rev: HttpRequest) -> bool:
        """Queue a webhook request for background processing."""
        self.revQueue.put_nowait(rev)
        return True

    def gitlabWebHooksChecking(self, request: HttpRequest) -> Optional[Dict]:
        """Validate a GitLab webhook; return its JSON body for merged MRs.

        Returns None for anything that is not a merged 'Merge Request Hook'
        or that fails to parse.
        """
        headers = request.headers  # type: ignore
        if 'X-Gitlab-Event' not in headers:
            print("X-Gitlab-Event not found")
            return None
        try:
            contentType = headers['Content-Type']
            event = headers['X-Gitlab-Event']
            if contentType == 'application/json' \
               and event == 'Merge Request Hook':
                body = json.loads(request.body)  # type: ignore
                state = body['object_attributes']['state']
                if state == 'merged':
                    return body
        except Exception:
            return None
        return None

    async def _requestHandle(self, request: HttpRequest) -> bool:
        """Persist the last commit of a merged merge-request webhook.

        Premise of this code working correctly is that a merge request
        contains only one commit. If that cannot be satisfied, GitLab should
        be read when the merge event arrives, compared with the database, and
        the new revisions appended.
        """
        # Fix: removed duplicated `body = body = ...` assignment.
        body = self.gitlabWebHooksChecking(request)
        if body is None:
            return False
        last_commit = body['object_attributes']['last_commit']
        sn_ = last_commit['id']
        author_ = last_commit['author']['name']
        comment_ = last_commit['message']
        date_time_ = last_commit['timestamp']
        make_sure_mysql_usable()
        rev = Revisions(sn=sn_, author=author_, comment=comment_,
                        dateTime=date_time_)
        await sync_to_async(rev.save, thread_sensitive=True)()
        return True

    # Processing database-related operations in the background keeps the
    # response to the request's origin quick; responsibility gains more
    # benefit if the operation is complicated.
    async def run(self) -> None:
        """Consume queued webhook requests until asked to stop."""
        while True:
            if self._stop is True:
                return None
            try:
                request = await self.revQueue.get()
                await self._requestHandle(request)
            except Exception as e:
                print(e)
class VERSION_DB_INIT_FAILED(Exception):
    """Raised when the revision database cannot be initialized from GitLab."""

    def __str__(self) -> str:
        message = "Version Database failed to init"
        return message
| StarcoderdataPython |
45759 | import copy
import logging
from datetime import datetime, timedelta
from collections import namedtuple
from blinker import Signal
__all__ = [
'Event',
'TrainingMachineObserver',
'TrainingMachine',
]
logger = logging.getLogger(__name__)
class Event(dict):
    """Dictionary-backed event consumed by ``TrainingMachine.process_event``.

    Use the factory classmethods below instead of the raw constructor.
    """

    def __init__(self, type, **kwargs):
        super().__init__(type=type, **kwargs)

    @property
    def type(self):
        """The event type string, e.g. ``'input'`` or ``'pause'``."""
        return self['type']

    @property
    def index(self):
        """Character index for input/undo events; ``None`` otherwise."""
        return self.get('index')

    @property
    def char(self):
        """Typed character for input events; ``None`` otherwise."""
        return self.get('char')

    @classmethod
    def input_event(cls, index, char):
        """Create an event for character *char* typed at position *index*."""
        return cls('input', index=index, char=char)

    @classmethod
    def undo_event(cls, index):
        """ Create an undo event.

        :param index: The index right of the char that should be reverted.
        """
        return cls('undo', index=index)

    @classmethod
    def pause_event(cls):
        """Create a pause event."""
        return cls('pause')

    @classmethod
    def unpause_event(cls):
        """Create an unpause event."""
        return cls('unpause')

    @classmethod
    def restart_event(cls):
        """Create a restart event."""
        return cls('restart')
class TrainingMachineObserver(object):
    """ TrainingMachine observer interface.

    A client should implement this interface to get feedback from the machine.
    """

    def on_pause(self, sender):
        """Called when the machine enters the paused state.

        :param sender: The sending machine.
        """
        raise NotImplementedError

    def on_unpause(self, sender):
        """Called when the machine leaves the paused state (start or resume).

        :param sender: The sending machine.
        """
        raise NotImplementedError

    def on_hit(self, sender, index, typed):
        """Called when the typed character matches the expected one.

        :param sender: The sending machine.
        :param index: Index of the character that was hit.
        :param typed: The character that was typed.
        """
        raise NotImplementedError

    def on_miss(self, sender, index, typed, expected):
        """Called when the typed character does not match the expected one.

        :param sender: The sending machine.
        :param index: Index of the character that was missed.
        :param typed: The character that was typed.
        :param expected: The character that was expected.
        """
        raise NotImplementedError

    def on_undo(self, sender, index, expect):
        """ Called after a successful undo event.

        :param sender: The sending machine.
        :param index: The index that should be replaced by the expect argument.
        :param expect: The expected character.
        """
        raise NotImplementedError

    def on_end(self, sender):
        """Called when the last character of the text was typed correctly.

        :param sender: The sending machine.
        """
        raise NotImplementedError

    def on_restart(self, sender):
        """Called after the machine was reset by a restart event.

        :param sender: The sending machine.
        """
        raise NotImplementedError
class Char(object):
    """One character of the lesson text plus all key strokes typed at it."""

    # A single recorded key press: the character and the session time at
    # which it was typed.
    KeyStroke = namedtuple('KeyStroke', ['char', 'time'])

    def __init__(self, idx, char, undo_typo):
        """ Internal representation of a character in the text of a lesson.

        An additional list of all key strokes at this index is maintained.

        :param idx: The absolute index in the text starting at 0.
        :param char: The utf-8 character in the text.
        :param undo_typo: Should undos (<UNDO>) count as typos.
        """
        self._idx = idx
        self._char = char
        self._keystrokes = []
        self._undo_typo = undo_typo

    @property
    def index(self):
        """Absolute position of this character in the lesson text."""
        return self._idx

    @property
    def char(self):
        """The expected character at this position."""
        return self._char

    @property
    def hit(self):
        """ Is the last recorded key stroke a hit?

        :return: True on hit, else False.
        """
        if not self._keystrokes:
            return False
        return self._keystrokes[-1].char == self._char

    @property
    def miss(self):
        """ Is the last recorded key stroke a miss?

        :return: True on miss, else False.
        """
        return not self.hit

    @property
    def keystrokes(self):
        """All key strokes recorded at this position, in order."""
        return self._keystrokes

    def _is_typo(self, stroke):
        # An <UNDO> stroke counts as a typo only when configured to;
        # any other stroke counts when it differs from the expected char.
        if stroke.char == '<UNDO>':
            return self._undo_typo
        return stroke.char != self._char

    @property
    def typos(self):
        """Key strokes that count as typing errors."""
        return [stroke for stroke in self._keystrokes if self._is_typo(stroke)]

    def append(self, char, elapsed):
        """Record key stroke *char* typed at session time *elapsed*."""
        self._keystrokes.append(Char.KeyStroke(char, elapsed))

    def __getitem__(self, item):
        return self._keystrokes[item].char

    def __iter__(self):
        return iter(self._keystrokes)
class TrainingMachine(object):
    # (action, timestamp) entries recording start/pause/unpause/stop events;
    # used by elapsed() to subtract pause intervals from the total runtime.
    PauseEntry = namedtuple('PauseEntry', ['action', 'time'])

    def __init__(self, text, auto_unpause=False, undo_typo=False, **kwargs):
        """ Training machine.

        A client should never manipulate internal attributes on its instance.
        Additional kwargs are added to the instance dict and can later be accessed as attributes.
        Note that the logic is currently initialized with paused state. In case auto_unpause is False
        the logic must first be unpaused by passing an unpause event to start the state machine.
        If auto_unpause is True, the machine automatically switches state to input on first input event.
        In either case an on_unpause callback is made that the gui can use to detect the start of the training
        session.

        :param text: The lesson text.
        :param undo_typo: If enabled wrong undos count as typos.
        :param auto_unpause: True to enable the auto transition from pause to input on input event.
        """
        # Ensure the text ends with NL
        if not text.endswith('\n'):
            text += '\n'
        self._state_fn = self._state_pause
        self._text = [Char(i, c, undo_typo) for i, c in enumerate(text)]
        self._pause_history = list()
        self._observers = list()
        self.auto_unpause = auto_unpause
        self.undo_typo = undo_typo
        self.__dict__.update(kwargs)

    @classmethod
    def from_lesson(cls, lesson, **kwargs):
        """ Create a :class:`TrainingMachine` from the given :class:`Lesson`.

        Additional arguments are passed to the context. The lesson is appended to the context.

        :param lesson: A :class:`Lesson`.
        :return: An instance of :class:`TrainingMachine`.
        """
        return cls(lesson.text, lesson=lesson, **kwargs)

    def add_observer(self, observer):
        """ Add an observer to the given machine.

        :param observer: An object implementing the :class:`TrainingMachineObserver` interface.
        """
        if observer not in self._observers:
            self._observers.append(observer)

    def remove_observer(self, observer):
        """ Remove an observer from the given machine.

        :param observer: An object implementing the :class:`TrainingMachineObserver` interface.
        """
        self._observers.remove(observer)

    def process_event(self, event):
        """ Process external event.

        :param event: An event.
        """
        logger.debug('processing event: {}'.format(event))
        self._state_fn(event)

    @property
    def paused(self):
        """True while the pause state handler is active."""
        return self._state_fn is self._state_pause

    @property
    def running(self):
        """True while actively accepting input (not paused and not ended)."""
        return not self.paused and self._state_fn is not self._state_end

    def _keystrokes(self):
        # Yield every recorded key stroke of the session, in text order.
        for char in self._text:
            for ks in char:
                yield ks

    @property
    def keystrokes(self):
        """Number of real key strokes; '<UNDO>' markers are not counted."""
        return len([ks for ks in self._keystrokes() if ks.char != '<UNDO>'])

    @property
    def hits(self):
        """Number of characters whose latest key stroke is correct."""
        return len([char for char in self._text if char.hit])

    @property
    def progress(self):
        """Fraction of the text typed correctly so far (0.0 .. 1.0)."""
        rv = self.hits / len(self._text)
        return rv

    def elapsed(self):
        """ Get the overall runtime.

        :return: The runtime as :class:`datetime.timedelta`
        """
        if not self._pause_history:
            return timedelta(0)
        # Sort all inputs by input time
        # keystrokes = sorted(self._keystrokes(), key=lambda ks: ks.time)
        # NOTE(review): naive UTC timestamps are used throughout; all entries
        # are created via datetime.utcnow(), so the arithmetic is consistent.
        overall = datetime.utcnow() - self._pause_history[0].time
        pause_time = timedelta(0)
        # make a deep copy of the pause history
        history = copy.deepcopy(self._pause_history)
        # pop last event if we are still running or just started
        if history[-1].action in ['start', 'unpause']:
            history.pop()

        def pairs(iterable):
            # Consume the iterable two items at a time: (a, b), (c, d), ...
            it = iter(iterable)
            return zip(it, it)

        # Sum the duration of every (pause, unpause) interval and subtract it.
        for start, stop in pairs(history):
            pause_time += (stop.time - start.time)
        return overall - pause_time

    def _notify(self, method, *args, **kwargs):
        # Invoke the named callback on every registered observer.
        for observer in self._observers:
            getattr(observer, method)(self, *args, **kwargs)

    def _reset(self):
        # Return to the initial paused state and clear all typing history.
        self._state_fn = self._state_pause
        for char in self._text:
            char.keystrokes.clear()

    def _state_input(self, event):
        # State handler while the user is actively typing.
        if event.type == 'pause':
            self._state_fn = self._state_pause
            self._pause_history.append(TrainingMachine.PauseEntry('pause', datetime.utcnow()))
            self._notify('on_pause')
        elif event.type == 'undo':
            if event.index > 0:
                # Record the undo against the char left of the given index.
                self._text[event.index - 1].append('<UNDO>', self.elapsed())
                # report wrong undos if desired
                if self.undo_typo:
                    self._notify('on_miss', event.index - 1, '<UNDO>', self._text[event.index - 1].char)
                self._notify('on_undo', event.index - 1, self._text[event.index - 1].char)
        elif event.type == 'input':
            # Note that this may produce an IndexError. Let it happen! It's a bug in the caller.
            if self._text[event.index].char == event.char:  # hit
                self._text[event.index].append(event.char, self.elapsed())
                self._notify('on_hit', event.index, event.char)
                # Hitting the final character ends the session.
                if event.index == self._text[-1].index:
                    self._state_fn = self._state_end
                    self._pause_history.append(TrainingMachine.PauseEntry('stop', datetime.utcnow()))
                    self._notify('on_end')
            else:  # miss
                if self._text[event.index].char == '\n':  # misses at line ending
                    return  # TODO: Make misses on line ending configurable
                if event.char == '\n':  # 'Return' hits in line
                    # TODO: Make misses on wrong returns configurable
                    return
                self._text[event.index].append(event.char, self.elapsed())
                self._notify('on_miss', event.index, event.char, self._text[event.index].char)

    def _state_pause(self, event):
        # State handler while paused (also the initial state).
        if event.type == 'unpause' or (event.type == 'input' and self.auto_unpause):
            self._state_fn = self._state_input
            if self._pause_history:
                # Only append start time if we've already had a pause event.
                # Currently we're detecting the start view first keystroke time.
                self._pause_history.append(TrainingMachine.PauseEntry('unpause', datetime.utcnow()))
            else:
                self._pause_history.append(TrainingMachine.PauseEntry('start', datetime.utcnow()))
            self._notify('on_unpause')
            if event.type == 'input' and self.auto_unpause:
                # Auto transition to input state
                self._state_input(event)

    def _state_end(self, event):
        # State handler after the lesson was completed; only restart works.
        if event.type == 'restart':
            self._reset()
            self._notify('on_restart')
| StarcoderdataPython |
1654484 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: subscription_manifest
version_added: 1.0.0
short_description: Manage Subscription Manifests
description:
- Upload, refresh and delete Subscription Manifests
author: "<NAME> (@akofink)"
options:
manifest_path:
description:
- Path to the manifest zip file
- This parameter will be ignored if I(state=absent) or I(state=refreshed)
type: path
state:
description:
- The state of the manifest
default: present
choices:
- absent
- present
- refreshed
type: str
repository_url:
description:
- URL to retrieve content from
aliases: [ redhat_repository_url ]
type: str
extends_documentation_fragment:
- redhat.satellite.foreman
- redhat.satellite.foreman.organization
'''
EXAMPLES = '''
- name: "Upload the RHEL developer edition manifest"
redhat.satellite.subscription_manifest:
username: "admin"
password: "<PASSWORD>"
server_url: "https://satellite.example.com"
organization: "Default Organization"
state: present
manifest_path: "/tmp/manifest.zip"
'''
RETURN = ''' # '''
from ansible_collections.redhat.satellite.plugins.module_utils.foreman_helper import KatelloEntityAnsibleModule
def main():
    """Ansible entry point: upload, refresh, or delete a subscription manifest."""
    module = KatelloEntityAnsibleModule(
        argument_spec=dict(
            manifest_path=dict(type='path'),
            state=dict(default='present', choices=['absent', 'present', 'refreshed']),
            repository_url=dict(aliases=['redhat_repository_url']),
        ),
        foreman_spec=dict(
            organization=dict(type='entity', required=True, thin=False),
        ),
        required_if=[
            ['state', 'present', ['manifest_path']],
        ],
        supports_check_mode=False,
    )

    # Manifest operations can be slow; allow the background task 5 minutes.
    module.task_timeout = 5 * 60

    with module.api_connection():
        organization = module.lookup_entity('organization')
        scope = module.scope_for('organization')
        # An upstreamConsumer entry only exists when a manifest was already
        # imported for this organization.
        try:
            existing_manifest = organization['owner_details']['upstreamConsumer']
        except KeyError:
            existing_manifest = None
        if module.state == 'present':
            # Optionally update the CDN URL on the organization first.
            if 'repository_url' in module.foreman_params:
                payload = {'redhat_repository_url': module.foreman_params['repository_url']}
                org_spec = dict(id=dict(), redhat_repository_url=dict())
                organization = module.ensure_entity('organizations', payload, organization, state='present', foreman_spec=org_spec)
            try:
                with open(module.foreman_params['manifest_path'], 'rb') as manifest_file:
                    files = {'content': (module.foreman_params['manifest_path'], manifest_file, 'application/zip')}
                    params = {}
                    if 'repository_url' in module.foreman_params:
                        params['repository_url'] = module.foreman_params['repository_url']
                    params.update(scope)
                    # Upload without recording a change yet; task errors are
                    # inspected below to distinguish a no-op from a failure.
                    result = module.resource_action('subscriptions', 'upload', params, files=files, record_change=False, ignore_task_errors=True)
                    for error in result['humanized']['errors']:
                        if "same as existing data" in error:
                            # Nothing changed, but everything ok
                            break
                        if "older than existing data" in error:
                            module.fail_json(msg="Manifest is older than existing data.")
                        else:
                            module.fail_json(msg="Upload of the manifest failed: %s" % error)
                    else:
                        # No errors reported: the manifest was actually uploaded.
                        module.set_changed()
            except IOError as e:
                module.fail_json(msg="Unable to read the manifest file: %s" % e)
        elif module.desired_absent and existing_manifest:
            module.resource_action('subscriptions', 'delete_manifest', scope)
        elif module.state == 'refreshed':
            if existing_manifest:
                module.resource_action('subscriptions', 'refresh_manifest', scope)
            else:
                module.fail_json(msg="No manifest found to refresh.")


if __name__ == '__main__':
    main()
| StarcoderdataPython |
115802 | import cv2
import numpy as np
import chili_tag_detector as ctd
import sys
import time
from behaviours.box_detection.utils import calculate_angle_and_distance
#from utils import calculate_angle_and_distance
#1227^2 + 136^2 = sqrt(1524025) = 1309.83 1234.51
#900^2 +136^2 = sqrt(828496) = 910.21 1004.99
#600^2 + 136^2 = sqrt(378496) = 615.22 623.26
def get_box_distance(frame):
    '''
    This function does the detection of chili tags. It expects the image as an argument and gives back all the tags that are detected.

    Arguments:
        frame - the image coming from the camera that is used to detect the chili tags from

    Returns:
        list in list with marker_id (cluster number), distance in mm and angle
    '''
    # cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)640, height=(int)480,format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
    # ret, frame = cap.read()
    markers = ctd.detect(frame)
    # Image width is needed to compute the angle relative to the frame center.
    height, width, channels = frame.shape
    marker_distances = []
    for marker in markers:
        # marker is (tag_id, corner_coordinates); prefix the id for reporting.
        marker_id = "box_" + str(marker[0])
        print(marker_id)
        marker_coor = marker[1]
        # Opposite corners of the detected tag quad.
        # NOTE(review): x2 is taken from corner 0 and x1 from corner 2, while
        # y1 comes from corner 0 — confirm this ordering matches the corner
        # layout returned by chili_tag_detector.
        x2 = marker_coor[0][0]
        y1 = marker_coor[0][1]
        x1 = marker_coor[2][0]
        y2 = marker_coor[2][1]
        angle, distance = calculate_angle_and_distance(width,x1,x2,y1,y2)
        print("Angle " + str(angle) + " dist " + str(distance))
        marker_distances.append([marker_id, distance, angle])
    return marker_distances
if __name__ == '__main__':
    # Manual smoke test: grab a single frame from the Jetson onboard camera
    # via this GStreamer pipeline and print the detected marker distances.
    cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)640, height=(int)480,format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
    ret, frame = cap.read()
    print(get_box_distance(frame))
| StarcoderdataPython |
4813862 | import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset
from plotly.subplots import make_subplots
import plotly.graph_objects as go
def visualize3D(vis_net, x, y, dir1, dir2, dir3, len1 = 1, len2 = 1, len3 = 1, show_figure = True, save_figure = False, file_path = './temp.html'):
    """Render a 3-D volume plot of `vis_net`'s softmax confidence for class `y`
    over a grid around image `x` spanned by three orthogonal directions.

    Args:
        vis_net: Classifier; called on batches of shape (B, 3, 32, 32).
        x: Base image tensor of shape (3, 32, 32).
        y: Target class index whose softmax probability is visualized.
        dir1, dir2, dir3: Direction tensors with the same shape as `x`; they
            are normalized here and must already be pairwise orthogonal.
        len1, len2, len3: Half-extent of the grid along each direction.
        show_figure: Display the figure interactively.
        save_figure: Also write the figure as HTML to `file_path`.
        file_path: Output path used when `save_figure` is True.

    Returns:
        The plotly Figure object.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Normalize the three directions to unit length.
    print('Take three orthogonal directions')
    dir1 = dir1 / torch.norm(dir1, p=2)
    dir2 = dir2 / torch.norm(dir2, p=2)
    dir3 = dir3 / torch.norm(dir3, p=2)
    # Check pairwise orthogonality: every |<u, v>| must be near zero.
    inner_product1 = torch.abs(torch.dot(dir1.view(-1), dir2.view(-1)))
    inner_product2 = torch.abs(torch.dot(dir1.view(-1), dir3.view(-1)))
    inner_product3 = torch.abs(torch.dot(dir2.view(-1), dir3.view(-1)))
    assert (inner_product1 < 0.01).item(), "The three directions are not orthogonal"
    assert (inner_product2 < 0.01).item(), "The three directions are not orthogonal"
    assert (inner_product3 < 0.01).item(), "The three directions are not orthogonal"
    # Build a 50x50x50 grid of coefficients in the 3-D subspace.
    xx, yy, zz = np.mgrid[-len1:len1:50j, -len2:len2:50j, -len3:len3:50j]
    t = np.c_[xx.ravel(), yy.ravel(), zz.ravel()]
    vis_grid = torch.from_numpy(t).float().to(device)
    dirs_mat = torch.cat([dir1.reshape(1, -1), dir2.reshape(1, -1), dir3.reshape(1, -1)]).to(device)
    # Map grid coefficients into image space and offset by the base image x.
    x_grid = torch.mm(vis_grid, dirs_mat).reshape(len(vis_grid), 3, 32, 32).to('cpu') + x
    # Evaluate the network batch-wise; keep only the softmax score of class y.
    grid_output = []
    grid_loader = torch.utils.data.DataLoader(TensorDataset(x_grid), batch_size=64, shuffle=False, num_workers=2)
    vis_net.eval()
    softmax1 = nn.Softmax()
    for grid_points in tqdm(grid_loader):
        grid_points = grid_points[0].to(device)
        grid_ys = vis_net(grid_points)
        grid_ys = softmax1(grid_ys)
        grid_ys = grid_ys[:, y].detach().cpu().numpy()
        grid_output.append(grid_ys)
    y_pred0 = np.concatenate(grid_output)
    # Volume-render the class confidence over the grid.
    fig = go.Figure(data=go.Volume(
        x=xx.flatten(),
        y=yy.flatten(),
        z=zz.flatten(),
        value=y_pred0.flatten(),
        isomin=0,
        isomax=1,
        opacity=0.1,  # needs to be small to see through all surfaces
        surface_count=17,  # needs to be a large number for good volume rendering
    ))
    if show_figure:
        fig.show()
    if save_figure:
        # Fix: `plotly.offline` is never imported in this module (only
        # plotly.subplots and plotly.graph_objects are), so the previous
        # plotly.offline.plot call raised NameError. Use the Figure's own
        # HTML writer instead.
        fig.write_html(file_path)
    return fig
def Assert_three_orthogonal(dirs):
    """Assert that the first three direction tensors in `dirs` are pairwise
    orthogonal (|<u, v>| < 0.01 for every pair); raises AssertionError otherwise."""
    first, second, third = dirs[0], dirs[1], dirs[2]
    pairings = ((first, second), (first, third), (second, third))
    for left, right in pairings:
        overlap = torch.abs(torch.dot(left.view(-1), right.view(-1)))
        assert (overlap < 0.01).item(), "The three directions are not orthogonal"
def Compute_grid_outputs(vis_net, x, y, dirs, lens=None, resolution="high"):
    """Evaluate `vis_net`'s softmax score for class `y` on a 3-D grid around `x`.

    Args:
        vis_net: Classifier; called on batches of shape (B, 3, 32, 32).
        x: Base image tensor of shape (3, 32, 32).
        y: Class index whose softmax probability is returned.
        dirs: Three orthonormal direction tensors spanning the grid.
        lens: [[min, max], ...] coefficient range per direction
            (default [[-1, 1]] * 3).
        resolution: "high" (50^3 points), "medium" (20^3) or "low" (8^3).

    Returns:
        Tuple (xs, ys, zs, values) of flat numpy arrays: the grid coordinates
        and the class-`y` confidence at each grid point.

    Raises:
        NameError: when `resolution` is not one of the supported values.
    """
    # Fix: avoid a mutable default argument.
    if lens is None:
        lens = [[-1, 1], [-1, 1], [-1, 1]]
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Generate the visualization and data grid at the requested resolution.
    if resolution == "high":
        xx, yy, zz = np.mgrid[lens[0][0]:lens[0][1]:50j, lens[1][0]:lens[1][1]:50j, lens[2][0]:lens[2][1]:50j]
    elif resolution == "medium":
        xx, yy, zz = np.mgrid[lens[0][0]:lens[0][1]:20j, lens[1][0]:lens[1][1]:20j, lens[2][0]:lens[2][1]:20j]
    elif resolution == "low":
        xx, yy, zz = np.mgrid[lens[0][0]:lens[0][1]:8j, lens[1][0]:lens[1][1]:8j, lens[2][0]:lens[2][1]:8j]
    else:
        raise NameError('The resolution has to be either high, medium, or low.')
    t = np.c_[xx.ravel(), yy.ravel(), zz.ravel()]
    vis_grid = torch.from_numpy(t).float().to(device)
    dirs_mat = torch.cat([dirs[0].reshape(1, -1), dirs[1].reshape(1, -1), dirs[2].reshape(1, -1)]).to(device)
    # Map grid coefficients into image space, then offset by the base image.
    x_grid = torch.mm(vis_grid, dirs_mat).reshape(len(vis_grid), 3, 32, 32).to('cpu')
    x_grid = x_grid + x
    # Evaluate the network batch-wise, keeping the softmax score of class y.
    grid_output = []
    grid_loader = torch.utils.data.DataLoader(TensorDataset(x_grid), batch_size=64, shuffle=False, num_workers=2)
    vis_net.eval()
    softmax1 = nn.Softmax()
    for grid_points in tqdm(grid_loader):
        grid_points = grid_points[0].to(device)
        grid_ys = vis_net(grid_points)
        grid_ys = softmax1(grid_ys)
        grid_ys = grid_ys[:, y].detach().cpu().numpy()
        grid_output.append(grid_ys)
    y_pred0 = np.concatenate(grid_output)
    return xx.flatten(), yy.flatten(), zz.flatten(), y_pred0.flatten()
def find_specific_class(specific_class, labels):
    """Return the index of the first entry in `labels` equal to
    `specific_class`, or -1 when no entry matches.

    Fix: the original left the loop variable at the last index when no entry
    matched, so the caller's `== -1` "not found" check only worked for empty
    label tensors and otherwise silently selected the wrong image.
    """
    for img_ind in range(labels.shape[0]):
        if labels[img_ind] == specific_class:
            return img_ind
    return -1
def run_many(PGD_attack,
             data_loader,
             model,
             subplot_grid = None,
             num_adv_directions = 1,
             lens = None,
             resolution = "high",
             height = 1000,
             width = 1000,
             show_figure = False,
             save_figure = False,
             file_path = './temp.html',
             specific_class = -1,
             title = "",
             if_back_to_cpu = False):
    """Plot a grid of 3-D decision-surface visualizations for several images.

    For each subplot one image is taken from `data_loader`, three orthonormal
    directions are built (0, 1 or 3 of them adversarial via `PGD_attack`),
    and the class-confidence volume of `model` around that image is rendered.

    Args:
        PGD_attack: Attack object; invoked as PGD_attack(images, wrong_labels).
        data_loader: Iterable of (images, labels) batches.
        model: Classifier passed to Compute_grid_outputs.
        subplot_grid: [rows, cols] of the subplot grid (default [2, 2]).
        num_adv_directions: 0, 1 or 3 adversarial directions per image.
        lens: Axis ranges for the grid (default [[-1, 1]] * 3).
        resolution: "high", "medium" or "low" grid resolution.
        height, width: Figure size in pixels.
        show_figure: Display the assembled figure.
        save_figure: Also write the figure as HTML to `file_path`.
        file_path: Output path used when `save_figure` is True.
        specific_class: Only visualize images with this label (-1 for any).
        title: Experiment name appended to the figure title.
        if_back_to_cpu: Move each batch to CPU before processing.

    Returns:
        The assembled plotly Figure.
    """
    # Fix: avoid mutable default arguments.
    if subplot_grid is None:
        subplot_grid = [2, 2]
    if lens is None:
        lens = [[-1, 1], [-1, 1], [-1, 1]]
    # Create a figure grid of volume subplots.
    fig = make_subplots(
        rows=subplot_grid[0], cols=subplot_grid[1],
        specs=[[{'type': 'volume'} for _ in range(subplot_grid[1])] for ind2 in range(subplot_grid[0])])
    num_sub_figures_plotted = 0
    for i, (images, labels) in enumerate(data_loader):
        if if_back_to_cpu:
            images = images.cpu()
            labels = labels.cpu()
        num_figures_3D = subplot_grid[0] * subplot_grid[1]
        if num_sub_figures_plotted < num_figures_3D:
            print(f"Plotting figure {num_sub_figures_plotted+1}/{num_figures_3D}.")
            if specific_class == -1:
                # No specific class requested: take the first image.
                img_ind = 0
            else:
                img_ind = find_specific_class(specific_class, labels)
            if img_ind == -1:
                # This batch contains no image of the requested class.
                print("No img of label {0}! Go to the next batch.".format(specific_class))
                # So, go to the next batch.
                continue
            x = images[img_ind]
            y = labels[img_ind]
            dirs = [0, 0, 0]
            if num_adv_directions == 0:
                # All three directions random.
                print("The number of adversarial directions is 0")
                dirs[0] = torch.rand(x.shape) - 0.5
                dirs[1] = torch.rand(x.shape) - 0.5
                dirs[2] = torch.rand(x.shape) - 0.5
            elif num_adv_directions == 1:
                # First direction adversarial (towards a random wrong class),
                # the remaining two random.
                print("The number of adversarial directions is 1")
                labels_change = torch.randint(1, 10, (labels.shape[0],))
                wrong_labels = torch.remainder(labels_change + labels, 10)
                adv_images = PGD_attack.__call__(images, wrong_labels)
                dirs[0] = adv_images[img_ind].cpu() - x
                dirs[1] = torch.rand(x.shape) - 0.5
                dirs[2] = torch.rand(x.shape) - 0.5
            elif num_adv_directions == 3:
                # Three adversarial directions, each towards a different class.
                print("The number of adversarial directions is 3")
                for dir_ind in range(3):
                    labels_change = torch.ones(labels.shape[0]) * (dir_ind + 1)
                    labels_change = labels_change.long()
                    wrong_labels = torch.remainder(labels_change + labels, 10)
                    adv_images = PGD_attack.__call__(images, wrong_labels)
                    dirs[dir_ind] = adv_images[img_ind].cpu() - x
            else:
                raise NameError('The number of adversarial directions has to be either 0, 1, or 3.')
            # Gram-Schmidt orthonormalization of the three directions.
            # Normalize the first direction
            dirs[0] = dirs[0] / torch.norm(dirs[0], p=2)
            # Normalize the second direction
            dirs[1] = dirs[1] / torch.norm(dirs[1], p=2)
            dirs[1] = dirs[1] - torch.dot(dirs[1].view(-1), dirs[0].view(-1)) * dirs[0]
            dirs[1] = dirs[1] / torch.norm(dirs[1], p=2)
            # Normalize the third direction
            dirs[2] = dirs[2] / torch.norm(dirs[2], p=2)
            proj1 = torch.dot(dirs[2].view(-1), dirs[0].view(-1))
            proj2 = torch.dot(dirs[2].view(-1), dirs[1].view(-1))
            dirs[2] = dirs[2] - proj1 * dirs[0] - proj2 * dirs[1]
            dirs[2] = dirs[2] / torch.norm(dirs[2], p=2)
            # Check if the three directions are orthogonal
            Assert_three_orthogonal(dirs)
            # Compute the grid outputs
            x, y, z, value = Compute_grid_outputs(model, x, y, dirs, lens=lens, resolution=resolution)
            # Figure out where to put the subfigure
            row_ind = int(num_sub_figures_plotted / subplot_grid[1])
            col_ind = num_sub_figures_plotted - row_ind * subplot_grid[1]
            row_ind += 1
            col_ind += 1
            # Add a subfigure
            fig.add_trace(
                go.Volume(
                    x=x,
                    y=y,
                    z=z,
                    value=value,
                    isomin=0,
                    isomax=1,
                    opacity=0.1,  # needs to be small to see through all surfaces
                    surface_count=17,  # needs to be a large number for good volume rendering
                ),
                row=row_ind, col=col_ind
            )
            num_sub_figures_plotted += 1
        else:
            break
    if num_adv_directions == 0:
        title_text = "All three directions are random."
    elif num_adv_directions == 1:
        title_text = "X direction is adversarial."
    elif num_adv_directions == 3:
        title_text = "All three directions are adversarial (with different classes)."
    else:
        raise NameError('The number of adversarial directions has to be either 0, 1, or 3.')
    title_text += " Exp name: "
    title_text += title
    fig.update_layout(height=height, width=width, title_text=title_text)
    if show_figure:
        fig.show()
    if save_figure:
        # Fix: `plotly.offline` is never imported in this module, so the
        # previous plotly.offline.plot call raised NameError. Use the
        # Figure's own HTML writer instead.
        fig.write_html(file_path)
    return fig
| StarcoderdataPython |
121909 | <gh_stars>10-100
from __future__ import annotations
import asyncio
import random
from functools import cached_property
from typing import Iterator, Literal, Optional, Union, overload
import discord
from discord.ext import commands
from discord.utils import MISSING
from ditto import BotBase, Cog, Context
from ditto.types import User
from ditto.utils.message import confirm
# A 3x3 grid of cells: None = empty, False = the "X" player, True = the "O" player.
BoardState = list[list[Optional[bool]]]

# Emoji used to render each player's token; indexed by the player bool
# (False -> X, True -> O).
STATES = (
    "\N{REGIONAL INDICATOR SYMBOL LETTER X}",
    "\N{REGIONAL INDICATOR SYMBOL LETTER O}",
)
class Board:
    """A Tic-Tac-Toe position: a 3x3 grid plus the side to move.

    Cells hold None (empty), False (X) or True (O); ``current_player`` is
    the bool of the player whose turn it is.  ``winner`` stays MISSING
    until ``over`` has been evaluated, after which it is the winning
    player's bool, or None for a draw.
    """

    def __init__(
        self,
        state: BoardState,
        current_player: bool = False,
    ) -> None:
        self.state = state
        self.current_player = current_player
        self.winner: Optional[bool] = MISSING

    @property
    def legal_moves(self) -> Iterator[tuple[int, int]]:
        """Yield every empty (row, col) cell."""
        for c in range(3):
            for r in range(3):
                if self.state[r][c] is None:
                    yield (r, c)

    @cached_property
    def over(self) -> bool:
        """Return True when the game has ended, setting ``self.winner``.

        Cached because a Board never mutates: ``move`` returns a new
        instance instead of editing this one.
        """
        # vertical
        for c in range(3):
            token = self.state[0][c]
            if token is None:
                continue
            if self.state[1][c] == token and self.state[2][c] == token:
                self.winner = token
                return True

        # horizontal
        for r in range(3):
            token = self.state[r][0]
            if token is None:
                continue
            if self.state[r][1] == token and self.state[r][2] == token:
                self.winner = token
                return True

        # descending diag
        if self.state[0][0] is not None:
            token = self.state[0][0]
            if self.state[1][1] == token and self.state[2][2] == token:
                self.winner = token
                return True

        # ascending diag
        if self.state[0][2] is not None:
            token = self.state[0][2]
            if self.state[1][1] == token and self.state[2][0] == token:
                self.winner = token
                return True

        # No line of three: if no legal move remains the board is FULL,
        # which is a draw (the for/else fires when the generator yields
        # nothing).
        for _ in self.legal_moves:
            break
        else:
            self.winner = None
            return True

        return False

    def move(self, r: int, c: int) -> Board:
        """Return a new Board with the current player's token placed at (r, c)."""
        if (r, c) not in self.legal_moves:
            raise ValueError("Illegal Move")

        new_state = [[self.state[r][c] for c in range(3)] for r in range(3)]
        new_state[r][c] = self.current_player

        return Board(new_state, not self.current_player)

    @classmethod
    def new_game(cls) -> Board:
        """Return an empty board with X (False) to move."""
        state: BoardState = [[None for _ in range(3)] for _ in range(3)]
        return cls(state)
class AI:
    """Baseline opponent that plays a uniformly random legal move."""

    def __init__(self, player: bool) -> None:
        self.player = player

    def move(self, game: Board) -> Board:
        # Pick any legal (row, col) at random and play it.
        column = random.choice(tuple(game.legal_moves))
        return game.move(*column)
class NegamaxAI(AI):
    """Opponent that searches the full game tree with negamax + alpha-beta pruning."""

    def __init__(self, player: bool) -> None:
        super().__init__(player)

    def heuristic(self, game: Board, sign: int) -> float:
        """Score *game* from the perspective selected by *sign*.

        Terminal wins/losses dominate at +/-1,000,000; a draw is 0.
        Non-terminal positions get a small random score, which randomises
        play among otherwise equal moves.
        """
        if sign == -1:
            player = not self.player
        else:
            player = self.player

        if game.over:
            if game.winner is None:
                return 0
            if game.winner == player:
                return 1_000_000
            return -1_000_000

        # Random tie-breaker for non-terminal evaluation.
        return random.randint(-10, 10)

    # At the root (depth literally 0) a (row, col) move is returned ...
    @overload
    def negamax(
        self,
        game: Board,
        depth: Literal[0] = ...,
        alpha: float = ...,
        beta: float = ...,
        sign: int = ...,
    ) -> tuple[int, int]:
        ...

    # ... while recursive calls at depth >= 1 return a score.
    @overload
    def negamax(
        self,
        game: Board,
        depth: int = ...,
        alpha: float = ...,
        beta: float = ...,
        sign: int = ...,
    ) -> float:
        ...

    def negamax(
        self,
        game: Board,
        depth: int = 0,
        alpha: float = float("-inf"),
        beta: float = float("inf"),
        sign: int = 1,
    ) -> Union[float, tuple[int, int]]:
        """Negamax search with alpha-beta pruning.

        ``sign`` alternates between +1/-1 so the same evaluation serves
        both players; ``alpha``/``beta`` bound the window for pruning.
        """
        if game.over:
            return sign * self.heuristic(game, sign)

        move = MISSING
        score = float("-inf")
        for c in game.legal_moves:
            # Negate and swap the window for the opponent's reply.
            move_score = -self.negamax(game.move(*c), depth + 1, -beta, -alpha, -sign)

            if move_score > score:
                score = move_score
                move = c

            alpha = max(alpha, score)
            if alpha >= beta:
                break  # beta cutoff: opponent will never allow this line

        if depth == 0:
            return move
        else:
            return score

    def move(self, game: Board) -> Board:
        """Play the best move found by negamax."""
        return game.move(*self.negamax(game))
class Button(discord.ui.Button["Game"]):
    """One cell of the 3x3 board rendered as a Discord button.

    NOTE(review): the button's visual ``row`` is set from the *column*
    index ``c`` — the board renders transposed; confirm this is intended.
    """

    def __init__(self, r: int, c: int):
        super().__init__(style=discord.ButtonStyle.secondary, label="\u200b", row=c)
        self.r = r
        self.c = c

    def update(self):
        """Sync this button's label/colour/disabled state with the board cell."""
        cell = self.view.board.state[self.r][self.c]

        # Occupied cells, and every cell once the game is over, are locked.
        if cell is not None or self.view.board.over:
            self.disabled = True

        # Identity checks: the cell is Optional[bool] (None / False / True).
        if cell is True:
            self.style = discord.ButtonStyle.success
            self.label = "O"
        if cell is False:
            self.style = discord.ButtonStyle.danger
            self.label = "X"

    async def callback(self, interaction: discord.Interaction):
        """Apply the human move, then the AI's reply when playing the bot."""
        self.view.board = self.view.board.move(self.r, self.c)
        self.view.update()

        if self.view.board.over:
            await self.view.game_over(interaction)
            return

        if self.view.current_player.bot:
            self.view.make_ai_move()
            self.view.update()

            if self.view.board.over:
                await self.view.game_over(interaction)
                return

        # BUG FIX: message previously read "...'s' (...) turn!" with a stray
        # trailing apostrophe; now matches the wording used by the command.
        await interaction.response.edit_message(
            content=f"{self.view.current_player.mention}'s ({STATES[self.view.board.current_player]}) turn!",
            view=self.view,
        )
class Game(discord.ui.View):
    """Persistent view holding the Tic-Tac-Toe board state and its buttons.

    ``players`` is shuffled so the starting player is random; the board's
    ``current_player`` bool indexes into ``self.players``.
    """

    children: list[Button]

    def __init__(self, players: tuple[User, User]):
        self.players = list(players)
        random.shuffle(self.players)  # randomise who goes first

        super().__init__(timeout=None)
        self.board = Board.new_game()

        # If the bot was shuffled into the first slot, let it open the game.
        if self.current_player.bot:
            self.make_ai_move()

        for r in range(3):
            for c in range(3):
                self.add_item(Button(r, c))

        self.update()

    def update(self):
        """Refresh every button's label/style/disabled state from the board."""
        for child in self.children:
            child.update()

    async def game_over(self, interaction: discord.Interaction):
        """Announce the result, disable all buttons, and stop the view."""
        if self.board.winner is not None:
            content = f"{self.players[self.board.winner].mention} ({STATES[self.board.winner]}) wins!"
        else:
            content = "Draw!"

        for child in self.children:
            child.disabled = True  # type: ignore

        self.stop()
        return await interaction.response.edit_message(content=content, view=self)

    async def interaction_check(self, interaction: discord.Interaction):
        """Allow only the player whose turn it is to press a button."""
        if interaction.user not in self.players:
            await interaction.response.send_message("Sorry, you are not playing", ephemeral=True)
            return False
        if interaction.user != self.current_player:
            await interaction.response.send_message("Sorry, it is not your turn!", ephemeral=True)
            # BUG FIX: previously fell through and returned True here,
            # letting a player move during the opponent's turn.
            return False
        return True

    def make_ai_move(self):
        """Have the negamax AI play a move for the side to move."""
        ai = NegamaxAI(self.board.current_player)
        self.board = ai.move(self.board)

    @property
    def current_player(self) -> User:
        # board.current_player is a bool, so it indexes the two players.
        return self.players[self.board.current_player]
class TicTacToe(Cog):
    """Cog exposing the ``tictactoe`` command and its challenge workflow."""

    async def _get_opponent(self, ctx: Context) -> Optional[discord.Member]:
        """Post an open challenge and wait up to 60s for someone to accept.

        Returns the accepting member, or None on timeout.

        NOTE(review): the reaction check does not verify the reaction was
        added to *this* challenge message — any matching reaction in the
        bot's scope would satisfy it; confirm whether that is acceptable.
        """
        message = await ctx.channel.send(
            embed=discord.Embed(description=f"{ctx.author.mention} wants to play Tic-Tac-Toe.").set_footer(
                text="react with \N{WHITE HEAVY CHECK MARK} to accept the challenge."
            )
        )
        await message.add_reaction("\N{WHITE HEAVY CHECK MARK}")

        def check(reaction, user):
            # Accept the first non-bot user (other than the challenger)
            # who reacts with the checkmark.
            if reaction.emoji != "\N{WHITE HEAVY CHECK MARK}":
                return False

            if user.bot:
                return False

            if user == ctx.author:
                return False

            return True

        try:
            _, opponent = await self.bot.wait_for("reaction_add", check=check, timeout=60)
            return opponent
        except asyncio.TimeoutError:
            pass
        finally:
            # Remove the challenge message whether or not anyone accepted.
            await message.delete()

        return None

    @commands.command(aliases=["tic", "tic_tac_toe"])
    # @commands.max_concurrency(1, per=commands.BucketType.channel)
    async def tictactoe(self, ctx: Context, *, opponent: Optional[discord.Member] = None):
        """Start a Tic-Tac-Toe game!

        `opponent`: Another member of the server to play against. If not is set an open challenge is started.
        """
        if ctx.guild is None:
            raise commands.BadArgument("You must use this command in a guild.")

        if opponent is None:
            # No opponent named: run an open challenge in the channel.
            opponent = await self._get_opponent(ctx)
        else:
            if opponent == ctx.author:
                raise commands.BadArgument("You cannot play against yourself.")
            if not opponent.bot:
                # Named human opponents must explicitly accept.
                if not await confirm(
                    self.bot,
                    ctx.channel,
                    opponent,
                    f"{opponent.mention}, {ctx.author} has challenged you to Tic-Tac-Toe! do you accept?",
                ):
                    opponent = None

        # If challenge timed out
        if opponent is None:
            raise commands.BadArgument("Challenge cancelled.")

        game = Game((ctx.author, opponent))
        await ctx.send(f"{game.current_player.mention}'s (X) turn!", view=game)  # type: ignore
def setup(bot: BotBase) -> None:
    """discord.py extension entry point: register the TicTacToe cog."""
    bot.add_cog(TicTacToe(bot))
| StarcoderdataPython |
3374930 |
# @Title: 二叉树的镜像 (二叉树的镜像 LCOF)
# @Author: 18015528893
# @Date: 2021-01-20 21:00:02
# @Runtime: 56 ms
# @Memory: 14.8 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def mirrorTree(self, root: TreeNode) -> TreeNode:
        """Mirror the binary tree rooted at *root* in place and return it.

        Recursively swaps every node's left and right subtrees.
        """
        if root is None:
            return None
        flipped_left = self.mirrorTree(root.right)
        flipped_right = self.mirrorTree(root.left)
        root.left = flipped_left
        root.right = flipped_right
        return root
| StarcoderdataPython |
98913 | import re
import datetime
import pytz
import badgebakery
import os
import base64
import requests
import gridfs
from requests.auth import HTTPBasicAuth
import json
from bson.objectid import ObjectId
from flask_login import UserMixin
from pymongo import MongoClient
from flask import jsonify, current_app
from werkzeug.security import generate_password_hash
# Init db
# Module-level MongoDB connection shared by every helper below.
mongo = MongoClient()
db = mongo.xci
# GridFS bucket used to store badge image files.
fs = gridfs.GridFS(db)

# Exception class for the LR (Learning Registry)
class LRException(Exception):
    """Raised when a Learning Registry publish request is rejected."""
    pass
def allowed_file(filename):
    """Return True if *filename* has an allowed badge-image extension.

    The comparison is case-insensitive (``BADGE.PNG`` matches ``png``);
    this assumes ALLOWED_BADGE_EXTENSIONS is configured in lowercase, the
    usual Flask convention.
    """
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower() in current_app.config['ALLOWED_BADGE_EXTENSIONS']
# Thin GridFS wrappers so callers never touch ``fs`` directly.
def fsSaveBadgeFile(badge, grid_name):
    # ``badge`` is a file-like upload object exposing ``content_type``.
    return fs.put(badge, contentType=badge.content_type, filename=grid_name)

def fsGetLastVersion(filename):
    return fs.get_last_version(filename)

def fsGetByID(_id):
    return fs.get(_id)

def getBadgeIdByName(name):
    # NOTE(review): raises TypeError if no badgeclass matches ``name`` —
    # callers appear to assume the badge always exists.
    return str(db.badgeclass.find_one({'name': name})['_id'])

def updateAssertion(_id, url):
    # Stamp the assertion with its own id and its hosted verification URL.
    db.badgeassertion.update({'_id':_id}, {'$set':{'uid':str(_id)}})
    db.badgeassertion.update({'_id':_id}, {'$set':{'verify.url':url}})

def getBadgeClass(perf_id, p_id, json_resp=True):
    """Look up a badge class by performance-framework uuid URL and level name.

    Returns a Flask JSON response by default, or the raw dict (minus the
    Mongo ``_id``/``uuidurl`` fields) when ``json_resp`` is False; None if
    no matching badge class exists.
    """
    badge = db.badgeclass.find_one({'uuidurl': perf_id,'name': p_id})
    if not badge:
        return None
    del badge['_id']
    del badge['uuidurl']
    if json_resp:
        return jsonify(badge)
    else:
        return badge

def getBadgeAssertion(ass_id):
    """Return the badge assertion with id ``ass_id`` as JSON, or None."""
    ass = db.badgeassertion.find_one({'_id': ObjectId(ass_id)})
    if not ass:
        return None
    del ass['_id']
    return jsonify(ass)

def getAllBadgeAssertions(name):
    """Collect every badge-assertion URI across the user's competencies.

    NOTE(review): ``getUserProfile`` is not defined anywhere in this module;
    as written this raises NameError at runtime — confirm where it was meant
    to come from (perhaps ``UserProfile(name).profile``).
    """
    asses = {'assertions':[]}
    count = 0
    prof = getUserProfile(name)
    for k,v in prof['competencies'].items():
        for p in v['performances']:
            if 'badgeassertionuri' in p.keys():
                asses['assertions'].append(p['badgeassertionuri'])
                count += 1
    asses['count'] = count
    return jsonify(asses)
def createAssertion(userprof, uri):
    """Create hosted badge assertions for every not-yet-badged performance.

    Walks all performances in ``userprof['competencies']``; each one lacking
    a ``badgeassertionuri`` gets a new badge assertion document, whose hosted
    URL is written back into the performance and the profile is persisted.
    """
    uuidurl = userprof['perfwks'][str(hash(uri))]['uuidurl']
    for k, v in userprof['competencies'].items():
        if 'performances' in v.keys():
            for perf in v['performances']:
                if 'badgeassertionuri' not in perf:
                    # Badge image URL minus the ".png" suffix identifies the class.
                    badge_uri = getBadgeClass(uuidurl, perf['levelid'], False)['image'][:-4]
                    badgeassertion = {
                        'recipient':{
                            'type': 'email',
                            'hashed': False,
                            'identity': userprof['email']
                        },
                        'issuedOn': datetime.datetime.now(pytz.utc).isoformat(),
                        'badge': badge_uri,
                        'verify':{
                            'type': 'hosted',
                            'url': ''
                        },
                        'evidence': perf['statementurl']
                    }
                    _id = db.badgeassertion.insert(badgeassertion)
                    # The hosted URL embeds the Mongo id assigned above.
                    assertionuri = current_app.config['DOMAIN_NAME'] + '/assertions/%s' % str(_id)
                    updateAssertion(_id, assertionuri)
                    perf['badgeassertionuri'] = assertionuri
                    perf['badgeclassimageurl'] = badgeassertion['badge'] + ".png"
                    updateUserProfile(userprof, userprof['username'])

                    # Baked-badge generation kept disabled for later use.
                    # # Create the baked badge - for later use
                    # unbaked = os.path.join(os.path.dirname(__file__), 'static/%s.png' % perf['levelid'])
                    # name_encoding = base64.b64encode('%s-%s' % (perf['levelid'], userprof['email']))
                    # baked_filename = '%s_%s' % (uuidurl, name_encoding)
                    # baked = os.path.join(os.path.dirname(__file__), 'static/baked/%s.png' % baked_filename)
                    # badgebakery.bake_badge(unbaked, baked, perf['badgeassertionuri'])

                    # # Once baked image is created, store in mongo
                    # storeBakedBadges()

# Perform actual update of profile
def updateUserProfile(profile, userid):
    # Replaces the whole profile document keyed by username.
    db.userprofiles.update({'username':userid}, profile, manipulate=False)
# User class to monitor who is logged in - inherits from UserMixin (flask_login)
class User(UserMixin):
    """Flask-Login user wrapper around a persistent UserProfile document.

    Competencies, competency frameworks and performance frameworks are all
    cached inside the profile document, keyed by ``str(hash(uri))``.
    """

    def __init__(self, userid, password=None, email=None, first_name=None, last_name=None, roles=None):
        # Loads (or lazily creates) the backing Mongo profile document.
        self.userprofile = UserProfile(userid, password, email, first_name, last_name, roles)
        # self.id = userid
        self.password = self.userprofile.profile['password']
        self.roles = self.userprofile.profile['roles']

    @property
    def profile(self):
        return self.userprofile.profile

    @profile.setter
    def profile(self, value):
        self.userprofile.profile = value

    @property
    def id(self):
        # flask_login identifies the user by username.
        return self.profile['username']

    @property
    def last_name(self):
        return self.profile['last_name']

    @last_name.setter
    def last_name(self, value):
        self.profile['last_name'] = value

    @property
    def first_name(self):
        return self.profile['first_name']

    @first_name.setter
    def first_name(self, value):
        self.profile['first_name'] = value

    @property
    def email(self):
        return self.profile['email']

    @email.setter
    def email(self, value):
        self.profile['email'] = value

    def save(self):
        # Persist the current in-memory profile to Mongo.
        self.userprofile.save()

    def getFullAgent(self):
        """Return this user as an xAPI Agent object."""
        return {
            "mbox" : "mailto:%s" % self.profile['email'],
            "name" : "%s %s" % (self.profile['first_name'], self.profile['last_name']),
            "objectType": "Agent"
        }

    def getComp(self, uri):
        # Competencies are keyed by the string hash of their URI.
        return self.profile['competencies'][str(hash(uri))]

    def getCompArray(self):
        return self.profile['competencies'].values()

    def getAllComps(self):
        return self.profile['competencies']

    def updateComp(self, json_comp):
        """Replace a cached competency and propagate its completion state."""
        self.profile['competencies'][str(hash(json_comp['uri']))] = json_comp
        self.save()
        # Keep the copies embedded in every cached framework consistent.
        for fwk in self.profile['compfwks'].values():
            self.updateFwkCompsWithCompletedVal(fwk, json_comp['uri'], json_comp['completed'])

    def updateFwkCompsWithCompletedVal(self, fwk, uri, completed):
        """Recursively set ``completed`` on the matching competency in *fwk*."""
        for c in fwk['competencies']:
            if c['type'] != 'http://ns.medbiq.org/competencyframework/v1/':
                if c['uri'] == uri:
                    c['completed'] = completed
            else:
                # Nested framework: recurse into it.
                self.updateFwkCompsWithCompletedVal(c, uri, completed)
        self.save()

    def getCompfwk(self, uri):
        return self.profile['compfwks'][str(hash(uri))]

    def updateFwk(self, json_comp):
        self.profile['compfwks'][str(hash(json_comp['uri']))] = json_comp
        self.save()

    def getCompfwkArray(self):
        return self.profile['compfwks'].values()

    def getPerfwk(self, uri):
        return self.profile['perfwks'][str(hash(uri))]

    # Given a URI and Userid, store a copy of the comp in the user profile
    def addComp(self, uri):
        h = str(hash(uri))
        if not self.profile.get('competencies', False):
            self.profile['competencies'] = {}
        if uri and h not in self.profile['competencies']:
            comp = getCompetency(uri)
            if comp:
                self.profile['competencies'][h] = comp
                self.save()

    def addFwk(self, uri):
        """Cache a competency framework, recursing into nested frameworks."""
        fh = str(hash(uri))
        if not self.profile.get('compfwks', False):
            self.profile['compfwks'] = {}
        if uri and fh not in self.profile['compfwks']:
            fwk = getCompetencyFramework(uri)
            self.profile['compfwks'][fh] = fwk
            for c in fwk['competencies']:
                if c['type'] == "http://ns.medbiq.org/competencyframework/v1/":
                    self.addFwk(c['uri'])
                else:
                    self.addComp(c['uri'])
            self.save()

    def addPerFwk(self, uri):
        """Cache a performance framework plus every competency it references."""
        fh = str(hash(uri))
        if not self.profile.get('perfwks', False):
            self.profile['perfwks'] = {}
        if uri and fh not in self.profile['perfwks']:
            fwk = getPerformanceFramework(uri)
            self.profile['perfwks'][fh] = fwk
            # find the competency object uri for each component and add it to the user's list of competencies
            for curi in (x['entry'] for b in fwk.get('components', []) for x in b.get('competencies', []) if x['type'] != "http://ns.medbiq.org/competencyframework/v1/"):
                self.addComp(curi)
            self.save()
class UserProfile():
    """Lazy-loading wrapper for a user's Mongo profile document.

    NOTE: the original source contained dataset-anonymization placeholders
    (``<PASSWORD>``) where the password default and hashing call belonged;
    they are reconstructed here as ``password=None`` /
    ``generate_password_hash(password)``.
    """

    def __init__(self, userid, password=None, email=None, first_name=None, last_name=None, roles=None):
        self.userid = userid
        self._profile = db.userprofiles.find_one({'username':userid})

        # Create a fresh profile document if this user doesn't exist yet.
        if not self._profile:
            db.userprofiles.insert({'username': userid, 'password': generate_password_hash(password),
                'email':email, 'first_name':first_name, 'last_name':last_name,
                'competencies':{}, 'compfwks':{}, 'perfwks':{}, 'lrsprofiles':[],
                'roles':roles})
            self._profile = db.userprofiles.find_one({'username':userid})

    @property
    def profile(self):
        return self._profile

    @profile.setter
    def profile(self, value):
        # BUG FIX: previously ``self._profile = self.save(value)`` — save()
        # returns None, so assigning a profile wiped the in-memory copy.
        # save() already stores ``value`` in self._profile.
        self.save(value)

    # Update or insert user profile if id is given
    def save(self, profile=None):
        """Persist the profile; optionally replace it with *profile* first."""
        if profile:
            self._profile = profile
        db.userprofiles.update({'username':self.userid}, self._profile, manipulate=False)
# LR (Learning Registry) functions: attach a published LR resource URI to
# frameworks/competencies and fan the change out to every cached copy.

# Update all comp fwks
def updateCompetencyFrameworkLR(cfwk_id, lr_uri):
    db.compfwk.update({'_id': ObjectId(cfwk_id)}, {'$addToSet':{'lr_data':lr_uri}})
    updateUserFwkById(cfwk_id)

# Update all per fwks
def updatePerformanceFrameworkLR(pfwk_id, lr_uri):
    db.perfwk.update({'_id': ObjectId(pfwk_id)}, {'$addToSet':{'lr_data':lr_uri}})
    updateUserPfwkById(pfwk_id)

# Update the comp with new LR data-calls other LR updates
def updateCompetencyLR(c_id,lr_uri):
    # NOTE: ``basestring`` is Python 2 — this module targets Python 2.
    if isinstance(c_id, basestring):
        c_id = ObjectId(c_id)
    db.competency.update({'_id': c_id}, {'$addToSet':{'lr_data':lr_uri}})
    comp_uri = db.competency.find_one({'_id': c_id})['uri']
    updateUserCompLR(comp_uri, lr_uri)
    updateCompInFwksLR(comp_uri, lr_uri)

# Update the comp in all users
def updateUserCompLR(c_uri, lr_uri):
    h = str(hash(c_uri))
    set_field = 'competencies.' + h
    db.userprofiles.update({set_field:{'$exists': True}}, {'$addToSet':{set_field+'.lr_data': lr_uri}}, multi=True)

# Updates all comp fwks that contain that comp
def updateCompInFwksLR(c_uri, lr_uri):
    # Remove this field in comp before updating the fwk
    db.compfwk.update({'competencies':{'$elemMatch':{'uri':c_uri}}}, {'$addToSet': {'competencies.$.lr_data': lr_uri }}, multi=True)
    updateUserFwkByURILR(c_uri, lr_uri)

# Updates all comps in fwks that are in the userprofiles
def updateUserFwkByURILR(c_uri, lr_uri):
    comp = db.competency.find_one({'uri': c_uri})
    if not comp['type'] == 'commoncoreobject':
        try:
            parents = comp['relations']['childof']
        except KeyError:
            parents = []

        # For each parent fwk the comp is in, update it in that userprofile
        for uri in parents:
            fwk = db.compfwk.find({'uri': uri})[0]
            h = str(hash(uri))
            set_field = 'compfwks.' + h + '.competencies'
            db.userprofiles.update({set_field:{'$elemMatch':{'uri':c_uri}}}, {'$addToSet':{set_field + '.$.lr_data': lr_uri}}, multi=True)
def sendLRParadata(lr_uri, lr_title, user_role, c_type, c_uri, c_content):
    """Publish a paradata "matched" event to the Learning Registry.

    Records that a user with ``user_role`` matched the LR resource at
    ``lr_uri`` to the competency/framework ``c_uri``.  Returns the LR
    document id on success; raises LRException with the LR's message
    otherwise.
    """
    date = datetime.datetime.now(pytz.utc).isoformat()
    paradata = {
        "documents": [
            {
                "TOS": {
                    "submission_TOS": "http://www.learningregistry.org/tos/cc0/v0-5/"
                },
                "doc_type": "resource_data",
                "doc_version": "0.23.0",
                "resource_data_type": "paradata",
                "active": True,
                "identity": {
                    "owner": "",
                    "submitter": "ADL",
                    "submitter_type": "agent",
                    "signer": "ADL",
                    "curator": ""
                },
                "resource_locator": lr_uri,
                "payload_placement": "inline",
                "payload_schema": [
                    "LR Paradata 1.0"
                ],
                "resource_data": {
                    "activity":{
                        "actor":{
                            "description": ["ADL XCI " + user_role, lr_title],
                            "objectType": user_role
                        },
                        "verb":{
                            "action": "matched",
                            "date": date,
                            "context":{
                                "id":current_app.config['DOMAIN_NAME'],
                                "description":"ADL's XCI project",
                                "objectType": "Application"
                            }
                        },
                        "object":{
                            "id": lr_uri
                        },
                        "related":[{
                            "objectType": c_type,
                            "id": c_uri,
                            "content": c_content
                        }],
                        "content": "A resource found at "+lr_uri+" was matched to the "+c_type+" with ID "+c_uri+" by an "+user_role+" on "+current_app.config['DOMAIN_NAME']+" system on "+date
                    }
                }
            }
        ]
    }

    # verify=False skips TLS verification against the LR node.
    r = requests.post(current_app.config['LR_PUBLISH_ENDPOINT'], data=json.dumps(paradata), headers={"Content-Type":"application/json"},
            auth=HTTPBasicAuth(current_app.config['LR_PUBLISH_NAME'], current_app.config['LR_PUBLISH_PASSWORD']), verify=False)

    if r.status_code != 200:
        message = json.loads(r.content)['message']
        raise LRException(message)
    else:
        return json.loads(r.content)['document_results'][0]['doc_ID']
# General comp/fwk CRUD helpers over the ``competency`` and ``compfwk``
# collections.

# Use on search comp page-searches for search keyword in comp titles
def searchComps(key):
    regx = re.compile(key, re.IGNORECASE)
    return db.competency.find({"title": regx})

# Update or insert competency depending if it exists
def saveCompetency(json_comp):
    # Stamp a modification time on first save.
    if not json_comp.get('lastmodified', False):
        json_comp['lastmodified'] = datetime.datetime.now(pytz.utc).isoformat()
    if getCompetency(json_comp['uri']):
        updateCompetency(json_comp)
    else:
        db.competency.insert(json_comp, manipulate=False)

# Update all comp fwks in the user by id
def updateUserFwkById(cfwk_id):
    fwk = db.compfwk.find_one({'_id': ObjectId(cfwk_id)})
    h = str(hash(fwk['uri']))
    set_field = 'compfwks.' + h
    db.userprofiles.update({set_field:{'$exists': True}}, {'$set':{set_field:fwk}}, multi=True)

# Update all per fwks in the user by id
def updateUserPfwkById(pfwk_id):
    fwk = db.perfwk.find_one({'_id': ObjectId(pfwk_id)})
    h = str(hash(fwk['uri']))
    set_field = 'perfwks.' + h
    db.userprofiles.update({set_field:{'$exists': True}}, {'$set':{set_field:fwk}}, multi=True)

# Update the competency by uri
def updateCompetency(json_comp):
    db.competency.update({'uri':json_comp['uri']}, json_comp, manipulate=False)

# Get the competency based on uri
def getCompetency(uri, objectid=False):
    # ``objectid`` keeps the Mongo _id in the returned document.
    if objectid:
        return db.competency.find_one({'uri':uri})
    return db.competency.find_one({'uri':uri}, {'_id':0})

# Update comp by id
def updateCompetencyById(cid, comp):
    comp['lastmodified'] = datetime.datetime.now(pytz.utc).isoformat()
    db.competency.update({'_id': ObjectId(cid)}, comp, manipulate=False)

# Get comp by id
def getCompetencyById(cid, objectid=False):
    if objectid:
        return db.competency.find_one({'_id': ObjectId(cid)})
    return db.competency.find_one({'_id': ObjectId(cid)}, {'_id':0})

# Just return one comp
def findoneComp(d):
    return db.competency.find_one(d)

# Return comps based on search param and sort
def findCompetencies(d=None, sort=None, asc=1):
    if sort:
        return [x for x in db.competency.find(d).sort(sort, asc)]
    return [x for x in db.competency.find(d)]

# return comp fwks based on search param
def findCompetencyFrameworks(d=None):
    return [x for x in db.compfwk.find(d)]

# Update or create comp fwk based on uri
def saveCompetencyFramework(json_fwk):
    if getCompetencyFramework(json_fwk['uri']):
        updateCompetencyFramework(json_fwk)
    else:
        db.compfwk.insert(json_fwk, manipulate=False)

# Update actual comp
def updateCompetencyFramework(json_fwk):
    db.compfwk.update({'uri':json_fwk['uri']}, json_fwk, manipulate=False)

# Return one comp fwk based on uri
def getCompetencyFramework(uri, objectid=False):
    if objectid:
        return db.compfwk.find_one({'uri':uri})
    return db.compfwk.find_one({'uri': uri}, {'_id':0})
# Update or create per fwk
def savePerformanceFramework(json_fwk):
    """Insert or update a performance framework and mint its badge classes.

    On every save, a badge class document is created per performance level
    of each component, and its image URL is written back into the level.
    NOTE(review): badge classes are inserted even when the framework already
    existed — re-saving appears to create duplicate badgeclass rows; confirm.
    """
    if getPerformanceFramework(json_fwk['uri']):
        updatePerformanceFramework(json_fwk)
    else:
        db.perfwk.insert(json_fwk, manipulate=False)

    # Create badgeclasses when created the perfwk
    for c in json_fwk['components']:
        for p in c['performancelevels']:
            badgeclass = {
                "name": p['id'],
                "description": p['description'],
                "image": '%s/%s/%s/%s/%s.png' % (current_app.config['DOMAIN_NAME'], current_app.config['BADGE_UPLOAD_FOLDER'], json_fwk['uuidurl'], c['id'], p['id']),
                "criteria": json_fwk['uri'] + '.xml',
                "issuer": '%s/%s/issuer' % (current_app.config['DOMAIN_NAME'], current_app.config['BADGE_UPLOAD_FOLDER']),
                'uuidurl': json_fwk['uuidurl']
            }
            db.badgeclass.insert(badgeclass)
            p['badgeclassimage'] = badgeclass['image']

    # Update the perfwk wiht the badgeclassimage fields
    updatePerformanceFramework(json_fwk)

# Update actual per fwk
def updatePerformanceFramework(json_fwk):
    # NOTE(review): ``val`` is assigned but never used.
    val = db.perfwk.update({'uri':json_fwk['uri']}, json_fwk, manipulate=False)
    pfwk_id = db.perfwk.find_one({'uri':json_fwk['uri']})['_id']
    updatePerfFwkUserProfile(pfwk_id)

def updatePerfFwkUserProfile(pfwk_id):
    # Push the updated framework into every user profile caching it.
    fwk = db.perfwk.find_one({'_id': pfwk_id})
    h = str(hash(fwk['uri']))
    set_field = 'perfwks.' + h
    db.userprofiles.update({set_field:{'$exists': True}}, {'$set':{set_field:fwk}}, multi=True)

# Get one per fwk
def getPerformanceFramework(uri, objectid=False):
    if objectid:
        return db.perfwk.find_one({'uri':uri})
    return db.perfwk.find_one({'uri':uri}, {'_id':0})

# Return per fwk based on search criteria
def findPerformanceFrameworks(d=None):
    return [x for x in db.perfwk.find(d)]
# Use on search comp page-searches for search keyword in comp titles
# NOTE(review): exact duplicate of searchComps defined earlier in this
# module; this later definition wins at import time.
def searchComps(key):
    regx = re.compile(key, re.IGNORECASE)
    return db.competency.find({"title": regx})

def checkUsernameExists(username):
    # True when a profile document already uses this username.
    return db.userprofiles.find_one({'username':username}) is not None

def checkEmailExists(email):
    # True when a profile document already uses this email address.
    return db.userprofiles.find_one({'email':email}) is not None
def create_questions(form):
    """Build the quiz question list from a submitted quiz-creation form.

    Reads exactly ten questions (``types1``..``types10``, matching
    ``question<N>text``/``question<N>answer``/``question<N>choices`` keys)
    from the form mapping.  Each entry carries ``type``, ``question`` and
    ``correct``; true/false and multiple-choice entries also carry the
    ``answers`` option list.
    """
    data = []
    for i in range(1, 11):
        st_i = str(i)
        q_dict = {}
        q_dict['type'] = form.get('types' + st_i)
        q_dict['question'] = form.get('question' + st_i + 'text')
        if q_dict['type'] == 'short answer':
            # Short answers may list several acceptable tokens, space-separated.
            q_dict['correct'] = form.get('question' + st_i + 'answer').split(' ')
        elif q_dict['type'] == 'true/false':
            q_dict['correct'] = form.get('question' + st_i + 'answer') in ['True', 'true']
            q_dict['answers'] = [True, False]
        else:
            # Multiple choice: options arrive as one comma-separated string.
            q_dict['correct'] = form.get('question' + st_i + 'answer')
            q_dict['answers'] = form.get('question' + st_i + 'choices').strip().split(',')
        data.append(q_dict)
    return data
def grade_results(types, answers, responses, data):
    """Grade the five answered questions and mark misses in *data*.

    For each question x, compares ``answers[x]`` to ``responses[x]``
    according to ``types[x]`` and sets ``data[x + 1]['result']['success']``
    to False on a miss.  Returns ``(number_wrong, data)``.
    """
    wrong = 0
    for idx in range(5):
        qtype, expected, given = types[idx], answers[idx], responses[idx]
        if qtype == 'true/false':
            missed = expected != given
        elif qtype == 'choice':
            missed = expected.strip() != given.strip()
        else:
            # Short answer: every expected comma-separated token must appear
            # among the space-separated response tokens (case-insensitive).
            wanted = set(expected.lower().strip().split(","))
            provided = [str(tok).lower().strip() for tok in given.split(" ")]
            missed = not wanted.issubset(provided)
        if missed:
            data[idx + 1]['result']['success'] = False
            wrong += 1
    return wrong, data
def retrieve_statements(status, post_content, endpoint, headers):
    """Fetch the seven xAPI statements whose ids the LRS returned.

    Returns ``(stmts, sens)``: the raw statement payloads and human-readable
    sentences summarising the quiz attempt.  Both lists stay empty unless
    the publish call succeeded (``status == 200``).
    """
    stmts = []
    jstmts = []
    sens = []
    if status == 200:
        content = json.loads(post_content)
        for x in range(0,7):
            stmts.append(requests.get(endpoint + '?statementId=%s' % content[x], headers=headers, verify=False).content)
            jstmts.append(json.loads(stmts[x]))
        # BUG FIX: the "attempted" summary sentence was previously appended
        # inside the fetch loop above, duplicating it seven times; build it once.
        sens.append("{0} {1} {2}".format(jstmts[0]['actor']['name'], jstmts[0]['verb']['display']['en-US'], jstmts[0]['object']['definition']['name']['en-US']))
        # Sentences for the five answered questions.
        for x in range(1, 6):
            sens.append("{0} {1} {2} ({3}) with {4}. (Answer was {5})".format(jstmts[x]['actor']['name'], jstmts[x]['verb']['display']['en-US'],
                jstmts[x]['object']['definition']['name']['en-US'], jstmts[x]['object']['definition']['description']['en-US'], jstmts[x]['result']['response'],
                jstmts[x]['result']['extensions']['answer:correct_answer']))
        # Closing passed/failed sentence.
        sens.append("{0} {1} {2}".format(jstmts[6]['actor']['name'], jstmts[6]['verb']['display']['en-US'], jstmts[6]['object']['definition']['name']['en-US']))
    return stmts, sens
def get_result_statements(responses, answers, types, questions, actor, actor_name, quiz_name, display_name, comp_uri):
    """Build the seven xAPI statements describing one quiz attempt.

    Produces: one "attempted" statement, five "answered" statements graded
    via ``grade_results``, and a final "passed" statement that is flipped to
    "failed" when two or more answers were wrong.  Returns ``(wrong, data)``.
    NOTE(review): ``actor_name`` is unused.
    """
    data = [
        {
            'actor': actor,
            'verb': {'id': 'http://adlnet.gov/expapi/verbs/attempted', 'display':{'en-US': 'attempted'}},
            'object':{'id':quiz_name,
                'definition':{'name':{'en-US':display_name}}}
        }
    ]
    for x in range(0,5):
        data.append({
            'actor': actor,
            'verb': {'id': 'http://adlnet.gov/expapi/verbs/answered', 'display':{'en-US': 'answered'}},
            'object':{'id':quiz_name + '_question' + str(x+1), 'definition':{'name':{'en-US':display_name + ' question' + str(x+1)}, 'description':{'en-US':questions[x]}}},
            'context':{'contextActivities':{'parent':[{'id': quiz_name}]}},
            'result':{'success': True, 'response': responses[x],'extensions': {'answer:correct_answer': answers[x]}}
        })

    # grade_results flips result.success to False on each miss.
    wrong, data = grade_results(types, answers, responses, data)
    data.append({
        'actor': actor,
        'verb': {'id': 'http://adlnet.gov/expapi/verbs/passed', 'display':{'en-US': 'passed'}},
        'object':{'id':quiz_name, 'definition':{'name':{'en-US':display_name}}},
        'result':{'score':{'min': 0, 'max': 5, 'raw': 5 - wrong}},
        'context':{'contextActivities':{'other':[{'id': comp_uri}]}}
    })

    # Passing threshold: fewer than two wrong answers.
    if wrong >= 2:
        data[6]['verb']['id'] = 'http://adlnet.gov/expapi/verbs/failed'
        data[6]['verb']['display']['en-US'] = 'failed'

    return wrong, data

# Quiz propagation helpers: mirror of the *LR functions above, but setting
# the ``quiz`` field instead of appending to ``lr_data``.

# Update the comp with quiz - calls other updates
def addCompetencyQuiz(c_id, data):
    db.competency.update({'_id': ObjectId(c_id)}, {'$set':{'quiz':data}})
    comp_uri = db.competency.find_one({'_id': ObjectId(c_id)})['uri']
    updateUserCompQuiz(comp_uri, data)
    updateCompInFwksQuiz(comp_uri, data)

# Update the comp in all users
def updateUserCompQuiz(c_uri, data):
    h = str(hash(c_uri))
    set_field = 'competencies.' + h
    db.userprofiles.update({set_field:{'$exists': True}}, {'$set':{set_field+'.quiz': data}}, multi=True)

# Updates all comp fwks that contain that comp
def updateCompInFwksQuiz(c_uri, data):
    db.compfwk.update({'competencies':{'$elemMatch':{'uri':c_uri}}}, {'$set': {'competencies.$.quiz': data }}, multi=True)
    updateUserFwkByURIQuiz(c_uri, data)

# Updates all comps in fwks that are in the userprofiles
def updateUserFwkByURIQuiz(c_uri, data):
    comp = db.competency.find_one({'uri': c_uri})
    if not comp['type'] == 'commoncoreobject':
        try:
            parents = comp['relations']['childof']
        except KeyError:
            parents = []

        # For each parent fwk the comp is in, update it in that userprofile
        for uri in parents:
            fwk = db.compfwk.find({'uri': uri})[0]
            h = str(hash(uri))
            set_field = 'compfwks.' + h + '.competencies'
            db.userprofiles.update({set_field:{'$elemMatch':{'uri':c_uri}}}, {'$set':{set_field + '.$.quiz': data}}, multi=True)
# Admin reset functions

# Drop all of the comp collections
def dropCompCollections():
    """Destructive admin reset: drop all competency/framework collections
    and blank every user profile's cached copies."""
    db.drop_collection('competency')
    db.drop_collection('compfwk')
    db.drop_collection('perfwk')
    for u in db.userprofiles.find():
        u['competencies'] = {}
        u['perfwks'] = {}
        u['compfwks'] = {}
        updateUserProfile(u, u['username'])

# Drop the database
def dropAll():
    """Destructive admin reset: drop the entire xci database."""
    return mongo.drop_database(db)
| StarcoderdataPython |
3275886 | """
Plot results of the criterion
Author : <NAME>
Date : 11/10/2016
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import cPickle as pickle
import numpy
from itertools import cycle
from sklearn.metrics import auc, average_precision_score,precision_recall_curve
import seaborn as sns
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# Folder holding the per-criterion result CSVs.
inputdata='output/test/'

# One distinct colour per criterion curve.
colors = cycle(['cyan', 'indigo', 'seagreen', 'gold', 'blue',
                'darkorange','red','grey','darkviolet','mediumslateblue','chocolate'])

# LaTeX-rendered legend labels (usetex is enabled above); must stay in the
# same order as ``crit_names``.
crit_names_label = [#"Pearson's correlation",
                #"AbsPearson's correlation",
                #"Pvalue Pearson",
                r"$\textrm{Chi2 test}$",
                r"$\textrm{NMutual information}$",
                r"$\textrm{AMutual information}$",
                r"$\textrm{Corrected Cramer's V}$",
                r"$\textrm{Mercer Kernels}$",
                #"Lopez-Paz Causation coefficient",
                r"$\textrm{FSIC}$"
                #"BF2d mutual info",
                #"BFMat mutual info",
                #"ScPearson correlation",
                #"ScPval-Pearson"
                ]

# Criterion names; the first four characters select each result file.
crit_names = [#"Pearson's correlation",
                #"AbsPearson's correlation",
                #"Pvalue Pearson",
                "Chi2 test",
                "NMutual information",
                "AMutual information",
                "Corrected Cramer's V",
                "Mercer Kernels",
                #"Lopez-Paz Causation coefficient",
                "FSIC"
                #"BF2d mutual info",
                #"BFMat mutual info",
                #"ScPearson correlation",
                #"ScPval-Pearson"
                ]

results=[]

# ax1: ROC curves; ax2: precision-recall curves.
f, axarr = plt.subplots(2, sharex=True)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
fig2=plt.figure()
ax2=fig2.add_subplot(111)
# for crit in crit_names:
# inputfile=inputdata+'test_crit_l_'+crit[:4]+'-1.csv'
# if os.path.exists(inputfile):
# print(crit),
# df=pd.read_csv(inputfile,sep=';')
# df=df[['Target','Pairtype']]
# df=df[numpy.isfinite(df['Target'])]
# #df['Target']=df['Target'].astype('float')
# #print(df.dtypes)
# if crit[:4]!='Lope':
# df=df.sort_values(by='Target', ascending=False)
# else:
# df=df.sort_values(by='Target', ascending=True)
# #print(df)
# N_max=len(df.index)
# print(N_max),
# N=0.0
# Mprobes_max = (df['Pairtype']=='P').sum()
# print(Mprobes_max)
# Mprobes = 0.0
# FDR=[]
# for index,row in df.iterrows():
# N=N+1
# if row['Pairtype']=='P':
# Mprobes+=1
# FDR.append((N_max/N)*(Mprobes/Mprobes_max))
# results.append(FDR)
# pickle.dump(results,open(inputdata+'res.p','wb'))
# #results=pickle.load(open(inputdata+'res.p','rb'))
# '''for i in range(len(results)-1):
# print(results[i]==results[-1])
# print(len())'''
# #print(results)
# for i,color in zip(results,colors):
# ax1.plot(range(len(i)),i,color=color)
# ax2.plot(range(len(i)),i,color=color)
# plt.legend(crit_names_label,loc=4)
# plt.xlabel(r"$\textrm{Number of probes retrieved}$")
# plt.ylabel(r"$\textrm{False discovery rate}$")
# ax1.set_xscale('log')
# plt.show()#FDR w/ probes
# Fresh colour cycle for the ROC/PR pass (the earlier cycle is partially
# consumed if the FDR section above is re-enabled).
colors = cycle(['cyan', 'indigo', 'seagreen', 'gold', 'blue', 'darkorange','red','grey','darkviolet','mediumslateblue'])

# For each criterion: read its Kaggle results CSV, accumulate cumulative
# TPR/FPR/precision by walking rows sorted by descending score, then plot
# the ROC (ax1) and precision-recall (ax2) curves.
for crit, color, name_label in zip (crit_names,colors,crit_names_label):
    tpr=[]
    fpr=[]
    ppv=[]
    print(crit)
    try:
        with open("kaggle/norm_fd/CEfinal_train_"+crit[:4]+'.csv','r') as results_file:
            df=pd.read_csv(results_file,sep=';')
            df=df.sort_values(by='Target', ascending=False)
            # Pairtype 4 encodes the negative class (independent pairs).
            P=float((df['Pairtype']!=4).sum())
            Plist=(df['Pairtype']!=4).tolist()
            N=float((df['Pairtype']==4).sum())
            TP=0.0
            FP=0.0
            for index,row in df.iterrows():
                # The Lopez-Paz criterion scores in the opposite direction,
                # so positives/negatives are swapped for it.
                if crit[:4]!='Lope':
                    if row['Pairtype']==4:
                        FP+=1.0
                    else:
                        TP+=1.0
                else:
                    if row['Pairtype']!=4:
                        FP+=1.0
                    else:
                        TP+=1.0
                tpr.append(TP/P) #TPR=recall
                fpr.append(FP/N) #FPR
                ppv.append(TP/(TP+FP))
            # NOTE(review): this sorts the three series by tpr tuple-wise
            # before auc(fpr, tpr); both series are already nondecreasing,
            # so this should be a no-op — confirm it is intentional.
            tpr,fpr, ppv= (list(t) for t in zip(*sorted(zip(tpr,fpr,ppv))))
            auc_score=auc(fpr,tpr)
            pres,rec,_= precision_recall_curve(Plist,df['Target'].tolist())
            ac_pr_score=average_precision_score(Plist,df['Target'].tolist())
            # NOTE(review): pl1/pl2 are assigned but never used.
            pl1=ax1.plot(fpr,tpr,label=name_label+r' $ (\textrm{area} : $'+r' ${0:3f})$'.format(auc_score),color=color)
            pl2=ax2.plot(rec,pres,label=name_label+r' $ (\textrm{area} : $'+r' ${0:3f})$'.format(ac_pr_score),color=color)
    except IOError:
        # Missing result file for this criterion: skip it silently.
        continue

ax1.plot([0, 1], [0, 1], linestyle='--', color='k',
         label=r"$\textrm{Luck}$")
ax1.set_xlabel(r"$\textrm{False Positive Rate}$")
ax1.set_ylabel(r"$\textrm{'True Positive Rate}$")
ax1.set_title(r"$\textrm{ROC Curve of independence tests on the}$"+" \n"+r"$\textrm{ Chalearn Cause-effect pairs challenge dataset}$")
ax1.legend(loc="lower right")
ax2.set_xlabel(r"$\textrm{Recall}$")
ax2.set_ylabel(r"$\textrm{Precision}$")
ax2.set_title(r"$\textrm{Precision recall curves on the}$"+ "\n" +r"$\textrm{Chalearn Cause-effect pairs challenge dataset}$")
ax2.legend(loc='best')
plt.show()
| StarcoderdataPython |
3208969 | import logging
import joblib
import scipy.sparse as sparce
import numpy as np
def save_matrix(df, matrix, out_path):
    """Prepend id and label columns to *matrix* and persist the result.

    The ``id`` and ``label`` columns of *df* are cast to int64, turned into
    sparse column vectors, stacked to the left of *matrix* in CSR layout,
    and the combined matrix is serialized to *out_path* with joblib.
    """
    ids = sparce.csr_matrix(df.id.astype(np.int64)).T
    labels = sparce.csr_matrix(df.label.astype(np.int64)).T
    combined = sparce.hstack([ids, labels, matrix], format="csr")
    joblib.dump(combined, out_path)
    msg = f"The output matrix saved at: {out_path} of the size:{combined.shape} and data type:{combined.dtype}"
    logging.info(msg)
| StarcoderdataPython |
1710354 | # Aplicação em Python para detectar se uma pessoa tem diabetes ou não, usando Machine Learning!
#Importando os pacotes Python
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from PIL import Image
import streamlit as st
# App header shown at the top of the page.
st.header(' DIABETES - PYTHON WEB APP')
# Title and subtitle (the markdown body below is user-facing text).
st.write("""
# DETECÇÃO DE DIABETES
# Aplicação em Python para detectar se uma pessoa tem diabetes ou não, usando Machine Learning!
# """)
# Display the banner image.
image = Image.open('img/neural_networks.gif')
st.image(image, caption='Diabetes Neural Networks', use_column_width=True)
# Load the diabetes dataset from CSV (expects the file relative to the app's CWD).
df = pd.read_csv('Dataset/diabetes.csv')
# Optional data-exploration widgets, kept disabled:
# st.subheader('Data Information')
#
# #Exibir tabelas
# st.dataframe(df)
#
# #Exibir estatísticas
# st.write(df.describe())
#
# #Exibir gráfico
#chart = st.bar_chart(df)
# Split into independent features X (first 8 columns) and target y (last column).
X = df.iloc[:, 0:8].values
y = df.iloc[:, -1].values
# Hold out 25% of the rows for testing; fixed seed for reproducibility.
X_train ,X_test, y_train ,y_test = train_test_split(X,y,test_size=0.25,random_state=0)
# Collect the user's input from the sidebar.
def get_user_input():
    """Render the sidebar sliders and collect the patient's attributes.

    Returns a single-row pandas DataFrame whose column order matches the
    feature order the classifier was trained on.
    """
    # NOTE(review): the blood-pressure slider bounds (0, 199, 117) look
    # copy-pasted from the glucose slider -- verify the intended range.
    record = {
        'Gravidez': st.sidebar.slider('Gravidez', 0, 17, 1),
        'Glicose': st.sidebar.slider('Glicose', 0, 199, 117),
        'Pressão Arterial': st.sidebar.slider('Pressão Arterial', 0, 199, 117),
        'Espessura de Pele': st.sidebar.slider('Espessura de Pele', 0, 99, 23),
        'Insulina': st.sidebar.slider('Insulina', 0.0, 846.0, 30.0),
        'IMC': st.sidebar.slider('Índice_de_Massa_Corpórea', 0.0, 67.1, 32.0),
        'Histórico de Diabetes na família': st.sidebar.slider('Histórico de Diabetes na família', 0.078, 2.42, 0.3725),
        'Idade': st.sidebar.slider('Idade', 21, 81, 29),
    }
    # One row, default integer index -- same shape as the original
    # dict-of-single-element-lists construction.
    return pd.DataFrame([record])
# Echo the user's chosen feature values in the main panel.
user_input = get_user_input()
st.subheader('Entrada do usuário')
st.write(user_input)

# BUGFIX: the original rebound the name `RandomForestClassifier` to the
# fitted instance, shadowing the imported class; use a distinct name.
model = RandomForestClassifier()
model.fit(X_train, y_train)

# Report held-out test-set accuracy as a percentage.
st.subheader('Modelo teste de Precisão!')
st.write(str(accuracy_score(y_test, model.predict(X_test)) * 100)+'%')

prediction = model.predict(user_input)

# Final verdict: 1 = diabetic, 0 = not diabetic.
# BUGFIX: index the single-element prediction array explicitly instead of
# relying on the deprecated ndarray -> int conversion.
st.subheader('Você tem Diabetes?: Sim(1) Não(0) ')
st.write(int(prediction[0]))
| StarcoderdataPython |
192852 | from .middleware import TestMasterMiddleware
# Distribution name of this package, exposed as a module-level constant.
name = 'scrapy-testmaster'
| StarcoderdataPython |
1648936 | import json
from typing import Union
from .validator import (
CollectionErrors,
NoneValueException,
Validator,
ValidationError,
StopValidation
)
class Array(Validator):
    """Validator that requires a list value and optionally validates its items.

    Every element is run through the configured *rules* (item validators);
    errors are collected per item under keys of the form ``"<field>.<index>"``
    and raised together as a ``CollectionErrors``.
    """

    def __init__(self,
                 message: Union[str, None] = None,
                 parse: bool = True,
                 rules: Union[list, tuple, None] = None) -> None:
        self.parse = parse
        # NOTE(review): the default text mentions tuples, but handler()
        # only accepts list instances -- confirm the intended contract.
        self.message = message or 'This field must be a list or tuple.'
        self.rules = rules

    def handler(self, value, field, request):
        """Validate *value*; raise on a type mismatch or any item errors."""
        if not isinstance(value, list):
            raise ValidationError(self.message)
        item_errors = self.array_handler(value, field, request)
        if item_errors:
            raise CollectionErrors(item_errors)

    def array_handler(self, value, field, request):
        """Run every rule over every item, mutating *value* in place.

        Returns a dict mapping ``"<field>.<index>"`` to the list of error
        messages gathered for that item; empty when everything passed.
        """
        collected = {}
        if self.rules is None:
            return collected
        for index in range(len(value)):
            item_field = f"{field}.{index}"
            messages = []
            for rule in self.rules:
                try:
                    # Rules may transform the item; keep the result in place
                    # so later rules see the updated value.
                    value[index] = rule(value[index], item_field, field, request)
                except StopValidation as err:
                    messages.append(str(err))
                    break
                except ValidationError as err:
                    messages.append(str(err))
                except CollectionErrors as err:
                    messages.append(err.errors)
                except NoneValueException:
                    # A None value silently short-circuits the remaining rules.
                    break
            if messages:
                collected[item_field] = messages
        return collected
| StarcoderdataPython |
3272449 | <filename>RushHourPy/library_of_states.py
# Cards in original puzzle
# Supplemental Cards
#1
#2
#3
#4
...
######### Various States
#########
#########
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.