hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7958432ffa8b6002827c38fb885c0031e54a57ea | 4,563 | py | Python | toolsws/backends/gridengine.py | diegodlh/operations-software-tools-webservice | c5e3aaaaa852c9c0dafc580774c27c247657913f | [
"Apache-2.0"
] | 8 | 2016-04-12T18:05:40.000Z | 2020-11-23T18:51:47.000Z | toolsws/backends/gridengine.py | diegodlh/operations-software-tools-webservice | c5e3aaaaa852c9c0dafc580774c27c247657913f | [
"Apache-2.0"
] | null | null | null | toolsws/backends/gridengine.py | diegodlh/operations-software-tools-webservice | c5e3aaaaa852c9c0dafc580774c27c247657913f | [
"Apache-2.0"
] | 3 | 2018-08-30T15:01:46.000Z | 2022-02-27T16:16:53.000Z | import os
import re
import subprocess
import xml.etree.ElementTree as ET
from toolsws.utils import wait_for
from toolsws.wstypes import GenericWebService
from toolsws.wstypes import JSWebService
from toolsws.wstypes import LighttpdPlainWebService
from toolsws.wstypes import LighttpdWebService
from toolsws.wstypes import PythonWebService
from toolsws.wstypes import TomcatWebService
from toolsws.wstypes import UwsgiWebService
from .backend import Backend
class GridEngineBackend(Backend):
    """
    A gridengine job that starts / stops a HTTP serving process (webservice).
    """

    # Specify config for each type that this backend accepts
    # Key is name of type passed in by commandline
    # cls is the Webservice class to instantiate
    # queue is an optional key that specifies which queue to run this on.
    #   options are: webgrid-lighttpd, webgrid-generic
    #   defaults to 'webgrid-generic'
    # release is an optional key that specifies which release to run this on.
    #   options are: stretch, buster
    #   defaults to stretch
    CONFIG = {
        "lighttpd": {"cls": LighttpdWebService, "queue": "webgrid-lighttpd"},
        "lighttpd-plain": {
            "cls": LighttpdPlainWebService,
            "queue": "webgrid-lighttpd",
        },
        "uwsgi-python": {"cls": PythonWebService},
        "uwsgi-plain": {"cls": UwsgiWebService},
        "nodejs": {"cls": JSWebService},
        "tomcat": {"cls": TomcatWebService},
        "generic": {"cls": GenericWebService},
    }

    def __init__(self, tool, wstype, release, extra_args=None):
        """
        :param tool: tool object this webservice belongs to
        :param wstype: one of the keys of GridEngineBackend.CONFIG
        :param release: default release label, used unless the type's
                        config pins one
        :param extra_args: extra arguments forwarded to webservice-runner
        """
        super(GridEngineBackend, self).__init__(
            tool, wstype, extra_args=extra_args
        )

        cfg = GridEngineBackend.CONFIG[self.wstype]
        self.webservice = cfg["cls"](tool, extra_args)
        # The per-type config may pin a release / queue; otherwise fall back
        # to the caller-provided release and the generic queue.
        self.release = cfg.get("release", release)
        self.queue = cfg.get("queue", "webgrid-generic")
        self.name = "{wstype}-{toolname}".format(
            wstype=self.wstype, toolname=tool.name
        )

        # Per-tool memory limit override file; default to 4G when the tool
        # has no override configured.
        try:
            memlimit = "/data/project/.system/config/{}.web-memlimit".format(
                self.tool.name
            )
            with open(memlimit) as f:
                self.memlimit = f.read().strip()
        except IOError:
            self.memlimit = "4G"

    def _get_job_xml(self):
        """
        Gets job status xml of this job

        :return: ET xml object if the job is found, None otherwise
        """
        output = subprocess.check_output(["qstat", "-xml"])

        # Fix XML.
        output = re.sub("JATASK:[^>]*", "jatask", output.decode("utf-8"))

        # GE is stupid.
        # Returns output like:
        #   <><ST_name>blah</ST_name></>
        # If the job is not found.
        if "<unknown_jobs" in output and "<>" in output:
            return None
        xml = ET.fromstring(output)
        job_name_node = xml.find('.//job_list[JB_name="%s"]' % self.name)
        return job_name_node

    def request_start(self):
        """Submit the webservice-runner job to the grid via qsub."""
        self.webservice.check(self.wstype)
        cmd = [
            "qsub",
            "-e",
            os.path.expanduser("~/error.log"),
            "-o",
            os.path.expanduser("~/error.log"),
            "-i",
            "/dev/null",
            "-q",
            self.queue,
            "-l",
            "h_vmem=%s,release=%s" % (self.memlimit, self.release),
            "-b",
            "y",
            "-N",
            self.name,
            "/usr/bin/webservice-runner",
            "--register-proxy",
            "--type",
            self.webservice.name,
        ]
        if self.extra_args:
            cmd.extend(self.extra_args)

        # subprocess.DEVNULL instead of open(os.devnull, "wb"): the previous
        # form leaked the file object (it was never closed).
        subprocess.check_call(cmd, stdout=subprocess.DEVNULL)

    def request_stop(self):
        """Delete the grid job for this webservice via qdel."""
        cmd = ["/usr/bin/qdel", self.name]
        # subprocess.DEVNULL avoids leaking an unclosed os.devnull handle.
        subprocess.check_call(cmd, stdout=subprocess.DEVNULL)

    def request_restart(self):
        """Stop, wait for full shutdown, then start again."""
        # On the grid, it is important to take down the service before starting
        # it so it runs portreleaser, etc.
        self.request_stop()
        wait_for(lambda: self.get_state() == Backend.STATE_STOPPED, "")
        self.request_start()
        wait_for(
            lambda: self.get_state() == Backend.STATE_RUNNING, "Restarting..."
        )

    def get_state(self):
        """Map the qstat job state onto a Backend.STATE_* constant."""
        job = self._get_job_xml()
        if job is not None:
            state = job.findtext(".//state").lower()
            # An 'r' in the state string means the job is running; any other
            # state (queued, waiting, etc.) is reported as pending.
            if "r" in state:
                return Backend.STATE_RUNNING
            else:
                return Backend.STATE_PENDING
        return Backend.STATE_STOPPED
| 33.306569 | 79 | 0.584046 |
795843449e1155055ac6a9286b9b10c290847472 | 303 | py | Python | django_actionable_messages/elements.py | utsurius/django-actionable-messages | 5b190bce74afcbe7fc41039f8cf87b04173208e6 | [
"MIT"
] | null | null | null | django_actionable_messages/elements.py | utsurius/django-actionable-messages | 5b190bce74afcbe7fc41039f8cf87b04173208e6 | [
"MIT"
] | null | null | null | django_actionable_messages/elements.py | utsurius/django-actionable-messages | 5b190bce74afcbe7fc41039f8cf87b04173208e6 | [
"MIT"
] | null | null | null | from typing import Union
from django_actionable_messages.mixins import CardElement
class Header(CardElement):
    """A single name/value header entry of an actionable message card."""

    def __init__(self, name: str, value: Union[str, int], **kwargs):
        # Populate the payload first, then hand remaining kwargs to the base
        # element (same order as the other elements in this package).
        self._data = {"name": name, "value": value}
        super().__init__(**kwargs)
| 23.307692 | 68 | 0.613861 |
795844a4e1cf49cf35d22c5b34499ad59f45e14b | 3,498 | py | Python | discrimination/youtube-nokey.py | panosprotopapas/Swisscom-Discrimination-Project | e84fd68326ef17f55785d8ee42dd94653430a110 | [
"Apache-2.0"
] | null | null | null | discrimination/youtube-nokey.py | panosprotopapas/Swisscom-Discrimination-Project | e84fd68326ef17f55785d8ee42dd94653430a110 | [
"Apache-2.0"
] | null | null | null | discrimination/youtube-nokey.py | panosprotopapas/Swisscom-Discrimination-Project | e84fd68326ef17f55785d8ee42dd94653430a110 | [
"Apache-2.0"
] | null | null | null | ################################
################################
###### PACKAGES ########
################################
################################
import re
import os
import json
import pymongo
import discrimination
import googleapiclient.discovery
import pickle
################################
################################
######### SCRAPER ##########
################################
################################
def scrape(
    url,
    client="mongodb://localhost:27017/",
    database="database",
    collection="youtube",
    googleapikey="",
    token=""
):
    '''Scrape all comments from the provided youtube url and save them in Mongo DB.

    Parameters
    ----------
    url : str.
        The youtube video url.
    client, database, collection : strings.
        Location of the Mongo DB client, database and collection.
    googleapikey: string.
        The key to access google's API.
    token: string
        The next page string provided when scraping youtube. If left empty
        scraping starts from the beginning of the comments.

    Returns
    -------
    bool
        True once all comment pages have been fetched and stored.
    '''
    # Extract the 11-character video id that follows "v=" in the url.
    video_id = re.findall('v=.{11}', url)[0][-11:]

    # Scraped flag and page counter.
    scraped = False
    counter = 0

    # Google's API settings.
    api_service_name = "youtube"
    api_version = "v3"
    DEVELOPER_KEY = googleapikey

    # Build the YouTube Data API client.
    youtube = googleapiclient.discovery.build(
        api_service_name, api_version, developerKey=DEVELOPER_KEY)

    # As long as the last page hasn't been reached (nextPageToken is None
    # on the last page)...  ("is not None" instead of "!= None": identity
    # comparison is the correct idiom for None.)
    while token is not None:

        counter += 1

        # Request up to 100 top-level comment threads, including replies.
        request = youtube.commentThreads().list(
            part="snippet,replies",
            maxResults=100,
            videoId=video_id,
            pageToken=token
        )
        r = request.execute()

        # Collect the comment texts of this page here.
        texts = []

        # Append comments (and replies to comments) in the list created.
        for item in r["items"]:
            comment = item["snippet"]["topLevelComment"]["snippet"].get("textOriginal")
            texts.append(comment)
            replies = item.get("replies")
            if replies is not None:
                for subitem in replies["comments"]:
                    reply = subitem["snippet"].get("textOriginal")
                    texts.append(reply)

        # Get the next page token (next 100 comments).
        token = r.get("nextPageToken")

        # Save each (text, video_id) pair to Mongo unless already present.
        table = discrimination.mongo.collection(client, database, collection)
        for text in texts:
            if table.count_documents({
                    "text": text,
                    "video_id": video_id}) == 0:
                table.insert_one({
                    "text": text,
                    "video_id": video_id,
                    "sexist": -1
                })

        # Keep track of the next page token by saving it in Mongo, so an
        # interrupted scrape can be resumed from where it stopped.
        table = discrimination.mongo.collection(client, database, "youtubetemp")
        table.insert_one({
            "token": token,
            "counter": counter
        })

        # At least one page was fetched and stored.
        scraped = True

    # Plain "return scraped" (the previous "return(scraped);" mixed in a
    # needless paren pair and a stray semicolon).
    return scraped
| 30.155172 | 133 | 0.492281 |
795844e877fb56ab0dfb19afb3649f9c1ca65f57 | 4,479 | py | Python | NAS/cifar100/models/resnet.py | TiffanyTseng54/SimpleCVReproduction | 52754eb924c6c094af0b8089a1bd7af0be7ec1a3 | [
"Apache-2.0"
] | 1 | 2022-03-03T06:04:34.000Z | 2022-03-03T06:04:34.000Z | NAS/cifar100/models/resnet.py | TiffanyTseng54/SimpleCVReproduction | 52754eb924c6c094af0b8089a1bd7af0be7ec1a3 | [
"Apache-2.0"
] | null | null | null | NAS/cifar100/models/resnet.py | TiffanyTseng54/SimpleCVReproduction | 52754eb924c6c094af0b8089a1bd7af0be7ec1a3 | [
"Apache-2.0"
] | null | null | null | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['ResNet18', "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual connection (ResNet basic block)."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Use a 1x1 projection on the skip path only when the residual
        # branch changes the spatial size or the channel count.
        out_planes = self.expansion * planes
        if stride == 1 and in_planes == out_planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + self.shortcut(x)
        return F.relu(y)
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand residual block (ResNet bottleneck)."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes

        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        # Identity skip when shapes already match; otherwise project with a
        # strided 1x1 convolution so the addition is well-defined.
        if stride == 1 and in_planes == out_planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + self.shortcut(x)
        return F.relu(y)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four stages, 4x4 average pool, linear head.

    ``block`` is the residual block class (e.g. BasicBlock/Bottleneck) and
    ``num_blocks`` gives the number of blocks in each of the four stages.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stages double the channel width and (from stage 2 on) halve the
        # spatial resolution via the first block's stride.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one gets ``stride``."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def ResNet18(num_classes=10):
    """ResNet-18: BasicBlock stages of depth [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)


def ResNet34(num_classes=10):
    """ResNet-34: BasicBlock stages of depth [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)


def ResNet50(num_classes=10):
    """ResNet-50: Bottleneck stages of depth [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)


def ResNet101(num_classes=10):
    """ResNet-101: Bottleneck stages of depth [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)


def ResNet152(num_classes=10):
    """ResNet-152: Bottleneck stages of depth [3, 8, 36, 3]."""
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
def test():
    """Smoke test: run one random CIFAR-sized image through a ResNet-18."""
    model = ResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
795844fad40837b94c865d1da49250475681ab54 | 4,204 | py | Python | python/tvm/relay/op/image/_image.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | null | null | null | python/tvm/relay/op/image/_image.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | null | null | null | python/tvm/relay/op/image/_image.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm.te.hybrid import script
from tvm.runtime import convert
import topi
from topi.util import get_const_tuple
from .. import op as reg
from .. import strategy
from ..op import OpPattern
# resize
@reg.register_compute("image.resize")
def compute_resize(attrs, inputs, out_type):
    """Compute definition for ``image.resize``.

    Unpacks the relay op attributes and delegates to ``topi.image.resize``
    on the single input tensor.
    """
    size = attrs.size
    layout = attrs.layout
    method = attrs.method
    coord_trans = attrs.coordinate_transformation_mode
    out_dtype = attrs.out_dtype
    return [topi.image.resize(inputs[0], size, layout, method, coord_trans, out_dtype)]


# Resize is elementwise per output pixel, so an injective schedule suffices.
reg.register_injective_schedule("image.resize")
@reg.register_compute("image.resize3d")
def compute_resize3d(attrs, inputs, out_type):
    """Compute definition for ``image.resize3d``.

    Same shape of glue as ``compute_resize`` but for volumetric data,
    forwarding to ``topi.image.resize3d``.
    """
    size = attrs.size
    layout = attrs.layout
    method = attrs.method
    coord_trans = attrs.coordinate_transformation_mode
    out_dtype = attrs.out_dtype
    return [topi.image.resize3d(inputs[0], size, layout, method, coord_trans, out_dtype)]


reg.register_injective_schedule("image.resize3d")
# crop and resize
@reg.register_compute("image.crop_and_resize")
def compute_crop_and_resize(attrs, inputs, out_type):
    """Compute definition for ``image.crop_and_resize``.

    ``inputs`` carries three tensors, passed positionally to
    ``topi.image.crop_and_resize`` (image, boxes, box indices — see the topi
    signature for the exact contract).
    """
    crop_size = attrs.crop_size
    layout = attrs.layout
    method = attrs.method
    extrapolation_value = attrs.extrapolation_value
    out_dtype = attrs.out_dtype
    return [topi.image.crop_and_resize(inputs[0], inputs[1], inputs[2],
                                       crop_size, layout, method,
                                       extrapolation_value, out_dtype)]


reg.register_injective_schedule("image.crop_and_resize")
# Hybrid-script helper that computes the 4-D output shape of crop_and_resize:
# dim 0 is the number of boxes, the height/width axes get the crop size, and
# the channel axis is copied from the input image shape.  The function body
# is left untouched because @script re-parses its source with the hybrid
# parser.
@script
def _crop_and_resize_func(image_shape, boxes_shape, crop_size,
                          height_axis, width_axis, channel_axis):
    out = output_tensor((4,), "int64")
    out[0] = boxes_shape[0]
    out[height_axis] = int64(crop_size[0])
    out[width_axis] = int64(crop_size[1])
    out[channel_axis] = image_shape[channel_axis]
    return out
@reg.register_shape_func("image.crop_and_resize", False)
def crop_and_resize_func(attrs, inputs, _):
    """
    Shape function for crop_and_resize op.

    Resolves the H/W/C axis positions from the layout string and forwards
    the input shape tensors plus the static crop size to the hybrid-script
    shape helper.
    """
    layout = attrs.layout
    # Default every axis to 1; overwrite with the positions found in the
    # layout string (e.g. "NHWC" -> H=1, W=2, C=3).
    height_axis = width_axis = channel_axis = 1
    for i, letter in enumerate(layout):
        if letter == "H":
            height_axis = i
        if letter == "W":
            width_axis = i
        if letter == "C":
            channel_axis = i
    crop_size = get_const_tuple(attrs.crop_size)
    return [_crop_and_resize_func(inputs[0], inputs[1], convert(crop_size),
                                  convert(height_axis), convert(width_axis), convert(channel_axis))]
# dilation2d
# Register the target-dependent compute/schedule strategy and mark the op
# as fusable with following elementwise ops.
reg.register_strategy("image.dilation2d", strategy.dilation2d_strategy)
reg.register_pattern("image.dilation2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# affine_grid
@reg.register_compute("image.affine_grid")
def compute_affine_grid(attrs, inputs, out_dtype):
    """Compute definition for ``image.affine_grid``.

    Converts the static ``target_shape`` attribute to a tuple and forwards
    the affine matrices to ``topi.image.affine_grid``.
    """
    target_shape = get_const_tuple(attrs.target_shape)
    return [topi.image.affine_grid(inputs[0], target_shape)]


reg.register_injective_schedule("image.affine_grid")
# grid_sample
@reg.register_compute("image.grid_sample")
def compute_grid_sample(attrs, inputs, out_dtype):
    """Compute definition for ``image.grid_sample``.

    Samples ``inputs[0]`` at the coordinates given by ``inputs[1]`` via
    ``topi.image.grid_sample`` with the configured method and layout.
    """
    method = attrs.method
    layout = attrs.layout
    return [topi.image.grid_sample(inputs[0], inputs[1], method, layout)]


reg.register_injective_schedule("image.grid_sample")
| 34.743802 | 100 | 0.725737 |
795846ca7df89602de9b3a5f0e85ac6c09825572 | 275 | py | Python | pharos/utils.py | Yiling-J/pharos | a6dd80bd7c0475d78e6490735e3e5fd6eebc28c5 | [
"BSD-3-Clause"
] | 1 | 2021-12-03T16:28:41.000Z | 2021-12-03T16:28:41.000Z | pharos/utils.py | Yiling-J/pharos | a6dd80bd7c0475d78e6490735e3e5fd6eebc28c5 | [
"BSD-3-Clause"
] | 14 | 2021-04-08T14:16:27.000Z | 2021-05-24T15:15:11.000Z | pharos/utils.py | Yiling-J/pharos | a6dd80bd7c0475d78e6490735e3e5fd6eebc28c5 | [
"BSD-3-Clause"
] | null | null | null | from collections import UserDict
class ReadOnlyDict(UserDict):
    """Dict-like wrapper that rejects item assignment and deletion."""

    def __init__(self, data):
        # Deliberately bypass UserDict.__init__: it would populate the dict
        # via update(), which goes through the (raising) __setitem__ below.
        # This also wraps *data* directly instead of copying it.
        self.data = data

    def __setitem__(self, key, value):
        raise TypeError("readonly dict")

    def __delitem__(self, key):
        raise TypeError("readonly dict")
795847d6c54a5ad5ad34335eed6c4d2c798c5692 | 2,450 | py | Python | ppma/imagenet/validate.py | lmk123568/Paddle_Model_Analysis | d03a82591512fcad065fc69abfdbfb0835099c2d | [
"MIT"
] | 13 | 2021-07-03T05:26:56.000Z | 2022-03-19T01:34:11.000Z | ppma/imagenet/validate.py | lmk123568/Paddle_Model_Analysis | d03a82591512fcad065fc69abfdbfb0835099c2d | [
"MIT"
] | 1 | 2021-11-18T07:07:03.000Z | 2021-11-23T01:50:01.000Z | ppma/imagenet/validate.py | lmk123568/Paddle_Model_Analysis | d03a82591512fcad065fc69abfdbfb0835099c2d | [
"MIT"
] | 1 | 2022-03-19T01:34:15.000Z | 2022-03-19T01:34:15.000Z | import time
import numpy as np
import paddle
import paddle.nn.functional as F
import paddle.vision.transforms as T
from paddle.io import DataLoader, Dataset
from PIL import Image
from .utils import AverageMeter, get_val_transforms
class ImageNet2012Dataset(Dataset):
    """ImageNet-2012 validation dataset over a list of (path, label) pairs.

    Images are loaded with PIL, forced to RGB, and passed through the
    validation transform pipeline; labels come back as int64 arrays.
    """

    def __init__(self, data, image_size, crop_pct, normalize):
        super().__init__()
        self.data = data
        self.transforms = get_val_transforms(image_size, crop_pct, normalize)

    def __getitem__(self, idx):
        path, label = self.data[idx][0], self.data[idx][1]

        # Load and preprocess the image; convert non-RGB modes (grayscale,
        # palette, CMYK) so the transform pipeline sees 3 channels.
        img = Image.open(path)
        if img.mode != "RGB":
            img = img.convert("RGB")
        img = self.transforms(img)

        # Labels are wrapped as 1-element int64 arrays.
        target = np.array([label], dtype="int64")
        return img, target

    def __len__(self):
        # One sample per (path, label) entry.
        return len(self.data)
def val(
    model, data_path, batch_size=128, img_size=224, crop_pct=0.875, normalize=0.485
):
    """Evaluate *model* on the ImageNet validation split listed in
    ``<data_path>/val.txt`` and print running top-1 / top-5 accuracy."""
    # Each line of val.txt is "<relative path> <label>".
    samples = []
    with open(data_path + "/" + "val.txt") as f:
        for line in f:
            rel_path, label = line.strip("\n").split(" ")
            samples.append([data_path + "/" + rel_path, int(label)])

    loader = DataLoader(
        ImageNet2012Dataset(samples, img_size, crop_pct, normalize),
        batch_size=batch_size,
    )

    model.eval()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()

    with paddle.no_grad():
        for i, (images, target) in enumerate(loader):
            # Time the forward pass only.
            start = time.perf_counter()
            output = model(images)
            elapsed = time.perf_counter() - start

            pred = F.softmax(output)
            acc1 = paddle.metric.accuracy(pred, target)
            acc5 = paddle.metric.accuracy(pred, target, k=5)

            # Weight the running averages by the actual batch size (the
            # last batch may be smaller than *batch_size*).
            n = images.shape[0]
            acc1_meter.update(acc1.numpy()[0], n)
            acc5_meter.update(acc5.numpy()[0], n)

            if i % 40 == 0:
                print(
                    f"[{i: >3}/{len(loader):}] top1_acc: {acc1_meter.avg:.4f} top5_acc: {acc5_meter.avg:.4f} time: {elapsed:.3f}s"
                )

    print(
        "Overall top1_acc: {:.4f} top5_acc: {:.4f}".format(
            acc1_meter.avg, acc5_meter.avg
        )
    )
795847e9f66e2184df61be843d0b068be088d9f6 | 18,163 | py | Python | tests/unit/states/test_glusterfs.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | tests/unit/states/test_glusterfs.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/states/test_glusterfs.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch)
# Import Salt Libs
import salt.states.glusterfs as glusterfs
import salt.utils.cloud
import salt.utils.network
import salt.modules.glusterfs as mod_glusterfs
class GlusterfsTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.glusterfs
'''
    def setup_loader_modules(self):
        # Wire the real execution-module ``glusterfs.peer`` into the state
        # module's __salt__ so these tests exercise its name validation.
        return {
            glusterfs: {
                '__salt__': {'glusterfs.peer': mod_glusterfs.peer}
            }
        }
# 'peered' function tests: 1
    def test_peered(self):
        '''
        Test to verify if node is peered.

        Covers: peering with one of the local host's own addresses (no-op),
        an already-peered host, a successful new peer, a failed peer call,
        an invalid peer name, and test=True mode.
        '''
        name = 'server1'

        ret = {'name': name,
               'result': True,
               'comment': '',
               'changes': {}}

        mock_ip = MagicMock(return_value=['1.2.3.4', '1.2.3.5'])
        mock_ip6 = MagicMock(return_value=['2001:db8::1'])
        mock_host_ips = MagicMock(return_value=['1.2.3.5'])
        mock_peer = MagicMock(return_value=True)
        mock_status = MagicMock(return_value={'uuid1': {'hostnames': [name]}})

        with patch.dict(glusterfs.__salt__, {'glusterfs.peer_status': mock_status,
                                             'glusterfs.peer': mock_peer}):
            with patch.object(salt.utils.network, 'ip_addrs', mock_ip), \
                    patch.object(salt.utils.network, 'ip_addrs6', mock_ip6), \
                    patch.object(salt.utils.network, 'host_to_ips', mock_host_ips):
                # Target resolves to a local IPv4 address -> no peering needed.
                comt = 'Peering with localhost is not needed'
                ret.update({'comment': comt})
                self.assertDictEqual(glusterfs.peered(name), ret)

                # Same for a local IPv6 address.
                mock_host_ips.return_value = ['2001:db8::1']
                self.assertDictEqual(glusterfs.peered(name), ret)

                # Non-local address, but peer_status already lists the host.
                mock_host_ips.return_value = ['1.2.3.42']
                comt = ('Host {0} already peered'.format(name))
                ret.update({'comment': comt})
                self.assertDictEqual(glusterfs.peered(name), ret)

                with patch.dict(glusterfs.__opts__, {'test': False}):
                    # Successful peering: status changes from old to new.
                    old = {'uuid1': {'hostnames': ['other1']}}
                    new = {'uuid1': {'hostnames': ['other1']},
                           'uuid2': {'hostnames': ['someAlias', name]}}
                    mock_status.side_effect = [old, new]
                    comt = 'Host {0} successfully peered'.format(name)
                    ret.update({'comment': comt,
                                'changes': {'old': old, 'new': new}})
                    self.assertDictEqual(glusterfs.peered(name), ret)
                    mock_status.side_effect = None

                    # glusterfs.peer returns False -> state fails, no changes.
                    mock_status.return_value = {
                        'uuid1': {'hostnames': ['other']}
                    }
                    mock_peer.return_value = False

                    ret.update({'result': False})

                    comt = ('Failed to peer with {0},'
                            + ' please check logs for errors').format(name)
                    ret.update({'comment': comt, 'changes': {}})
                    self.assertDictEqual(glusterfs.peered(name), ret)

                    # Invalid characters in the peer name are rejected.
                    comt = ('Invalid characters in peer name.')
                    ret.update({'comment': comt, 'name': ':/'})
                    self.assertDictEqual(glusterfs.peered(':/'), ret)

                ret.update({'name': name})

                # test=True: only announce what would be done.
                with patch.dict(glusterfs.__opts__, {'test': True}):
                    comt = ('Peer {0} will be added.'.format(name))
                    ret.update({'comment': comt, 'result': None})
                    self.assertDictEqual(glusterfs.peered(name), ret)
# 'volume_present' function tests: 1
    def test_volume_present(self):
        '''
        Test to ensure that a volume exists

        Covers: volume already present (started/stopped), test=True
        announcements, creation with and without start, creation failure,
        and invalid volume names.
        '''
        name = 'salt'
        bricks = ['host1:/brick1']

        ret = {'name': name,
               'result': True,
               'comment': '',
               'changes': {}}

        started_info = {name: {'status': '1'}}
        stopped_info = {name: {'status': '0'}}

        mock_info = MagicMock()
        mock_list = MagicMock()
        mock_create = MagicMock()
        mock_start = MagicMock(return_value=True)

        with patch.dict(glusterfs.__salt__, {
                'glusterfs.info': mock_info,
                'glusterfs.list_volumes': mock_list,
                'glusterfs.create_volume': mock_create,
                'glusterfs.start_volume': mock_start}):
            with patch.dict(glusterfs.__opts__, {'test': False}):
                # Volume exists and is already started.
                mock_list.return_value = [name]
                mock_info.return_value = started_info
                comt = ('Volume {0} already exists and is started'.format(name))
                ret.update({'comment': comt})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=True), ret)

                # Volume exists but is stopped -> gets started.
                mock_info.return_value = stopped_info
                comt = ('Volume {0} already exists and is now started'.format(name))
                ret.update({'comment': comt,
                            'changes': {'old': 'stopped', 'new': 'started'}})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=True), ret)

                # start=False leaves the stopped volume alone.
                comt = ('Volume {0} already exists'.format(name))
                ret.update({'comment': comt, 'changes': {}})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=False), ret)
            # test=True only announces the pending actions.
            with patch.dict(glusterfs.__opts__, {'test': True}):
                comt = ('Volume {0} already exists'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=False), ret)

                comt = ('Volume {0} already exists'
                        + ' and will be started').format(name)
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=True), ret)

                mock_list.return_value = []
                comt = ('Volume {0} will be created'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=False), ret)

                comt = ('Volume {0} will be created'
                        + ' and started').format(name)
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=True), ret)

            with patch.dict(glusterfs.__opts__, {'test': False}):
                # Successful creation: list_volumes shows [] then [name].
                mock_list.side_effect = [[], [name]]
                comt = ('Volume {0} is created'.format(name))
                ret.update({'comment': comt,
                            'result': True,
                            'changes': {'old': [], 'new': [name]}})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=False), ret)

                # Successful creation plus start.
                mock_list.side_effect = [[], [name]]
                comt = ('Volume {0} is created and is now started'.format(name))
                ret.update({'comment': comt, 'result': True})
                self.assertDictEqual(glusterfs.volume_present(name, bricks,
                                                              start=True), ret)

                # create_volume failure is reported with no changes.
                mock_list.side_effect = None
                mock_list.return_value = []
                mock_create.return_value = False
                comt = 'Creation of volume {0} failed'.format(name)
                ret.update({'comment': comt, 'result': False, 'changes': {}})
                self.assertDictEqual(glusterfs.volume_present(name, bricks),
                                     ret)

            # check_name flags the volume name as invalid.
            with patch.object(salt.utils.cloud, 'check_name',
                              MagicMock(return_value=True)):
                comt = ('Invalid characters in volume name.')
                ret.update({'comment': comt, 'result': False})
                self.assertDictEqual(glusterfs.volume_present(name, bricks),
                                     ret)
# 'started' function tests: 1
    def test_started(self):
        '''
        Test to check if volume has been started

        Covers: missing volume, already-started volume, test=True
        announcement, and actually starting a stopped volume.
        '''
        name = 'salt'

        ret = {'name': name,
               'result': False,
               'comment': '',
               'changes': {}}

        started_info = {name: {'status': '1'}}
        stopped_info = {name: {'status': '0'}}
        mock_info = MagicMock(return_value={})
        mock_start = MagicMock(return_value=True)

        with patch.dict(glusterfs.__salt__,
                        {'glusterfs.info': mock_info,
                         'glusterfs.start_volume': mock_start}):
            # Unknown volume -> failure.
            comt = ('Volume {0} does not exist'.format(name))
            ret.update({'comment': comt})
            self.assertDictEqual(glusterfs.started(name), ret)

            # Already started -> success, nothing to do.
            mock_info.return_value = started_info
            comt = ('Volume {0} is already started'.format(name))
            ret.update({'comment': comt, 'result': True})
            self.assertDictEqual(glusterfs.started(name), ret)

            with patch.dict(glusterfs.__opts__, {'test': True}):
                mock_info.return_value = stopped_info
                comt = ('Volume {0} will be started'.format(name))
                ret.update({'comment': comt, 'result': None})
                self.assertDictEqual(glusterfs.started(name), ret)

            with patch.dict(glusterfs.__opts__, {'test': False}):
                comt = 'Volume {0} is started'.format(name)
                # NOTE(review): the key 'change' (singular) below looks like a
                # typo for 'changes' — verify against the state's return value.
                ret.update({'comment': comt, 'result': True,
                            'change': {'new': 'started', 'old': 'stopped'}})
                self.assertDictEqual(glusterfs.started(name), ret)
# 'add_volume_bricks' function tests: 1
    def test_add_volume_bricks(self):
        '''
        Test to add brick(s) to an existing volume

        Covers: missing volume, stopped volume, failed add, bricks already
        present, and a successful add with reported changes.
        '''
        name = 'salt'
        bricks = ['host1:/drive1']
        old_bricks = ['host1:/drive2']

        ret = {'name': name,
               'result': False,
               'comment': '',
               'changes': {}}

        stopped_volinfo = {'salt': {'status': '0'}}
        volinfo = {
            'salt': {
                'status': '1',
                'bricks': {'brick1': {'path': old_bricks[0]}}
            }
        }
        new_volinfo = {
            'salt': {
                'status': '1',
                'bricks': {
                    'brick1': {'path': old_bricks[0]},
                    'brick2': {'path': bricks[0]}
                }
            }
        }

        mock_info = MagicMock(return_value={})
        # First add_volume_bricks call fails, second succeeds.
        mock_add = MagicMock(side_effect=[False, True])

        with patch.dict(glusterfs.__salt__,
                        {'glusterfs.info': mock_info,
                         'glusterfs.add_volume_bricks': mock_add}):
            # Unknown volume.
            ret.update({'comment': 'Volume salt does not exist'})
            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)

            # Volume exists but is not started.
            mock_info.return_value = stopped_volinfo
            ret.update({'comment': 'Volume salt is not started'})
            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)

            # Execution module reports failure (first side_effect value).
            mock_info.return_value = volinfo
            ret.update({'comment': 'Adding bricks to volume salt failed'})
            self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret)

            # Requested bricks are already part of the volume.
            ret.update({'result': True})
            ret.update({'comment': 'Bricks already added in volume salt'})
            self.assertDictEqual(glusterfs.add_volume_bricks(name, old_bricks),
                                 ret)

            # Successful add: info is consulted before and after the change.
            mock_info.side_effect = [volinfo, new_volinfo]
            ret.update({'comment': 'Bricks successfully added to volume salt',
                        'changes': {'new': bricks + old_bricks,
                                    'old': old_bricks}})
            # Let's sort ourselves because the test under python 3 sometimes fails
            # just because of the new changes list order
            result = glusterfs.add_volume_bricks(name, bricks)
            ret['changes']['new'] = sorted(ret['changes']['new'])
            result['changes']['new'] = sorted(result['changes']['new'])
            self.assertDictEqual(result, ret)
# 'op_version' function tests: 1
    def test_op_version(self):
        '''
        Test setting the Glusterfs op-version

        Covers: failure to read the current version, version already at the
        target, test=True announcement, a failed set, and a successful set.
        '''
        name = 'salt'
        current = 30707
        new = 31200

        ret = {'name': name,
               'result': False,
               'comment': '',
               'changes': {}}

        mock_get_version = MagicMock(return_value={})
        mock_set_version = MagicMock(return_value={})

        with patch.dict(glusterfs.__salt__,
                        {'glusterfs.get_op_version': mock_get_version,
                         'glusterfs.set_op_version': mock_set_version}):
            # get_op_version reports an error as [False, message].
            mock_get_version.return_value = [False, 'some error message']
            ret.update({'result': False})
            ret.update({'comment': 'some error message'})
            self.assertDictEqual(glusterfs.op_version(name, current), ret)

            # Already at the requested op-version.
            mock_get_version.return_value = current
            ret.update({'result': True})
            ret.update({'comment': 'Glusterfs cluster.op-version for {0} already set to {1}'.format(name, current)})
            self.assertDictEqual(glusterfs.op_version(name, current), ret)

            with patch.dict(glusterfs.__opts__, {'test': True}):
                mock_set_version.return_value = [False, 'Failed to set version']
                ret.update({'result': None})
                ret.update({'comment': 'An attempt would be made to set the cluster.op-version for {0} to {1}.'.
                            format(name, new)})
                self.assertDictEqual(glusterfs.op_version(name, new), ret)

            with patch.dict(glusterfs.__opts__, {'test': False}):
                # set_op_version failure propagates the error message.
                mock_set_version.return_value = [False, 'Failed to set version']
                ret.update({'result': False})
                ret.update({'comment': 'Failed to set version'})
                self.assertDictEqual(glusterfs.op_version(name, new), ret)

                # Successful change reports old/new versions in changes.
                mock_set_version.return_value = 'some success message'
                ret.update({'comment': 'some success message'})
                ret.update({'changes': {'old': current, 'new': new}})
                ret.update({'result': True})
                self.assertDictEqual(glusterfs.op_version(name, new), ret)
    # 'max_op_version' function tests: 1
    def test_max_op_version(self):
        '''
        Test setting the Glusterfs to its self reported max-op-version

        Mirrors test_op_version but also mocks get_max_op_version. The one
        ``ret`` dict is updated to the expected state before each assertion,
        so statement order is significant.
        '''
        name = 'salt'
        current = 30707
        new = 31200
        # Expected state-return skeleton; mutated per phase below.
        ret = {'name': name,
               'result': False,
               'comment': '',
               'changes': {}}
        mock_get_version = MagicMock(return_value={})
        mock_get_max_op_version = MagicMock(return_value={})
        mock_set_version = MagicMock(return_value={})
        with patch.dict(glusterfs.__salt__,
                        {'glusterfs.get_op_version': mock_get_version,
                         'glusterfs.set_op_version': mock_set_version,
                         'glusterfs.get_max_op_version': mock_get_max_op_version}):
            # Phase 1: current op-version lookup fails -> error surfaced.
            mock_get_version.return_value = [False, 'some error message']
            ret.update({'result': False})
            ret.update({'comment': 'some error message'})
            self.assertDictEqual(glusterfs.max_op_version(name), ret)
            # Phase 2: max op-version lookup fails -> error surfaced.
            mock_get_version.return_value = current
            mock_get_max_op_version.return_value = [False, 'some error message']
            ret.update({'result': False})
            ret.update({'comment': 'some error message'})
            self.assertDictEqual(glusterfs.max_op_version(name), ret)
            # Phase 3: already at the max op-version -> no-op success.
            mock_get_version.return_value = current
            mock_get_max_op_version.return_value = current
            ret.update({'result': True})
            ret.update({'comment': 'The cluster.op-version is already set to the cluster.max-op-version of {0}'.
                        format(current)})
            self.assertDictEqual(glusterfs.max_op_version(name), ret)
            # Phase 4a: test mode -> result None and "would be made" comment.
            with patch.dict(glusterfs.__opts__, {'test': True}):
                mock_get_max_op_version.return_value = new
                ret.update({'result': None})
                ret.update({'comment': 'An attempt would be made to set the cluster.op-version to {0}.'.
                            format(new)})
                self.assertDictEqual(glusterfs.max_op_version(name), ret)
            # Phase 4b: real run -> failed set, then successful set with
            # old/new versions recorded in ``changes``.
            with patch.dict(glusterfs.__opts__, {'test': False}):
                mock_set_version.return_value = [False, 'Failed to set version']
                ret.update({'result': False})
                ret.update({'comment': 'Failed to set version'})
                self.assertDictEqual(glusterfs.max_op_version(name), ret)
                mock_set_version.return_value = 'some success message'
                ret.update({'comment': 'some success message'})
                ret.update({'changes': {'old': current, 'new': new}})
                ret.update({'result': True})
                self.assertDictEqual(glusterfs.max_op_version(name), ret)
| 43.452153 | 116 | 0.525134 |
795848d35b17eaa427e2bcfc499d125a5d204ae8 | 335 | py | Python | notifyme/app.py | djbelyak/NotifyMeBot | 773a8686f910216933de5d7403c8ba8e9cdbd991 | [
"MIT"
] | null | null | null | notifyme/app.py | djbelyak/NotifyMeBot | 773a8686f910216933de5d7403c8ba8e9cdbd991 | [
"MIT"
] | 5 | 2018-03-19T17:08:47.000Z | 2018-03-27T17:08:49.000Z | notifyme/app.py | djbelyak/NotifyMeBot | 773a8686f910216933de5d7403c8ba8e9cdbd991 | [
"MIT"
] | null | null | null | ''' This package contains a main logic of application. '''
import subprocess
class App():
    """Runs a configured command and notifies when it has finished."""

    def __init__(self, command, notificator):
        """Store the command (argv list) and the notificator to call back."""
        self.command = command
        self.notificator = notificator

    def run(self):
        """Execute the command, then send a completion notification."""
        subprocess.run(self.command)
        completion_message = ' '.join(self.command) + ' complete'
        self.notificator.notify(completion_message)
| 25.769231 | 69 | 0.659701 |
795848dd33ba2b5b13e5c38433b75ea21eb272aa | 5,775 | py | Python | betty/cropper/dssim.py | theonion/betty-cropper | bb0e570c1eb0ddb2f39d109f996edd1d417d1fe4 | [
"MIT"
] | 14 | 2015-01-13T21:24:30.000Z | 2020-12-18T23:39:35.000Z | betty/cropper/dssim.py | theonion/betty-cropper | bb0e570c1eb0ddb2f39d109f996edd1d417d1fe4 | [
"MIT"
] | 39 | 2015-06-08T20:56:35.000Z | 2017-06-09T03:20:08.000Z | betty/cropper/dssim.py | theonion/betty-cropper | bb0e570c1eb0ddb2f39d109f996edd1d417d1fe4 | [
"MIT"
] | 5 | 2015-12-04T20:22:08.000Z | 2018-03-05T19:17:43.000Z | try:
import numpy as np
import scipy.ndimage
except ImportError:
pass
from betty.conf.app import settings
import io
import math
from PIL import Image
# Minimum distinct colors for the (currently disabled) enough_colors check.
MIN_UNIQUE_COLORS = 4096
# Maximum tolerated relative change in color density after compression;
# beyond this the distortion score is inflated in detect_optimal_quality.
COLOR_DENSITY_RATIO = 0.11
# NOTE(review): appears unused anywhere in this module — confirm before removal.
QUALITY_IN_MIN = 82
# Target DSSIM-derived distortion for the JPEG-quality binary search.
ERROR_THRESHOLD = 1.3
# Relative tolerance around ERROR_THRESHOLD allowing early exit of the search.
ERROR_THRESHOLD_INACCURACY = 0.01
def compute_ssim(im1, im2, l=255):
    """Compute the mean structural similarity (SSIM) of two 2-D images.

    Follows the classic Wang et al. formulation with a uniform 8x8 window
    (like the original Matlab reference implementation).

    Args:
        im1: First image, 2-D array-like of pixel intensities.
        im2: Second image, same shape as ``im1``.
        l: Dynamic range of the pixel values (width of the color map).

    Returns:
        The mean SSIM over the image as a float (1.0 for identical inputs).
    """
    # k1,k2 & c1,c2 depend on L (width of color map)
    k_1 = 0.01
    c_1 = (k_1 * l) ** 2
    k_2 = 0.03
    c_2 = (k_2 * l) ** 2

    # Uniform 8x8 averaging window.
    window = np.ones((8, 8)) / 64.0

    # Convert image matrices to double precision (like in the Matlab version).
    # Fix: ``np.float`` was removed in NumPy 1.24; use np.float64 explicitly.
    im1 = im1.astype(np.float64)
    im2 = im2.astype(np.float64)

    # Means obtained by filtering of inputs.
    # Fix: the ``scipy.ndimage.filters`` namespace is deprecated and removed
    # in SciPy >= 1.15; the same functions live on scipy.ndimage directly.
    mu_1 = scipy.ndimage.convolve(im1, window)
    mu_2 = scipy.ndimage.convolve(im2, window)

    # Squares of means
    mu_1_sq = mu_1 ** 2
    mu_2_sq = mu_2 ** 2
    mu_1_mu_2 = mu_1 * mu_2

    # Squares of input matrices
    im1_sq = im1 ** 2
    im2_sq = im2 ** 2
    im12 = im1 * im2

    # Variances obtained by filtering of inputs' squares
    sigma_1_sq = scipy.ndimage.convolve(im1_sq, window)
    sigma_2_sq = scipy.ndimage.convolve(im2_sq, window)

    # Covariance
    sigma_12 = scipy.ndimage.convolve(im12, window)

    # Centered squares of variances
    sigma_1_sq -= mu_1_sq
    sigma_2_sq -= mu_2_sq
    sigma_12 -= mu_1_mu_2

    if (c_1 > 0) & (c_2 > 0):
        ssim_map = (((2 * mu_1_mu_2 + c_1) * (2 * sigma_12 + c_2)) /
                    ((mu_1_sq + mu_2_sq + c_1) * (sigma_1_sq + sigma_2_sq + c_2)))
    else:
        # Degenerate stabilizers (l == 0): only divide where denominators allow.
        numerator1 = 2 * mu_1_mu_2 + c_1
        numerator2 = 2 * sigma_12 + c_2
        denominator1 = mu_1_sq + mu_2_sq + c_1
        denominator2 = sigma_1_sq + sigma_2_sq + c_2
        # Fix: the map must share the 2-D shape of the inputs so the boolean
        # index arrays below line up (``np.ones(mu_1.size)`` was 1-D and
        # raised an IndexError on 2-D boolean indexing).
        ssim_map = np.ones(mu_1.shape)
        index = (denominator1 * denominator2 > 0)
        ssim_map[index] = ((numerator1[index] * numerator2[index]) /
                           (denominator1[index] * denominator2[index]))
        index = (denominator1 != 0) & (denominator2 == 0)
        ssim_map[index] = numerator1[index] / denominator1[index]

    # return MSSIM
    index = np.mean(ssim_map)
    return index
def unique_colors(img):
    """Number of distinct colors in ``img`` (an H x W x C array).

    Each pixel's channel bytes are viewed as a single opaque (void) scalar
    so np.unique can count distinct "rows" in one pass; trick taken from
    http://stackoverflow.com/a/16973510
    """
    contiguous = np.ascontiguousarray(img)
    pixel_void_dtype = np.dtype(
        (np.void, img.dtype.itemsize * img.shape[2]))
    color_view = contiguous.view(pixel_void_dtype)
    return np.unique(color_view).size


def color_density(img):
    """Distinct colors per pixel: unique_colors(img) / (height * width)."""
    pixel_count = img.shape[0] * img.shape[1]
    return unique_colors(img) / float(pixel_count)
def enough_colors(img):
    """Return True if ``img`` has enough color variety to optimize.

    The unique-color heuristic (reject images with fewer than
    MIN_UNIQUE_COLORS distinct colors, and someday skip greyscale images)
    is intentionally disabled: the original body placed it below an
    unconditional ``return True``, making it unreachable. The dead code is
    removed here; re-enable by comparing unique_colors(img) against
    MIN_UNIQUE_COLORS.

    Args:
        img: Image as a numpy array (unused while the check is disabled).

    Returns:
        Always True.
    """
    return True
def get_distortion(one, two):
    """DSSIM-style distortion between two H x W x C images.

    Computes the SSIM of each channel independently, then converts the mean
    SSIM into a distortion score ((1 / mean_ssim - 1) * 20), so identical
    images score 0 and the score grows as similarity drops.
    """
    channel_ssims = [
        compute_ssim(
            np.ascontiguousarray(one[:, :, channel]),
            np.ascontiguousarray(two[:, :, channel]))
        for channel in range(one.shape[2])
    ]
    return (1 / np.mean(channel_ssims) - 1) * 20
def detect_optimal_quality(image_buffer, width=None, verbose=False):
    """Returns the optimal JPEG quality for a given image, at a given width.

    Binary-searches the quality range in settings.BETTY_JPEG_QUALITY_RANGE,
    recompressing at each probe and scoring the result with a DSSIM-derived
    distortion (get_distortion), inflated when the color density drifts by
    more than COLOR_DENSITY_RATIO. Returns None when the image is judged to
    have too few colors (currently never, since enough_colors always passes).
    """
    # Open the image...
    pil_original = Image.open(image_buffer)
    icc_profile = pil_original.info.get("icc_profile")
    if pil_original.format != "JPEG":
        # Uhoh, this isn't a JPEG, let's convert it to one.
        pillow_kwargs = {
            "format": "jpeg",
            "quality": 100,
            "subsampling": 2
        }
        if icc_profile:
            pillow_kwargs["icc_profile"] = icc_profile
        tmp = io.BytesIO()
        pil_original.save(tmp, **pillow_kwargs)
        tmp.seek(0)
        pil_original = Image.open(tmp)
    if width:
        # Preserve aspect ratio when resizing to the requested width.
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10
        # (use Image.LANCZOS) — confirm the pinned Pillow version.
        height = int(math.ceil((pil_original.size[1] * width) / float(pil_original.size[0])))
        pil_original = pil_original.resize((width, height), resample=Image.ANTIALIAS)
    np_original = np.asarray(pil_original)
    original_density = color_density(np_original)
    # Check if there are enough colors (assuming RGB for the moment)
    if not enough_colors(np_original):
        return None
    # TODO: Check if the quality is lower than we'd want... (probably impossible)
    qmin = settings.BETTY_JPEG_QUALITY_RANGE[0]
    qmax = settings.BETTY_JPEG_QUALITY_RANGE[1]
    # Do a binary search of image quality...
    while qmax > qmin + 1:
        quality = int(round((qmax + qmin) / 2.0))
        tmp = io.BytesIO()
        pillow_kwargs = {
            "format": "jpeg",
            "quality": quality,
            "subsampling": 2
        }
        if icc_profile:
            pillow_kwargs["icc_profile"] = icc_profile
        pil_original.save(tmp, **pillow_kwargs)
        tmp.seek(0)
        pil_compressed = Image.open(tmp)
        np_compressed = np.asarray(pil_compressed)
        # Penalize probes that lose (or gain) too much color variety.
        density_ratio = abs(color_density(np_compressed) - original_density) / original_density
        error = get_distortion(np_original, np_compressed)
        if density_ratio > COLOR_DENSITY_RATIO:
            error *= 1.25 + density_ratio
        # Too distorted -> raise the floor; acceptable -> lower the ceiling.
        if error > ERROR_THRESHOLD:
            qmin = quality
        else:
            qmax = quality
        if verbose:
            print("{:.2f}/{:.2f}@{}".format(error, density_ratio, quality))
        if abs(error - ERROR_THRESHOLD) < ERROR_THRESHOLD * ERROR_THRESHOLD_INACCURACY:
            # Close enough!
            qmax = quality
            break
    return qmax
| 28.308824 | 95 | 0.621645 |
795849400df552db11fa03d1c5ecf66c7da7b7e9 | 942 | py | Python | alipay/aop/api/response/AlipayPassTemplateUpdateResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayPassTemplateUpdateResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayPassTemplateUpdateResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayPassTemplateUpdateResponse(AlipayResponse):
    """Response object for the alipay pass-template update API call."""

    def __init__(self):
        super(AlipayPassTemplateUpdateResponse, self).__init__()
        self._result = None
        self._success = None

    @property
    def result(self):
        return self._result

    @result.setter
    def result(self, value):
        self._result = value

    @property
    def success(self):
        return self._success

    @success.setter
    def success(self, value):
        self._success = value

    def parse_response_content(self, response_content):
        """Populate fields from the parsed payload; absent keys keep None."""
        response = super(AlipayPassTemplateUpdateResponse, self).parse_response_content(response_content)
        for field in ('result', 'success'):
            if field in response:
                setattr(self, field, response[field])
| 26.166667 | 105 | 0.669851 |
795849843f4e04701658af972435b1f3cf483484 | 81,225 | py | Python | tensorflow/python/debug/cli/analyzer_cli_test.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/debug/cli/analyzer_cli_test.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/debug/cli/analyzer_cli_test.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Analyzer CLI Backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import cli_test_utils
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
# Helper function to accommodate MKL-enabled TensorFlow:
# MatMul op is supported by MKL and its name is prefixed with "_Mkl" during the
# MKL graph rewrite pass.
def _matmul_op_name():
  """Name of the MatMul op, accounting for the MKL graph-rewrite renaming."""
  if test_util.IsMklEnabled():
    return "_MklMatMul"
  return "MatMul"
def _cli_config_from_temp_file():
  """CLIConfig backed by a throwaway config file in a fresh temp directory."""
  config_path = os.path.join(tempfile.mkdtemp(), ".tfdbg_config")
  return cli_config.CLIConfig(config_file_path=config_path)
def no_rewrite_session_config():
  """Session config with the Grappler rewrites relevant to these tests off."""
  off = rewriter_config_pb2.RewriterConfig.OFF
  rewriter_config = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True,
      constant_folding=off,
      arithmetic_optimization=off,
      dependency_optimization=off,
      pin_to_host_optimization=off)
  return config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(rewrite_options=rewriter_config))
def line_number_above():
  """Line number of the source line just above the caller's call site."""
  caller_frame = tf_inspect.stack()[1]
  return caller_frame[2] - 1
def parse_op_and_node(line):
  """Parse a line containing an op node followed by a node name.

  For example, if the line is
    "  [Variable] hidden/weights",
  this function will return ("Variable", "hidden/weights")

  Args:
    line: The line to be parsed, as a str.

  Returns:
    Name of the parsed op type.
    Name of the parsed node.
  """
  tokens = line.strip().split(" ")
  op_type = tokens[0].replace("[", "").replace("]", "")
  # Use index 1 (not -1) to tolerate extra items trailing the node name.
  node_name = tokens[1]
  return op_type, node_name
def assert_column_header_command_shortcut(tst,
                                          command,
                                          reverse,
                                          node_name_regex,
                                          op_type_regex,
                                          tensor_filter_name):
  """Assert a column-header sort command carries no stale shortcut flags.

  NOTE(review): the last three checks all probe for the "-t" flag, including
  the node-name and tensor-filter cases (which would normally use "-n" and
  "-f"), and each condition only fires when its argument is falsy — so it
  tests for e.g. "-t None" in the command, which is effectively vacuous.
  This looks like a copy-paste slip; confirm the intended flags before
  tightening these assertions.
  """
  tst.assertFalse(reverse and "-r" in command)
  tst.assertFalse(not(op_type_regex) and ("-t %s" % op_type_regex) in command)
  tst.assertFalse(
      not(node_name_regex) and ("-t %s" % node_name_regex) in command)
  tst.assertFalse(
      not(tensor_filter_name) and ("-t %s" % tensor_filter_name) in command)
def assert_listed_tensors(tst,
                          out,
                          expected_tensor_names,
                          expected_op_types,
                          node_name_regex=None,
                          op_type_regex=None,
                          tensor_filter_name=None,
                          sort_by="timestamp",
                          reverse=False):
  """Check RichTextLines output for list_tensors commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    expected_tensor_names: (list of str) Expected tensor names in the list.
    expected_op_types: (list of str) Expected op types of the tensors, in the
      same order as the expected_tensor_names.
    node_name_regex: Optional: node name regex filter.
    op_type_regex: Optional: op type regex filter.
    tensor_filter_name: Optional: name of the tensor filter.
    sort_by: (str) (timestamp | op_type | tensor_name) the field by which the
      tensors in the list are sorted.
    reverse: (bool) whether the sorting is in reverse (i.e., descending) order.
  """
  line_iter = iter(out.lines)
  attr_segs = out.font_attr_segs
  # Tracks the index of the current line so the matching entry of
  # out.font_attr_segs can be looked up for the header row.
  line_counter = 0
  num_dumped_tensors = int(next(line_iter).split(" ")[0])
  line_counter += 1
  tst.assertGreaterEqual(num_dumped_tensors, len(expected_tensor_names))
  # Optional filter-description lines appear only when a filter was given.
  if op_type_regex is not None:
    tst.assertEqual("Op type regex filter: \"%s\"" % op_type_regex,
                    next(line_iter))
    line_counter += 1
  if node_name_regex is not None:
    tst.assertEqual("Node name regex filter: \"%s\"" % node_name_regex,
                    next(line_iter))
    line_counter += 1
  tst.assertEqual("", next(line_iter))
  line_counter += 1
  # Verify the column heads "t (ms)", "Op type" and "Tensor name" are present.
  line = next(line_iter)
  tst.assertIn("t (ms)", line)
  tst.assertIn("Op type", line)
  tst.assertIn("Tensor name", line)
  # Verify the command shortcuts in the top row: each column head is a
  # clickable menu item whose command re-sorts by that column.
  attr_segs = out.font_attr_segs[line_counter]
  attr_seg = attr_segs[0]
  tst.assertEqual(0, attr_seg[0])
  tst.assertEqual(len("t (ms)"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s timestamp", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  idx0 = line.index("Size")
  attr_seg = attr_segs[1]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Size (B)"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s dump_size", command)
  assert_column_header_command_shortcut(tst, command, reverse, node_name_regex,
                                        op_type_regex, tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  idx0 = line.index("Op type")
  attr_seg = attr_segs[2]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Op type"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s op_type", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  idx0 = line.index("Tensor name")
  attr_seg = attr_segs[3]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Tensor name"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s tensor_name", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])
  # Verify the listed tensors and their timestamps.
  tensor_timestamps = []
  dump_sizes_bytes = []
  op_types = []
  tensor_names = []
  for line in line_iter:
    # Each data row: "[t] size op_type tensor_name"; collapse repeated spaces.
    items = line.split(" ")
    items = [item for item in items if item]
    rel_time = float(items[0][1:-1])
    tst.assertGreaterEqual(rel_time, 0.0)
    tensor_timestamps.append(rel_time)
    dump_sizes_bytes.append(command_parser.parse_readable_size_str(items[1]))
    op_types.append(items[2])
    tensor_names.append(items[3])
  # Verify that the tensors should be listed in ascending order of their
  # timestamps.
  if sort_by == "timestamp":
    sorted_timestamps = sorted(tensor_timestamps)
    if reverse:
      sorted_timestamps.reverse()
    tst.assertEqual(sorted_timestamps, tensor_timestamps)
  elif sort_by == "dump_size":
    sorted_dump_sizes_bytes = sorted(dump_sizes_bytes)
    if reverse:
      sorted_dump_sizes_bytes.reverse()
    tst.assertEqual(sorted_dump_sizes_bytes, dump_sizes_bytes)
  elif sort_by == "op_type":
    sorted_op_types = sorted(op_types)
    if reverse:
      sorted_op_types.reverse()
    tst.assertEqual(sorted_op_types, op_types)
  elif sort_by == "tensor_name":
    sorted_tensor_names = sorted(tensor_names)
    if reverse:
      sorted_tensor_names.reverse()
    tst.assertEqual(sorted_tensor_names, tensor_names)
  else:
    tst.fail("Invalid value in sort_by: %s" % sort_by)
  # Verify that the tensors are all listed.
  for tensor_name, op_type in zip(expected_tensor_names, expected_op_types):
    tst.assertIn(tensor_name, tensor_names)
    index = tensor_names.index(tensor_name)
    tst.assertEqual(op_type, op_types[index])
def assert_node_attribute_lines(tst,
                                out,
                                node_name,
                                op_type,
                                device,
                                input_op_type_node_name_pairs,
                                ctrl_input_op_type_node_name_pairs,
                                recipient_op_type_node_name_pairs,
                                ctrl_recipient_op_type_node_name_pairs,
                                attr_key_val_pairs=None,
                                num_dumped_tensors=None,
                                show_stack_trace=False,
                                stack_trace_available=False):
  """Check RichTextLines output for node_info commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    node_name: Name of the node.
    op_type: Op type of the node, as a str.
    device: Name of the device on which the node resides.
    input_op_type_node_name_pairs: A list of 2-tuples of op type and node name,
      for the (non-control) inputs to the node.
    ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node
      name, for the control inputs to the node.
    recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node
      name, for the (non-control) output recipients to the node.
    ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and
      node name, for the control output recipients to the node.
    attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a
      list of 2-tuples.
    num_dumped_tensors: Optional: number of tensor dumps from the node.
    show_stack_trace: (bool) whether the stack trace of the node's
      construction is asserted to be present.
    stack_trace_available: (bool) whether Python stack trace is available.
  """
  line_iter = iter(out.lines)
  tst.assertEqual("Node %s" % node_name, next(line_iter))
  tst.assertEqual("", next(line_iter))
  tst.assertEqual(" Op: %s" % op_type, next(line_iter))
  tst.assertEqual(" Device: %s" % device, next(line_iter))
  tst.assertEqual("", next(line_iter))
  tst.assertEqual(" %d input(s) + %d control input(s):" %
                  (len(input_op_type_node_name_pairs),
                   len(ctrl_input_op_type_node_name_pairs)), next(line_iter))
  # Check inputs.
  # NOTE: the loop variables below deliberately shadow the op_type/node_name
  # parameters; the parameters are not used again after this point.
  tst.assertEqual(" %d input(s):" % len(input_op_type_node_name_pairs),
                  next(line_iter))
  for op_type, node_name in input_op_type_node_name_pairs:
    tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))
  tst.assertEqual("", next(line_iter))
  # Check control inputs.
  if ctrl_input_op_type_node_name_pairs:
    tst.assertEqual(" %d control input(s):" %
                    len(ctrl_input_op_type_node_name_pairs), next(line_iter))
    for op_type, node_name in ctrl_input_op_type_node_name_pairs:
      tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))
    tst.assertEqual("", next(line_iter))
  tst.assertEqual(" %d recipient(s) + %d control recipient(s):" %
                  (len(recipient_op_type_node_name_pairs),
                   len(ctrl_recipient_op_type_node_name_pairs)),
                  next(line_iter))
  # Check recipients, the order of which is not deterministic.
  tst.assertEqual(" %d recipient(s):" %
                  len(recipient_op_type_node_name_pairs), next(line_iter))
  t_recs = []
  for _ in recipient_op_type_node_name_pairs:
    line = next(line_iter)
    op_type, node_name = parse_op_and_node(line)
    t_recs.append((op_type, node_name))
  tst.assertItemsEqual(recipient_op_type_node_name_pairs, t_recs)
  # Check control recipients, the order of which is not deterministic.
  if ctrl_recipient_op_type_node_name_pairs:
    tst.assertEqual("", next(line_iter))
    tst.assertEqual(" %d control recipient(s):" %
                    len(ctrl_recipient_op_type_node_name_pairs),
                    next(line_iter))
    t_ctrl_recs = []
    for _ in ctrl_recipient_op_type_node_name_pairs:
      line = next(line_iter)
      op_type, node_name = parse_op_and_node(line)
      t_ctrl_recs.append((op_type, node_name))
    tst.assertItemsEqual(ctrl_recipient_op_type_node_name_pairs, t_ctrl_recs)
  # The order of multiple attributes can be non-deterministic.
  if attr_key_val_pairs:
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("Node attributes:", next(line_iter))
    kv_pairs = []
    for key, val in attr_key_val_pairs:
      # Each attribute occupies two lines: "key:" then the value.
      key = next(line_iter).strip().replace(":", "")
      val = next(line_iter).strip()
      kv_pairs.append((key, val))
    tst.assertEqual("", next(line_iter))
    tst.assertItemsEqual(attr_key_val_pairs, kv_pairs)
  if num_dumped_tensors is not None:
    tst.assertEqual("%d dumped tensor(s):" % num_dumped_tensors,
                    next(line_iter))
    tst.assertEqual("", next(line_iter))
    dump_timestamps_ms = []
    for _ in xrange(num_dumped_tensors):
      line = next(line_iter)
      tst.assertStartsWith(line.strip(), "Slot 0 @ DebugIdentity @")
      tst.assertTrue(line.strip().endswith(" ms"))
      dump_timestamp_ms = float(line.strip().split(" @ ")[-1].replace("ms", ""))
      tst.assertGreaterEqual(dump_timestamp_ms, 0.0)
      dump_timestamps_ms.append(dump_timestamp_ms)
    # Dumps must be listed in chronological order.
    tst.assertEqual(sorted(dump_timestamps_ms), dump_timestamps_ms)
  if show_stack_trace:
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("Traceback of node construction:", next(line_iter))
    if stack_trace_available:
      try:
        # Each stack frame is rendered as five lines: depth, Line, Function,
        # Text and a blank separator; consume frames until lines run out.
        depth_counter = 0
        while True:
          for i in range(5):
            line = next(line_iter)
            if i == 0:
              tst.assertEqual(depth_counter, int(line.split(":")[0]))
            elif i == 1:
              tst.assertStartsWith(line, " Line:")
            elif i == 2:
              tst.assertStartsWith(line, " Function:")
            elif i == 3:
              tst.assertStartsWith(line, " Text:")
            elif i == 4:
              tst.assertEqual("", line)
          depth_counter += 1
      except StopIteration:
        # StopIteration must occur at a frame boundary (i wrapped to 0).
        tst.assertEqual(0, i)
    else:
      tst.assertEqual("(Unavailable because no Python graph has been loaded)",
                      next(line_iter))
def check_syntax_error_output(tst, out, command_prefix):
  """Check RichTextLines output for valid command prefix but invalid syntax."""
  expected_lines = [
      "Syntax error for command: %s" % command_prefix,
      "For help, do \"help %s\"" % command_prefix,
  ]
  tst.assertEqual(expected_lines, out.lines)
def check_error_output(tst, out, command_prefix, args):
  """Check RichTextLines output from invalid/erroneous commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    command_prefix: The command prefix of the command that caused the error.
    args: The arguments (excluding prefix) of the command that caused the error.
  """
  expected_prefix = ("Error occurred during handling of command: %s %s" %
                     (command_prefix, " ".join(args)))
  tst.assertGreater(len(out.lines), 2)
  tst.assertStartsWith(out.lines[0], expected_prefix)
def check_main_menu(tst,
                    out,
                    list_tensors_enabled=False,
                    node_info_node_name=None,
                    print_tensor_node_name=None,
                    list_inputs_node_name=None,
                    list_outputs_node_name=None):
  """Check the main menu annotation of an output."""
  tst.assertIn(debugger_cli_common.MAIN_MENU_KEY, out.annotations)
  menu = out.annotations[debugger_cli_common.MAIN_MENU_KEY]

  tst.assertEqual(list_tensors_enabled,
                  menu.caption_to_item("list_tensors").is_enabled())

  # Node-specific items: when a node name is expected, the item must be
  # enabled and its command must end with that node name; otherwise the
  # item must be disabled.
  node_specific_items = (
      ("node_info", node_info_node_name),
      ("print_tensor", print_tensor_node_name),
      ("list_inputs", list_inputs_node_name),
      ("list_outputs", list_outputs_node_name),
  )
  for caption, expected_node_name in node_specific_items:
    menu_item = menu.caption_to_item(caption)
    if expected_node_name:
      tst.assertTrue(menu_item.is_enabled())
      tst.assertTrue(menu_item.content.endswith(expected_node_name))
    else:
      tst.assertFalse(menu_item.is_enabled())

  # These two items are always enabled.
  tst.assertTrue(menu.caption_to_item("run_info").is_enabled())
  tst.assertTrue(menu.caption_to_item("help").is_enabled())
def check_menu_item(tst, out, line_index, expected_begin, expected_end,
                    expected_command):
  """Assert the given line carries a MenuItem with the expected span/command."""
  for begin, end, attribute in out.font_attr_segs[line_index]:
    # A segment's attribute may be a single value or a list of values.
    candidates = attribute if isinstance(attribute, list) else [attribute]
    menu_items = [a for a in candidates
                  if isinstance(a, debugger_cli_common.MenuItem)]
    if menu_items:
      tst.assertEqual(expected_begin, begin)
      tst.assertEqual(expected_end, end)
      tst.assertEqual(expected_command, menu_items[0].content)
      break
  else:
    # No MenuItem found on the line: fail.
    tst.assertTrue(False)
def create_analyzer_cli(dump):
  """Create an analyzer CLI.

  Args:
    dump: A `DebugDumpDir` object to base the analyzer CLI on.

  Returns:
    1) A `DebugAnalyzer` object created based on `dump`.
    2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object
       and has the common tfdbg commands, e.g., lt, ni, li, lo, registered.
  """
  # Construct the analyzer.
  analyzer = analyzer_cli.DebugAnalyzer(dump, _cli_config_from_temp_file())

  # Construct the handler registry and register every supported command,
  # each with its shorthand alias.
  registry = debugger_cli_common.CommandHandlerRegistry()
  command_specs = (
      ("list_tensors", analyzer.list_tensors, ["lt"]),
      ("node_info", analyzer.node_info, ["ni"]),
      ("list_inputs", analyzer.list_inputs, ["li"]),
      ("list_outputs", analyzer.list_outputs, ["lo"]),
      ("print_tensor", analyzer.print_tensor, ["pt"]),
      ("print_source", analyzer.print_source, ["ps"]),
      ("list_source", analyzer.list_source, ["ls"]),
      ("eval", analyzer.evaluate_expression, ["ev"]),
  )
  for command_name, handler, aliases in command_specs:
    registry.register_command_handler(
        command_name,
        handler,
        analyzer.get_help(command_name),
        prefix_aliases=aliases)

  return analyzer, registry
@test_util.run_v1_only("b/120545219")
class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
  @classmethod
  def setUpClass(cls):
    # Build a small graph (u*v matmul, then w+w add), run it once with debug
    # tensor watches writing to a temporary dump root, and wrap the resulting
    # dump in an analyzer CLI shared by every test method of this class.
    cls._dump_root = tempfile.mkdtemp()
    cls._dump_root_for_unique = tempfile.mkdtemp()
    cls._is_gpu_available = test.is_gpu_available()
    if cls._is_gpu_available:
      gpu_name = test_util.gpu_device_name()
      cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
    else:
      cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
    cls._curr_file_path = os.path.abspath(
        tf_inspect.getfile(tf_inspect.currentframe()))
    cls._sess = session.Session(config=no_rewrite_session_config())
    with cls._sess as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])
      u_name = "simple_mul_add/u"
      v_name = "simple_mul_add/v"
      u_init = constant_op.constant(u_init_val, shape=[2, 2], name="u_init")
      u = variables.VariableV1(u_init, name=u_name)
      # The *_line_number attributes record each op's construction line so
      # the source-mapping tests can assert traceback line numbers.
      cls._u_line_number = line_number_above()
      v_init = constant_op.constant(v_init_val, shape=[2, 1], name="v_init")
      v = variables.VariableV1(v_init, name=v_name)
      cls._v_line_number = line_number_above()
      w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
      cls._w_line_number = line_number_above()
      x = math_ops.add(w, w, name="simple_mul_add/add")
      cls._x_line_number = line_number_above()
      a = variables.VariableV1([1, 3, 3, 7], name="a")
      u.initializer.run()
      v.initializer.run()
      a.initializer.run()
      # Watch every node with DebugIdentity, dumping to the temp dump root.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % cls._dump_root)
      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run([x], options=run_options, run_metadata=run_metadata)
      cls._debug_dump = debug_data.DebugDumpDir(
          cls._dump_root, partition_graphs=run_metadata.partition_graphs)
      cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
shutil.rmtree(cls._dump_root_for_unique)
def testMeasureTensorListColumnWidthsGivesRightAnswerForEmptyData(self):
timestamp_col_width, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([]))
self.assertEqual(len("t (ms)") + 1, timestamp_col_width)
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
self.assertEqual(len("Op type") + 1, op_type_col_width)
  def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):
    # Column widths must grow to fit the widest cell (header or data),
    # plus one space of padding.
    dump = self._debug_dump.dumped_tensor_data[0]
    self.assertLess(dump.dump_size_bytes, 1000)
    self.assertEqual(
        "VariableV2", self._debug_dump.node_op_type(dump.node_name))
    _, dump_size_col_width, op_type_col_width = (
        self._analyzer._measure_tensor_list_column_widths([dump]))
    # The length of str(dump.dump_size_bytes) is less than the length of
    # "Size (B)" (8). So the column width should be determined by the length of
    # "Size (B)".
    self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
    # The length of "VariableV2" is greater than the length of "Op type". So the
    # column should be determined by the length of "VariableV2".
    self.assertEqual(len("VariableV2") + 1, op_type_col_width)
  def testListTensors(self):
    """Bare `lt` should list all six dumped tensors with their op types."""
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command("lt", [])
    assert_listed_tensors(self, out, [
        "simple_mul_add/u:0", "simple_mul_add/v:0", "simple_mul_add/u/read:0",
        "simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
        "simple_mul_add/add:0"
    ], [
        "VariableV2", "VariableV2", "Identity", "Identity",
        _matmul_op_name(), "Add"
    ])
    # Check the main menu.
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsInReverseTimeOrderWorks(self):
    """`lt -s timestamp -r` should sort the listing by timestamp, descending."""
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
    assert_listed_tensors(
        self,
        out, [
            "simple_mul_add/u:0", "simple_mul_add/v:0",
            "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
            "simple_mul_add/matmul:0", "simple_mul_add/add:0"
        ], [
            "VariableV2", "VariableV2", "Identity", "Identity",
            _matmul_op_name(), "Add"
        ],
        sort_by="timestamp",
        reverse=True)
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsInDumpSizeOrderWorks(self):
    """`lt -s dump_size` should sort the listing by dump size, ascending."""
    out = self._registry.dispatch_command("lt", ["-s", "dump_size"])
    assert_listed_tensors(
        self,
        out, [
            "simple_mul_add/u:0", "simple_mul_add/v:0",
            "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
            "simple_mul_add/matmul:0", "simple_mul_add/add:0"
        ], [
            "VariableV2", "VariableV2", "Identity", "Identity",
            _matmul_op_name(), "Add"
        ],
        sort_by="dump_size")
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsInReverseDumpSizeOrderWorks(self):
    """`lt -s dump_size -r` should sort the listing by dump size, descending."""
    out = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
    assert_listed_tensors(
        self,
        out, [
            "simple_mul_add/u:0", "simple_mul_add/v:0",
            "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
            "simple_mul_add/matmul:0", "simple_mul_add/add:0"
        ], [
            "VariableV2", "VariableV2", "Identity", "Identity",
            _matmul_op_name(), "Add"
        ],
        sort_by="dump_size",
        reverse=True)
    check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsWithInvalidSortByFieldGivesError(self):
out = self._registry.dispatch_command("lt", ["-s", "foobar"])
self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
out.lines)
  def testListTensorsInOpTypeOrderWorks(self):
    """`lt -s op_type` should sort the listing by op type, ascending."""
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command("lt", ["-s", "op_type"])
    assert_listed_tensors(
        self,
        out, [
            "simple_mul_add/u:0", "simple_mul_add/v:0",
            "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
            "simple_mul_add/matmul:0", "simple_mul_add/add:0"
        ], [
            "VariableV2", "VariableV2", "Identity", "Identity",
            _matmul_op_name(), "Add"
        ],
        sort_by="op_type",
        reverse=False)
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsInReverseOpTypeOrderWorks(self):
    """`lt -s op_type -r` should sort the listing by op type, descending."""
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
    assert_listed_tensors(
        self,
        out, [
            "simple_mul_add/u:0", "simple_mul_add/v:0",
            "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
            "simple_mul_add/matmul:0", "simple_mul_add/add:0"
        ], [
            "VariableV2", "VariableV2", "Identity", "Identity",
            _matmul_op_name(), "Add"
        ],
        sort_by="op_type",
        reverse=True)
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsInTensorNameOrderWorks(self):
    """`lt -s tensor_name` should sort the listing by tensor name, ascending."""
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
    assert_listed_tensors(
        self,
        out, [
            "simple_mul_add/u:0", "simple_mul_add/v:0",
            "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
            "simple_mul_add/matmul:0", "simple_mul_add/add:0"
        ], [
            "VariableV2", "VariableV2", "Identity", "Identity",
            _matmul_op_name(), "Add"
        ],
        sort_by="tensor_name",
        reverse=False)
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsInReverseTensorNameOrderWorks(self):
    """`lt -s tensor_name -r` should sort by tensor name, descending."""
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
    assert_listed_tensors(
        self,
        out, [
            "simple_mul_add/u:0", "simple_mul_add/v:0",
            "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
            "simple_mul_add/matmul:0", "simple_mul_add/add:0"
        ], [
            "VariableV2", "VariableV2", "Identity", "Identity",
            _matmul_op_name(), "Add"
        ],
        sort_by="tensor_name",
        reverse=True)
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsFilterByNodeNameRegex(self):
    """`-n` should restrict the listing to node names matching the regex."""
    out = self._registry.dispatch_command("list_tensors",
                                          ["--node_name_filter", ".*read.*"])
    assert_listed_tensors(
        self,
        out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
        ["Identity", "Identity"],
        node_name_regex=".*read.*")
    # "^read" anchors at the beginning of the node name, so no node matches.
    out = self._registry.dispatch_command("list_tensors", ["-n", "^read"])
    assert_listed_tensors(self, out, [], [], node_name_regex="^read")
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorFilterByOpTypeRegex(self):
    """`-t` should restrict the listing to op types matching the regex."""
    out = self._registry.dispatch_command("list_tensors",
                                          ["--op_type_filter", "Identity"])
    assert_listed_tensors(
        self,
        out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
        ["Identity", "Identity"],
        op_type_regex="Identity")
    # An alternation regex should match both the Add and the MatMul ops.
    out = self._registry.dispatch_command(
        "list_tensors", ["-t", "(Add|" + _matmul_op_name() + ")"])
    assert_listed_tensors(
        self,
        out, ["simple_mul_add/add:0", "simple_mul_add/matmul:0"],
        ["Add", _matmul_op_name()],
        op_type_regex=("(Add|" + _matmul_op_name() + ")"))
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):
    """`-n` and `-t` together should apply both filters conjunctively."""
    out = self._registry.dispatch_command(
        "list_tensors", ["-t", "(Add|MatMul)", "-n", ".*add$"])
    assert_listed_tensors(
        self,
        out, ["simple_mul_add/add:0"], ["Add"],
        node_name_regex=".*add$",
        op_type_regex="(Add|MatMul)")
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorWithFilterAndNodeNameExclusionWorks(self):
    """`--filter_exclude_node_names` should drop filter hits by node name."""
    # First, create and register the filter.
    def is_2x1_vector(datum, tensor):
      del datum  # Unused.
      return list(tensor.shape) == [2, 1]
    self._analyzer.add_tensor_filter("is_2x1_vector", is_2x1_vector)
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command(
        "lt", ["-f", "is_2x1_vector", "--filter_exclude_node_names", ".*v.*"])
    # If the --filter_exclude_node_names were not used, then the matching
    # tensors would be:
    #   - simple_mul_add/v:0
    #   - simple_mul_add/v/read:0
    #   - simple_mul_add/matmul:0
    #   - simple_mul_add/add:0
    #
    # With the --filter_exclude_node_names option, only the last two should
    # show up in the result.
    assert_listed_tensors(
        self,
        out, ["simple_mul_add/matmul:0", "simple_mul_add/add:0"],
        [_matmul_op_name(), "Add"],
        tensor_filter_name="is_2x1_vector")
    check_main_menu(self, out, list_tensors_enabled=False)
  def testListTensorsFilterNanOrInf(self):
    """Test register and invoke a tensor filter (`lt -f has_inf_or_nan`)."""
    # First, register the filter.
    self._analyzer.add_tensor_filter("has_inf_or_nan",
                                     debug_data.has_inf_or_nan)
    # Use shorthand alias for the command prefix.
    out = self._registry.dispatch_command("lt", ["-f", "has_inf_or_nan"])
    # This TF graph run did not generate any bad numerical values, so the
    # filtered listing is expected to be empty.
    assert_listed_tensors(
        self, out, [], [], tensor_filter_name="has_inf_or_nan")
    # TODO(cais): A test with some actual bad numerical values.
    check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorNonexistentFilter(self):
"""Test attempt to use a nonexistent tensor filter."""
out = self._registry.dispatch_command("lt", ["-f", "foo_filter"])
self.assertEqual(["ERROR: There is no tensor filter named \"foo_filter\"."],
out.lines)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInvalidOptions(self):
out = self._registry.dispatch_command("list_tensors", ["--bar"])
check_syntax_error_output(self, out, "list_tensors")
  def testNodeInfoByNodeName(self):
    """`node_info <node>` should show op type, device, inputs and recipients."""
    node_name = "simple_mul_add/matmul"
    out = self._registry.dispatch_command("node_info", [node_name])
    # The matmul output feeds both inputs of the Add node, hence two entries.
    recipients = [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")]
    assert_node_attribute_lines(self, out, node_name, _matmul_op_name(),
                                self._main_device,
                                [("Identity", "simple_mul_add/u/read"),
                                 ("Identity", "simple_mul_add/v/read")], [],
                                recipients, [])
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    # Verify that the node name is bold in the first line.
    self.assertEqual(
        [(len(out.lines[0]) - len(node_name), len(out.lines[0]), "bold")],
        out.font_attr_segs[0])
  def testNodeInfoShowAttributes(self):
    """`node_info -a` should additionally display the node's attributes."""
    node_name = "simple_mul_add/matmul"
    out = self._registry.dispatch_command("node_info", ["-a", node_name])
    test_attr_key_val_pairs = [("transpose_a", "b: false"),
                               ("transpose_b", "b: false"),
                               ("T", "type: DT_DOUBLE")]
    # MKL builds add a _kernel attribute to the MatMul node.
    if test_util.IsMklEnabled():
      test_attr_key_val_pairs.append(("_kernel", 's: "MklNameChangeOp"'))
    assert_node_attribute_lines(
        self,
        out,
        node_name,
        _matmul_op_name(),
        self._main_device, [("Identity", "simple_mul_add/u/read"),
                            ("Identity", "simple_mul_add/v/read")], [],
        [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
        attr_key_val_pairs=test_attr_key_val_pairs)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
  def testNodeInfoShowDumps(self):
    """`node_info -d` should list the node's dumped tensors with pt links."""
    node_name = "simple_mul_add/matmul"
    out = self._registry.dispatch_command("node_info", ["-d", node_name])
    assert_node_attribute_lines(
        self,
        out,
        node_name,
        _matmul_op_name(),
        self._main_device, [("Identity", "simple_mul_add/u/read"),
                            ("Identity", "simple_mul_add/v/read")], [],
        [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
        num_dumped_tensors=1)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    # Line 16 is the dumped-tensor row; its clickable region should invoke
    # print_tensor on dump number 0.
    check_menu_item(self, out, 16,
                    len(out.lines[16]) - len(out.lines[16].strip()),
                    len(out.lines[16]), "pt %s:0 -n 0" % node_name)
  def testNodeInfoShowStackTraceUnavailableIsIndicated(self):
    """`node_info -t` without a Python graph should say traces are missing."""
    # Clear the Python graph so no traceback information is available.
    self._debug_dump.set_python_graph(None)
    node_name = "simple_mul_add/matmul"
    out = self._registry.dispatch_command("node_info", ["-t", node_name])
    assert_node_attribute_lines(
        self,
        out,
        node_name,
        _matmul_op_name(),
        self._main_device, [("Identity", "simple_mul_add/u/read"),
                            ("Identity", "simple_mul_add/v/read")], [],
        [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
        show_stack_trace=True,
        stack_trace_available=False)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
  def testNodeInfoShowStackTraceAvailableWorks(self):
    """`node_info -t` with a Python graph set should show the stack trace."""
    self._debug_dump.set_python_graph(self._sess.graph)
    node_name = "simple_mul_add/matmul"
    out = self._registry.dispatch_command("node_info", ["-t", node_name])
    assert_node_attribute_lines(
        self,
        out,
        node_name,
        _matmul_op_name(),
        self._main_device, [("Identity", "simple_mul_add/u/read"),
                            ("Identity", "simple_mul_add/v/read")], [],
        [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
        show_stack_trace=True,
        stack_trace_available=True)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
  def testNodeInfoByTensorName(self):
    """`node_info` should accept a tensor name and resolve it to its node."""
    node_name = "simple_mul_add/u/read"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command("node_info", [tensor_name])
    assert_node_attribute_lines(self, out, node_name, "Identity",
                                self._main_device,
                                [("VariableV2", "simple_mul_add/u")], [],
                                [(_matmul_op_name(), "simple_mul_add/matmul")],
                                [])
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
  def testNodeInfoNonexistentNodeName(self):
    """`node_info` on an unknown node should print a single red error line."""
    out = self._registry.dispatch_command("node_info", ["bar"])
    self.assertEqual(
        ["ERROR: There is no node named \"bar\" in the partition graphs"],
        out.lines)
    # Check color indicating error.
    self.assertEqual({0: [(0, 59, cli_shared.COLOR_RED)]}, out.font_attr_segs)
    check_main_menu(self, out, list_tensors_enabled=True)
  def testPrintTensor(self):
    """`print_tensor` should show dtype, shape and the array value."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name], screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"%s:DebugIdentity\":" % tensor_name,
        "  dtype: float64",
        "  shape: (2, 1)",
        "",
        "array([[ 7.],",
        "       [-2.]])",
    ], out.lines)
    # Rows 4 and 5 (the array body) carry per-row annotations.
    self.assertIn("tensor_metadata", out.annotations)
    self.assertIn(4, out.annotations)
    self.assertIn(5, out.annotations)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        list_inputs_node_name=node_name,
        list_outputs_node_name=node_name)
  def testPrintTensorAndWriteToNpyFile(self):
    """`print_tensor -w <path>` should also save the value as a .npy file."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    npy_path = os.path.join(self._dump_root, "matmul.npy")
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name, "-w", npy_path],
        screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"%s:DebugIdentity\":" % tensor_name,
        "  dtype: float64",
        "  shape: (2, 1)",
        "",
    ], out.lines[:4])
    self.assertTrue(out.lines[4].startswith("Saved value to: %s (" % npy_path))
    # Load the numpy file and verify its contents.
    self.assertAllClose([[7.0], [-2.0]], np.load(npy_path))
  def testPrintTensorHighlightingRanges(self):
    """`print_tensor --ranges` should count and bold the in-range elements."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    # A single range: only -2.0 falls in [-inf, 0.0].
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]"],
        screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"%s:DebugIdentity\": " % tensor_name +
        "Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
        "  dtype: float64",
        "  shape: (2, 1)",
        "",
        "array([[ 7.],",
        "       [-2.]])",
    ], out.lines)
    self.assertIn("tensor_metadata", out.annotations)
    self.assertIn(4, out.annotations)
    self.assertIn(5, out.annotations)
    # -2. on row 5 is highlighted in bold.
    self.assertEqual([(8, 11, "bold")], out.font_attr_segs[5])
    # A list of two ranges: only 7.0 falls in [5.5, inf].
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name, "--ranges", "[[-inf, -5.5], [5.5, inf]]"],
        screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"%s:DebugIdentity\": " % tensor_name +
        "Highlighted([[-inf, -5.5], [5.5, inf]]): "
        "1 of 2 element(s) (50.00%)",
        "  dtype: float64",
        "  shape: (2, 1)",
        "",
        "array([[ 7.],",
        "       [-2.]])",
    ], out.lines)
    self.assertIn("tensor_metadata", out.annotations)
    self.assertIn(4, out.annotations)
    self.assertIn(5, out.annotations)
    # This time only 7. on row 4 is bold; row 5 has no highlighting.
    self.assertEqual([(9, 11, "bold")], out.font_attr_segs[4])
    self.assertNotIn(5, out.font_attr_segs)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        list_inputs_node_name=node_name,
        list_outputs_node_name=node_name)
  def testPrintTensorHighlightingRangesAndIncludingNumericSummary(self):
    """`print_tensor --ranges -s` should prepend a numeric summary table."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]", "-s"],
        screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"%s:DebugIdentity\": " % tensor_name +
        "Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
        "  dtype: float64",
        "  shape: (2, 1)",
        "",
        "Numeric summary:",
        "|  -  + | total |",
        "|  1  1 |     2 |",
        "|           min           max          mean           std |",
        "|          -2.0           7.0           2.5           4.5 |",
        "",
        "array([[ 7.],",
        "       [-2.]])",
    ], out.lines)
    # With the summary prepended, the array body moves to rows 10 and 11.
    self.assertIn("tensor_metadata", out.annotations)
    self.assertIn(10, out.annotations)
    self.assertIn(11, out.annotations)
    self.assertEqual([(8, 11, "bold")], out.font_attr_segs[11])
  def testPrintTensorWithSlicing(self):
    """`print_tensor` should honor a numpy-style slicing suffix."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name + "[1, :]"], screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"%s:DebugIdentity[1, :]\":" % tensor_name, "  dtype: float64",
        "  shape: (1,)", "", "array([-2.])"
    ], out.lines)
    self.assertIn("tensor_metadata", out.annotations)
    self.assertIn(4, out.annotations)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        list_inputs_node_name=node_name,
        list_outputs_node_name=node_name)
  def testPrintTensorInvalidSlicingString(self):
    """A non-slicing expression in brackets should yield a ValueError screen."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name + "[1, foo()]"], screen_info={"cols": 80})
    self.assertEqual("Error occurred during handling of command: print_tensor "
                     + tensor_name + "[1, foo()]:", out.lines[0])
    self.assertEqual("ValueError: Invalid tensor-slicing string.",
                     out.lines[-2])
  def testPrintTensorValidExplicitNumber(self):
    """`print_tensor -n 0` should print the first (and only) dump."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name, "-n", "0"], screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"%s:DebugIdentity\":" % tensor_name,
        "  dtype: float64",
        "  shape: (2, 1)",
        "",
        "array([[ 7.],",
        "       [-2.]])",
    ], out.lines)
    self.assertIn("tensor_metadata", out.annotations)
    self.assertIn(4, out.annotations)
    self.assertIn(5, out.annotations)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        list_inputs_node_name=node_name,
        list_outputs_node_name=node_name)
  def testPrintTensorInvalidExplicitNumber(self):
    """`print_tensor -n` with an out-of-range dump number should error out."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command(
        "print_tensor", [tensor_name, "-n", "1"], screen_info={"cols": 80})
    self.assertEqual([
        "ERROR: Invalid number (1) for tensor simple_mul_add/matmul:0, "
        "which generated one dump."
    ], out.lines)
    # No tensor was printed, so no tensor metadata should be attached.
    self.assertNotIn("tensor_metadata", out.annotations)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        list_inputs_node_name=node_name,
        list_outputs_node_name=node_name)
  def testPrintTensorMissingOutputSlotLeadsToOnlyDumpedTensorPrinted(self):
    """A bare node name (no :slot) should fall back to the sole dumped slot."""
    node_name = "simple_mul_add/matmul"
    out = self._registry.dispatch_command("print_tensor", [node_name])
    self.assertEqual([
        "Tensor \"%s:0:DebugIdentity\":" % node_name, "  dtype: float64",
        "  shape: (2, 1)", "", "array([[ 7.],", "       [-2.]])"
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        list_inputs_node_name=node_name,
        list_outputs_node_name=node_name)
  def testPrintTensorNonexistentNodeName(self):
    """`print_tensor` on an unknown node should print an error message."""
    out = self._registry.dispatch_command(
        "print_tensor", ["simple_mul_add/matmul/foo:0"])
    self.assertEqual([
        "ERROR: Node \"simple_mul_add/matmul/foo\" does not exist in partition "
        "graphs"
    ], out.lines)
    check_main_menu(self, out, list_tensors_enabled=True)
  def testEvalExpression(self):
    """`eval` should evaluate a numpy expression over backtick-quoted dumps."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command(
        "eval", ["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name)],
        screen_info={"cols": 80})
    cli_test_utils.assert_lines_equal_ignoring_whitespace(
        self,
        ["Tensor \"from eval of expression "
         "'np.matmul(`simple_mul_add/matmul:0`, "
         "`simple_mul_add/matmul:0`.T)'\":",
         "  dtype: float64",
         "  shape: (2, 2)",
         "",
         "Numeric summary:",
         "|  -  + | total |",
         "|  2  2 |     4 |",
         "|           min           max          mean           std |"],
        out.lines[:8])
    # Summary statistics row, then the 2x2 result matrix.
    cli_test_utils.assert_array_lines_close(
        self, [-14.0, 49.0, 6.25, 25.7524270701], out.lines[8:9])
    cli_test_utils.assert_array_lines_close(
        self, [[49.0, -14.0], [-14.0, 4.0]], out.lines[10:])
  def testEvalExpressionAndWriteToNpyFile(self):
    """`eval -w <path>` should save the evaluated result as a .npy file."""
    node_name = "simple_mul_add/matmul"
    tensor_name = node_name + ":0"
    npy_path = os.path.join(self._dump_root, "matmul_eval.npy")
    out = self._registry.dispatch_command(
        "eval",
        ["np.matmul(`%s`, `%s`.T)" % (tensor_name, tensor_name), "-w",
         npy_path], screen_info={"cols": 80})
    self.assertEqual([
        "Tensor \"from eval of expression "
        "'np.matmul(`simple_mul_add/matmul:0`, "
        "`simple_mul_add/matmul:0`.T)'\":",
        "  dtype: float64",
        "  shape: (2, 2)",
        ""], out.lines[:4])
    self.assertTrue(out.lines[4].startswith("Saved value to: %s (" % npy_path))
    # Load the numpy file and verify its contents.
    self.assertAllClose([[49.0, -14.0], [-14.0, 4.0]], np.load(npy_path))
def testAddGetTensorFilterLambda(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
analyzer.add_tensor_filter("foo_filter", lambda x, y: True)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
def testAddGetTensorFilterNestedFunction(self):
analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
_cli_config_from_temp_file())
def foo_filter(unused_arg_0, unused_arg_1):
return True
analyzer.add_tensor_filter("foo_filter", foo_filter)
self.assertTrue(analyzer.get_tensor_filter("foo_filter")(None, None))
  def testAddTensorFilterEmptyName(self):
    """Registering a filter under an empty name should raise ValueError."""
    analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
                                          _cli_config_from_temp_file())
    # NOTE(review): assertRaisesRegexp is the deprecated alias of
    # assertRaisesRegex; kept for Python 2 compatibility in this file.
    with self.assertRaisesRegexp(ValueError,
                                 "Input argument filter_name cannot be empty."):
      analyzer.add_tensor_filter("", lambda datum, tensor: True)
  def testAddTensorFilterNonStrName(self):
    """Registering a filter under a non-string name should raise TypeError."""
    analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
                                          _cli_config_from_temp_file())
    with self.assertRaisesRegexp(
        TypeError,
        "Input argument filter_name is expected to be str, ""but is not"):
      analyzer.add_tensor_filter(1, lambda datum, tensor: True)
  def testAddGetTensorFilterNonCallable(self):
    """Registering a non-callable as a filter should raise TypeError."""
    analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
                                          _cli_config_from_temp_file())
    with self.assertRaisesRegexp(
        TypeError, "Input argument filter_callable is expected to be callable, "
        "but is not."):
      analyzer.add_tensor_filter("foo_filter", "bar")
  def testGetNonexistentTensorFilter(self):
    """Looking up a filter name that was never registered raises ValueError."""
    analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
                                          _cli_config_from_temp_file())
    analyzer.add_tensor_filter("foo_filter", lambda datum, tensor: True)
    with self.assertRaisesRegexp(ValueError,
                                 "There is no tensor filter named \"bar\""):
      analyzer.get_tensor_filter("bar")
def _findSourceLine(self, annotated_source, line_number):
"""Find line of given line number in annotated source.
Args:
annotated_source: (debugger_cli_common.RichTextLines) the annotated source
line_number: (int) 1-based line number
Returns:
(int) If line_number is found, 0-based line index in
annotated_source.lines. Otherwise, None.
"""
index = None
for i, line in enumerate(annotated_source.lines):
if line.startswith("L%d " % line_number):
index = i
break
return index
  def testPrintSourceForOpNamesWholeFileWorks(self):
    """`print_source <file>` should annotate lines with the ops they create."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command(
        "print_source", [self._curr_file_path], screen_info={"cols": 80})
    # Verify the annotation of the line that creates u.
    index = self._findSourceLine(out, self._u_line_number)
    self.assertEqual(
        ["L%d         u = variables.VariableV1(u_init, name=u_name)" %
         self._u_line_number,
         "    simple_mul_add/u",
         "    simple_mul_add/u/Assign",
         "    simple_mul_add/u/read"],
        out.lines[index : index + 4])
    self.assertEqual("pt simple_mul_add/u",
                     out.font_attr_segs[index + 1][0][2].content)
    # simple_mul_add/u/Assign is not used in this run because the Variable has
    # already been initialized.
    self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
    self.assertEqual("pt simple_mul_add/u/read",
                     out.font_attr_segs[index + 3][0][2].content)
    # Verify the annotation of the line that creates v.
    index = self._findSourceLine(out, self._v_line_number)
    self.assertEqual(
        ["L%d         v = variables.VariableV1(v_init, name=v_name)" %
         self._v_line_number,
         "    simple_mul_add/v"],
        out.lines[index : index + 2])
    self.assertEqual("pt simple_mul_add/v",
                     out.font_attr_segs[index + 1][0][2].content)
    # Verify the annotation of the line that creates w.
    index = self._findSourceLine(out, self._w_line_number)
    self.assertEqual(
        ["L%d         " % self._w_line_number +
         "w = math_ops.matmul(u, v, name=\"simple_mul_add/matmul\")",
         "    simple_mul_add/matmul"],
        out.lines[index : index + 2])
    self.assertEqual("pt simple_mul_add/matmul",
                     out.font_attr_segs[index + 1][0][2].content)
    # Verify the annotation of the line that creates x.
    index = self._findSourceLine(out, self._x_line_number)
    self.assertEqual(
        ["L%d         " % self._x_line_number +
         "x = math_ops.add(w, w, name=\"simple_mul_add/add\")",
         "    simple_mul_add/add"],
        out.lines[index : index + 2])
    self.assertEqual("pt simple_mul_add/add",
                     out.font_attr_segs[index + 1][0][2].content)
  def testPrintSourceForTensorNamesWholeFileWorks(self):
    """`print_source --tensors` should annotate with tensor (not op) names."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command(
        "print_source",
        [self._curr_file_path, "--tensors"],
        screen_info={"cols": 80})
    # Verify the annotation of the line that creates u.
    index = self._findSourceLine(out, self._u_line_number)
    self.assertEqual(
        ["L%d         u = variables.VariableV1(u_init, name=u_name)" %
         self._u_line_number,
         "    simple_mul_add/u/read:0",
         "    simple_mul_add/u:0"],
        out.lines[index : index + 3])
    self.assertEqual("pt simple_mul_add/u/read:0",
                     out.font_attr_segs[index + 1][0][2].content)
    self.assertEqual("pt simple_mul_add/u:0",
                     out.font_attr_segs[index + 2][0][2].content)
  def testPrintSourceForOpNamesStartingAtSpecifiedLineWorks(self):
    """`print_source -b <line>` should scroll the output to that line."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command(
        "print_source",
        [self._curr_file_path, "-b", "3"],
        screen_info={"cols": 80})
    # Starting at 1-based line 3 corresponds to 0-based scroll position 2.
    self.assertEqual(
        2, out.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])
    index = self._findSourceLine(out, self._u_line_number)
    self.assertEqual(
        ["L%d         u = variables.VariableV1(u_init, name=u_name)" %
         self._u_line_number,
         "    simple_mul_add/u",
         "    simple_mul_add/u/Assign",
         "    simple_mul_add/u/read"],
        out.lines[index : index + 4])
    self.assertEqual("pt simple_mul_add/u",
                     out.font_attr_segs[index + 1][0][2].content)
    # simple_mul_add/u/Assign is not used in this run because the Variable has
    # already been initialized.
    self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])
    self.assertEqual("pt simple_mul_add/u/read",
                     out.font_attr_segs[index + 3][0][2].content)
  def testPrintSourceForOpNameSettingMaximumElementCountWorks(self):
    """`print_source -m <k>` should elide ops beyond k with a show-more link."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command(
        "print_source",
        [self._curr_file_path, "-m", "1"],
        screen_info={"cols": 80})
    index = self._findSourceLine(out, self._u_line_number)
    # Only one op is shown; the other two are folded behind an omission line.
    self.assertEqual(
        ["L%d         u = variables.VariableV1(u_init, name=u_name)" %
         self._u_line_number,
         "    simple_mul_add/u",
         "    (... Omitted 2 of 3 op(s) ...) +5"],
        out.lines[index : index + 3])
    self.assertEqual("pt simple_mul_add/u",
                     out.font_attr_segs[index + 1][0][2].content)
    # The omission line links to a re-invocation with a larger -m value.
    more_elements_command = out.font_attr_segs[index + 2][-1][2].content
    self.assertStartsWith(more_elements_command,
                          "ps %s " % self._curr_file_path)
    self.assertIn(" -m 6", more_elements_command)
  def testListSourceWorks(self):
    """`list_source` should list this file and gray out TF library files."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command("list_source", [])
    # Locate the section of non-TF-library files between the two headers.
    non_tf_lib_files_start = [
        i for i in xrange(len(out.lines))
        if out.lines[i].startswith("Source file path")][0] + 1
    non_tf_lib_files_end = [
        i for i in xrange(len(out.lines))
        if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
    non_tf_lib_files = [
        line.split(" ")[0] for line
        in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
    self.assertIn(self._curr_file_path, non_tf_lib_files)
    # Check that the TF library files are marked with special color attribute.
    for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
      if not out.lines[i]:
        continue
      for attr_seg in out.font_attr_segs[i]:
        self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
                        attr_seg[2] == cli_shared.COLOR_GRAY)
  def testListSourceWithNodeNameFilterWithMatchesWorks(self):
    """`list_source -n` with matches should show the filter and this file."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command("list_source", ["-n", ".*/read"])
    self.assertStartsWith(out.lines[1], "Node name regex filter: \".*/read\"")
    # Locate the section of non-TF-library files between the two headers.
    non_tf_lib_files_start = [
        i for i in xrange(len(out.lines))
        if out.lines[i].startswith("Source file path")][0] + 1
    non_tf_lib_files_end = [
        i for i in xrange(len(out.lines))
        if out.lines[i].startswith("TensorFlow Python library file(s):")][0] - 1
    non_tf_lib_files = [
        line.split(" ")[0] for line
        in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]
    self.assertIn(self._curr_file_path, non_tf_lib_files)
    # Check that the TF library files are marked with special color attribute.
    for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):
      if not out.lines[i]:
        continue
      for attr_seg in out.font_attr_segs[i]:
        self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or
                        attr_seg[2] == cli_shared.COLOR_GRAY)
  def testListSourceWithNodeNameFilterWithNoMatchesWorks(self):
    """`list_source -n` with no matching nodes should say so explicitly."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command("list_source", ["-n", "^$"])
    self.assertEqual([
        "List of source files that created nodes in this run",
        "Node name regex filter: \"^$\"", "",
        "[No source file information.]"], out.lines)
  def testListSourceWithPathAndNodeNameFiltersWorks(self):
    """`list_source -p -n` should echo both filters in the header lines."""
    self._debug_dump.set_python_graph(self._sess.graph)
    out = self._registry.dispatch_command(
        "list_source", ["-p", self._curr_file_path, "-n", ".*read"])
    self.assertEqual([
        "List of source files that created nodes in this run",
        "File path regex filter: \"%s\"" % self._curr_file_path,
        "Node name regex filter: \".*read\"", ""], out.lines[:4])
  def testListSourceWithCompiledPythonSourceWorks(self):
    """Compiled (.pyc/.pyo) files should be listed but without ps links."""
    def fake_list_source_files_against_dump(dump,
                                            path_regex_whitelist=None,
                                            node_name_regex_whitelist=None):
      del dump, path_regex_whitelist, node_name_regex_whitelist
      return [("compiled_1.pyc", False, 10, 20, 30, 4),
              ("compiled_2.pyo", False, 10, 20, 30, 5),
              ("uncompiled.py", False, 10, 20, 30, 6)]
    with test.mock.patch.object(
        source_utils, "list_source_files_against_dump",
        side_effect=fake_list_source_files_against_dump):
      out = self._registry.dispatch_command("list_source", [])
    # Compiled files carry only a plain color attribute (no menu command).
    self.assertStartsWith(out.lines[4], "compiled_1.pyc")
    self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
                     out.font_attr_segs[4][0])
    self.assertStartsWith(out.lines[5], "compiled_2.pyo")
    self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),
                     out.font_attr_segs[5][0])
    # The uncompiled file additionally carries a clickable `ps` menu command.
    self.assertStartsWith(out.lines[6], "uncompiled.py")
    self.assertEqual(0, out.font_attr_segs[6][0][0])
    self.assertEqual(13, out.font_attr_segs[6][0][1])
    self.assertEqual(cli_shared.COLOR_WHITE, out.font_attr_segs[6][0][2][0])
    self.assertEqual("ps uncompiled.py -b 6",
                     out.font_attr_segs[6][0][2][1].content)
  def testListInputInvolvingNodesWithMultipleOutputs(self):
    """List an input tree containing tensors from non-:0 output slot."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      x = variables.VariableV1([1, 3, 3, 7], name="x")
      # array_ops.unique has two outputs; idx is output slot 1.
      _, idx = array_ops.unique(x, name="x_unique")
      idx_times_two = math_ops.multiply(idx, 2, name="idx_times_two")
      self.evaluate(x.initializer)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % self._dump_root_for_unique)
      run_metadata = config_pb2.RunMetadata()
      self.assertAllEqual(
          [0, 2, 2, 4],
          sess.run(idx_times_two,
                   options=run_options,
                   run_metadata=run_metadata))
      debug_dump = debug_data.DebugDumpDir(
          self._dump_root_for_unique,
          partition_graphs=run_metadata.partition_graphs)
      _, registry = create_analyzer_cli(debug_dump)
      out = registry.dispatch_command("li", ["idx_times_two"])
      # The input should be reported with its non-zero output slot (":1").
      self.assertEqual(
          ["Inputs to node \"idx_times_two\" (Depth limit = 1):",
           "|- (1) x_unique:1"], out.lines[:2])
class AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase):
  """Tests of `print_tensor` on tensors exceeding the display threshold."""

  @classmethod
  def setUpClass(cls):
    # Dump a single large constant tensor and build an analyzer CLI over it.
    cls._dump_root = tempfile.mkdtemp()
    with session.Session(config=no_rewrite_session_config()) as sess:
      # 2400 elements should exceed the default threshold (2000).
      x = constant_op.constant(np.zeros([300, 8]), name="large_tensors/x")
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % cls._dump_root)
      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(x, options=run_options, run_metadata=run_metadata)
    cls._debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)
    # Construct the analyzer and command registry.
    cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)

  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)

  def testPrintLargeTensorWithoutAllOption(self):
    """Without -a, values beyond the threshold are elided with ellipses."""
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0"], screen_info={"cols": 80})
    # Assert that ellipses are present in the tensor value printout.
    self.assertIn("...,", out.lines[4])
    # 2100 still exceeds 2000.
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0[:, 0:7]"],
        screen_info={"cols": 80})
    self.assertIn("...,", out.lines[4])

  def testPrintLargeTensorWithAllOption(self):
    """With -a/--all, the full tensor is printed without elision."""
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0", "-a"],
        screen_info={"cols": 80})
    # Assert that ellipses are not present in the tensor value printout.
    self.assertNotIn("...,", out.lines[4])
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0[:, 0:7]", "--all"],
        screen_info={"cols": 80})
    self.assertNotIn("...,", out.lines[4])
@test_util.run_v1_only("b/120545219")
class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
  """Tests node_info / list_inputs / list_outputs on a control-dep graph."""
  @classmethod
  def setUpClass(cls):
    # Build a small graph where ctrl_dep_y and ctrl_dep_z carry explicit
    # control dependencies, dump a run of it, and construct the CLI once.
    cls._dump_root = tempfile.mkdtemp()
    cls._is_gpu_available = test.is_gpu_available()
    if cls._is_gpu_available:
      gpu_name = test_util.gpu_device_name()
      cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
    else:
      cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
    with session.Session(config=no_rewrite_session_config()) as sess:
      x_init_val = np.array([5.0, 3.0])
      x_init = constant_op.constant(x_init_val, shape=[2])
      x = variables.VariableV1(x_init, name="control_deps/x")
      y = math_ops.add(x, x, name="control_deps/y")
      y = control_flow_ops.with_dependencies(
          [x], y, name="control_deps/ctrl_dep_y")
      z = math_ops.multiply(x, y, name="control_deps/z")
      z = control_flow_ops.with_dependencies(
          [x, y], z, name="control_deps/ctrl_dep_z")
      x.initializer.run()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % cls._dump_root)
      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(z, options=run_options, run_metadata=run_metadata)
    debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)
    # Construct the analyzer and command handler registry.
    _, cls._registry = create_analyzer_cli(debug_dump)
  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)
  def testNodeInfoWithControlDependencies(self):
    """node_info must show control inputs and control recipients."""
    # Call node_info on a node with control inputs.
    out = self._registry.dispatch_command("node_info",
                                          ["control_deps/ctrl_dep_y"])
    assert_node_attribute_lines(
        self, out, "control_deps/ctrl_dep_y", "Identity",
        self._main_device, [("Add", "control_deps/y")],
        [("VariableV2", "control_deps/x")],
        [("Mul", "control_deps/z")],
        [("Identity", "control_deps/ctrl_dep_z")])
    # Call node info on a node with control recipients.
    out = self._registry.dispatch_command("ni", ["control_deps/x"])
    assert_node_attribute_lines(self, out, "control_deps/x", "VariableV2",
                                self._main_device, [], [],
                                [("Identity", "control_deps/x/read")],
                                [("Identity", "control_deps/ctrl_dep_y"),
                                 ("Identity", "control_deps/ctrl_dep_z")])
    # Verify the menu items (command shortcuts) in the output.
    check_menu_item(self, out, 10,
                    len(out.lines[10]) - len("control_deps/x/read"),
                    len(out.lines[10]), "ni -a -d -t control_deps/x/read")
    # The order of the two control recipients in the output is not
    # deterministic, so detect which line holds which node first.
    if out.lines[13].endswith("control_deps/ctrl_dep_y"):
      y_line = 13
      z_line = 14
    else:
      y_line = 14
      z_line = 13
    check_menu_item(self, out, y_line,
                    len(out.lines[y_line]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[y_line]),
                    "ni -a -d -t control_deps/ctrl_dep_y")
    check_menu_item(self, out, z_line,
                    len(out.lines[z_line]) - len("control_deps/ctrl_dep_z"),
                    len(out.lines[z_line]),
                    "ni -a -d -t control_deps/ctrl_dep_z")
  def testListInputsNonRecursiveNoControl(self):
    """List inputs non-recursively, without any control inputs."""
    # Do not include node op types.
    node_name = "control_deps/z"
    out = self._registry.dispatch_command("list_inputs", [node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) control_deps/x/read", "| |- ...",
        "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d."
    ], out.lines)
    # Include node op types.
    out = self._registry.dispatch_command("li", ["-t", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) [Identity] control_deps/x/read", "| |- ...",
        "|- (1) [Identity] control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d.", " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    # Verify that the node name has bold attribute.
    self.assertEqual([(16, 16 + len(node_name), "bold")], out.font_attr_segs[0])
    # Verify the menu items (command shortcuts) in the output.
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "li -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
  def testListInputsNonRecursiveNoControlUsingTensorName(self):
    """List inputs using the name of an output tensor of the node."""
    # Do not include node op types.
    node_name = "control_deps/z"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command("list_inputs", [tensor_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) control_deps/x/read", "| |- ...",
        "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "li -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
  def testListInputsNonRecursiveWithControls(self):
    """List inputs non-recursively, with control inputs."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command("li", ["-t", node_name, "-c"])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z", "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y", "| |- ...",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 5,
                    len(out.lines[5]) - len("control_deps/x"),
                    len(out.lines[5]), "li -c -r control_deps/x")
  def testListInputsRecursiveWithControls(self):
    """List inputs recursively, with control inputs."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 20, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z",
        "| |- (2) [Identity] control_deps/x/read",
        "| | |- (3) [VariableV2] control_deps/x",
        "| |- (2) [Identity] control_deps/ctrl_dep_y",
        "| |- (3) [Add] control_deps/y",
        "| | |- (4) [Identity] control_deps/x/read",
        "| | | |- (5) [VariableV2] control_deps/x",
        "| | |- (4) [Identity] control_deps/x/read",
        "| | |- (5) [VariableV2] control_deps/x",
        "| |- (3) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- (2) [Add] control_deps/y",
        "| | |- (3) [Identity] control_deps/x/read",
        "| | | |- (4) [VariableV2] control_deps/x",
        "| | |- (3) [Identity] control_deps/x/read",
        "| | |- (4) [VariableV2] control_deps/x",
        "| |- (2) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 11,
                    len(out.lines[11]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[11]), "li -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 18,
                    len(out.lines[18]) - len("control_deps/x"),
                    len(out.lines[18]), "li -c -r control_deps/x")
  def testListInputsRecursiveWithControlsWithDepthLimit(self):
    """List inputs recursively, with control inputs and a depth limit."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command(
        "li", ["-c", "-r", "-t", "-d", "2", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 2, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z",
        "| |- (2) [Identity] control_deps/x/read", "| | |- ...",
        "| |- (2) [Identity] control_deps/ctrl_dep_y", "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- (2) [Add] control_deps/y", "| | |- ...",
        "| |- (2) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 10,
                    len(out.lines[10]) - len("control_deps/x"),
                    len(out.lines[10]), "li -c -r control_deps/x")
  def testListInputsNodeWithoutInputs(self):
    """List the inputs to a node without any input."""
    node_name = "control_deps/x"
    out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])
    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 20, control " % node_name +
        "inputs included):", " [None]", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
  def testListInputsNonexistentNode(self):
    """list_inputs on a nonexistent node must produce an error line."""
    out = self._registry.dispatch_command(
        "list_inputs", ["control_deps/z/foo"])
    self.assertEqual([
        "ERROR: There is no node named \"control_deps/z/foo\" in the "
        "partition graphs"], out.lines)
  def testListRecipientsRecursiveWithControlsWithDepthLimit(self):
    """List recipients recursively, with control inputs and a depth limit."""
    out = self._registry.dispatch_command(
        "lo", ["-c", "-r", "-t", "-d", "1", "control_deps/x"])
    self.assertEqual([
        "Recipients of node \"control_deps/x\" (Depth limit = 1, control "
        "recipients included):",
        "|- (1) [Identity] control_deps/x/read",
        "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_z",
        "", "Legend:", " (d): recursion depth = d.",
        " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."], out.lines)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "lo -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "lo -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 5,
                    len(out.lines[5]) - len("control_deps/ctrl_dep_z"),
                    len(out.lines[5]), "lo -c -r control_deps/ctrl_dep_z")
    # Verify the bold attribute of the node name.
    self.assertEqual([(20, 20 + len("control_deps/x"), "bold")],
                     out.font_attr_segs[0])
@test_util.run_v1_only("b/120545219")
class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
  """Tests print_tensor on a tensor dumped many times by a while loop."""
  @classmethod
  def setUpClass(cls):
    # Run a 10-iteration while loop with a debug watch on "while/Identity",
    # producing 10 dumps of the same tensor for the tests below.
    cls._dump_root = tempfile.mkdtemp()
    with session.Session(config=no_rewrite_session_config()) as sess:
      loop_var = constant_op.constant(0, name="while_loop_test/loop_var")
      cond = lambda loop_var: math_ops.less(loop_var, 10)
      body = lambda loop_var: math_ops.add(loop_var, 1)
      while_loop = control_flow_ops.while_loop(
          cond, body, [loop_var], parallel_iterations=1)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_url = "file://%s" % cls._dump_root
      watch_opts = run_options.debug_options.debug_tensor_watch_opts
      # Add debug tensor watch for "while/Identity".
      watch = watch_opts.add()
      watch.node_name = "while/Identity"
      watch.output_slot = 0
      watch.debug_ops.append("DebugIdentity")
      watch.debug_urls.append(debug_url)
      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(while_loop, options=run_options, run_metadata=run_metadata)
    cls._debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)
    cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)
  def testMultipleDumpsPrintTensorNoNumber(self):
    """Without -n, pt must list all 10 dumps and suggest the -n flag."""
    output = self._registry.dispatch_command("pt", ["while/Identity:0"])
    self.assertEqual("Tensor \"while/Identity:0\" generated 10 dumps:",
                     output.lines[0])
    # NOTE(review): xrange is presumably imported from six.moves at the top
    # of this file (Python 2 compatibility) -- confirm before modernizing.
    for i in xrange(10):
      self.assertTrue(output.lines[i + 1].startswith("#%d" % i))
      self.assertTrue(output.lines[i + 1].endswith(
          " ms] while/Identity:0:DebugIdentity"))
    self.assertEqual(
        "You can use the -n (--number) flag to specify which dump to print.",
        output.lines[-3])
    self.assertEqual("For example:", output.lines[-2])
    self.assertEqual(" print_tensor while/Identity:0 -n 0", output.lines[-1])
  def testMultipleDumpsPrintTensorWithNumber(self):
    """With -n i, pt must print only the i-th dump of the tensor."""
    for i in xrange(5):
      output = self._registry.dispatch_command(
          "pt", ["while/Identity:0", "-n", "%d" % i])
      self.assertEqual("Tensor \"while/Identity:0:DebugIdentity (dump #%d)\":" %
                       i, output.lines[0])
      self.assertEqual(" dtype: int32", output.lines[1])
      self.assertEqual(" shape: ()", output.lines[2])
      self.assertEqual("", output.lines[3])
      # Dump #i holds the loop variable's value at iteration i.
      self.assertTrue(output.lines[4].startswith("array(%d" % i))
      self.assertTrue(output.lines[4].endswith(")"))
  def testMultipleDumpsPrintTensorInvalidNumber(self):
    """An out-of-range -n value must produce an error message."""
    output = self._registry.dispatch_command("pt",
                                             ["while/Identity:0", "-n", "10"])
    self.assertEqual([
        "ERROR: Specified number (10) exceeds the number of available dumps "
        "(10) for tensor while/Identity:0"
    ], output.lines)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  googletest.main()
| 38.495261 | 80 | 0.636233 |
795849e52b781b0f9ffc1b2d5e07513fdd8b9588 | 3,787 | py | Python | ravendb/infrastructure/orders.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-10-08T17:45:44.000Z | 2018-05-29T12:16:43.000Z | ravendb/infrastructure/orders.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 5 | 2017-02-12T15:50:53.000Z | 2017-09-18T12:25:01.000Z | ravendb/infrastructure/orders.py | ravendb/RavenDB-Python-Client | 6286b459b501e755fe8e8591a48acf8616605ccd | [
"MIT"
] | 8 | 2016-07-03T07:59:12.000Z | 2017-09-18T11:22:23.000Z | import datetime
from typing import List, Optional
class Contact:
    """A contact person, identified by name and job title."""

    def __init__(self, name: Optional[str] = None, title: Optional[str] = None):
        """
        :param name: Full name of the contact person.
        :param title: Job title of the contact person.
        """
        # Annotations fixed to explicit Optional (PEP 484): the parameters
        # default to None, so a bare `str` annotation was incorrect.
        self.name = name
        self.title = title
class Address:
    """A postal address (two street lines, city, region, code, country)."""

    def __init__(
        self,
        line1: Optional[str] = None,
        line2: Optional[str] = None,
        city: Optional[str] = None,
        region: Optional[str] = None,
        postal_code: Optional[str] = None,
        country: Optional[str] = None,
    ):
        """
        :param line1: First street-address line.
        :param line2: Second street-address line (apartment, suite, ...).
        :param city: City name.
        :param region: State / province / region.
        :param postal_code: Postal or ZIP code.
        :param country: Country name.
        """
        # Annotations fixed to explicit Optional (PEP 484).
        self.line1 = line1
        self.line2 = line2
        self.city = city
        self.region = region
        self.postal_code = postal_code
        self.country = country
class Company:
    """A company record with its primary contact and address."""

    def __init__(
        self,
        Id: Optional[str] = None,
        external_id: Optional[str] = None,
        name: Optional[str] = None,
        contact: Optional[Contact] = None,
        address: Optional[Address] = None,
        phone: Optional[str] = None,
        fax: Optional[str] = None,
    ):
        """
        :param Id: Document identifier of the company.
        :param external_id: Identifier in an external system.
        :param name: Company name.
        :param contact: Primary contact person.
        :param address: Postal address of the company.
        :param phone: Phone number.
        :param fax: Fax number.
        """
        # Annotations fixed to explicit Optional (PEP 484).
        self.Id = Id
        self.external_id = external_id
        self.name = name
        self.contact = contact
        self.address = address
        self.phone = phone
        self.fax = fax
class Employee:
    """An employee record, including contact details and reporting chain."""

    def __init__(
        self,
        Id: Optional[str] = None,
        last_name: Optional[str] = None,
        first_name: Optional[str] = None,
        title: Optional[str] = None,
        address: Optional[Address] = None,
        hired_at: Optional[datetime.datetime] = None,
        birthday: Optional[datetime.datetime] = None,
        home_phone: Optional[str] = None,
        extension: Optional[str] = None,
        reports_to: Optional[str] = None,
        notes: Optional[List[str]] = None,
        territories: Optional[List[str]] = None,
    ):
        """
        :param Id: Document identifier of the employee.
        :param last_name: Family name.
        :param first_name: Given name.
        :param title: Job title.
        :param address: Home address.
        :param hired_at: Date and time of hiring.
        :param birthday: Date of birth.
        :param home_phone: Home phone number.
        :param extension: Office phone extension.
        :param reports_to: Identifier of the manager this employee reports to.
        :param notes: Free-form notes about the employee.
        :param territories: Identifiers of the sales territories covered.
        """
        # Annotations fixed to explicit Optional (PEP 484).
        self.Id = Id
        self.last_name = last_name
        self.first_name = first_name
        self.title = title
        self.address = address
        self.hired_at = hired_at
        self.birthday = birthday
        self.home_phone = home_phone
        self.extension = extension
        self.reports_to = reports_to
        self.notes = notes
        self.territories = territories
class Product:
    """A sellable product and its stock/ordering state."""

    def __init__(
        self,
        Id: Optional[str] = None,
        name: Optional[str] = None,
        supplier: Optional[str] = None,
        category: Optional[str] = None,
        quantity_per_unit: Optional[str] = None,
        price_per_unit: Optional[float] = None,
        units_in_stock: Optional[int] = None,
        units_on_order: Optional[int] = None,
        discontinued: Optional[bool] = None,
        reorder_level: Optional[int] = None,
    ):
        """
        :param Id: Document identifier of the product.
        :param name: Product name.
        :param supplier: Identifier of the supplier.
        :param category: Identifier of the product category.
        :param quantity_per_unit: Packaging description (e.g. "12 boxes").
        :param price_per_unit: Unit price.
        :param units_in_stock: Units currently in stock.
        :param units_on_order: Units currently on order.
        :param discontinued: Whether the product is discontinued.
        :param reorder_level: Stock level that triggers a reorder.
        """
        # Annotations fixed to explicit Optional (PEP 484).
        self.Id = Id
        self.name = name
        self.supplier = supplier
        self.category = category
        self.quantity_per_unit = quantity_per_unit
        self.price_per_unit = price_per_unit
        self.units_in_stock = units_in_stock
        self.units_on_order = units_on_order
        self.discontinued = discontinued
        self.reorder_level = reorder_level
class OrderLine:
    """A single line of an order: one product, its quantity and pricing."""

    def __init__(
        self,
        product: Optional[str] = None,
        product_name: Optional[str] = None,
        price_per_unit: Optional[float] = None,
        quantity: Optional[int] = None,
        discount: Optional[float] = None,
    ):
        """
        :param product: Identifier of the ordered product.
        :param product_name: Display name of the product.
        :param price_per_unit: Unit price at ordering time.
        :param quantity: Number of units ordered.
        :param discount: Discount fraction applied to this line.
        """
        # Annotations fixed to explicit Optional (PEP 484); the redundant
        # `(object)` base was dropped for consistency with sibling classes.
        self.product = product
        self.product_name = product_name
        self.price_per_unit = price_per_unit
        self.quantity = quantity
        self.discount = discount
class Order:
    """A customer order: parties involved, dates, shipping and line items."""

    def __init__(
        self,
        key: Optional[str] = None,
        company: Optional[str] = None,
        employee: Optional[str] = None,
        ordered_at: Optional[datetime.datetime] = None,
        require_at: Optional[datetime.datetime] = None,
        shipped_at: Optional[datetime.datetime] = None,
        ship_to: Optional[Address] = None,
        ship_via: Optional[str] = None,
        freight: Optional[float] = None,
        lines: Optional[List[OrderLine]] = None,
    ):
        """
        :param key: Document identifier of the order.
        :param company: Identifier of the ordering company.
        :param employee: Identifier of the employee handling the order.
        :param ordered_at: When the order was placed.
        :param require_at: When the order is required.
        :param shipped_at: When the order was shipped.
        :param ship_to: Shipping address.
        :param ship_via: Identifier of the shipper.
        :param freight: Freight cost.
        :param lines: Order lines (one per product).
        """
        # Annotations fixed to explicit Optional (PEP 484).
        self.key = key
        self.company = company
        self.employee = employee
        self.ordered_at = ordered_at
        self.require_at = require_at
        self.shipped_at = shipped_at
        self.ship_to = ship_to
        self.ship_via = ship_via
        self.freight = freight
        self.lines = lines
| 26.117241 | 60 | 0.565091 |
79584afe8f772ca9476c9585560b2b8ef465c58c | 19,787 | py | Python | slam/surface_profiling.py | JulienLefevreMars/slam | 484ee9bb052e4107ef4edbbd876fe5cd6305c8fc | [
"MIT"
] | 6 | 2019-06-07T16:01:06.000Z | 2020-12-04T12:43:54.000Z | slam/surface_profiling.py | JulienLefevreMars/slam | 484ee9bb052e4107ef4edbbd876fe5cd6305c8fc | [
"MIT"
] | 25 | 2020-06-22T20:42:06.000Z | 2021-01-01T09:52:22.000Z | slam/surface_profiling.py | JulienLefevreMars/slam | 484ee9bb052e4107ef4edbbd876fe5cd6305c8fc | [
"MIT"
] | 17 | 2019-09-10T13:19:03.000Z | 2021-12-14T15:53:49.000Z | import numpy as np
from scipy.spatial.distance import cdist
import trimesh
import trimesh.intersections
import trimesh.triangles
import slam.geodesics
import slam.utils as utils
def cortical_surface_profiling(mesh, rot_angle, r_step, max_samples):
    """
    Surface profiling for a given cortical surface.
    NOTE:
    This function returns 2D profiling coordinates directly instead of 3D.
    These 2D points are used to generate the feature Maps.
    :param mesh: trimesh object
        The cortical surface mesh.
    :param rot_angle: float
        Degree of rotation angle.
    :param r_step: float
        Length of sampling steps.
    :param max_samples:
        Maximum of samples in one profiles.
    :return: (N_vertex, N_p, N_s) float
        Profiles points in their 2D coordinates.
        (x, y) respectively
    """
    vert = mesh.vertices
    poly = mesh.faces.astype(np.int32)
    norm = mesh.vertex_normals
    # compute the geodesic map of cortical surface within the specified radius
    # NOTE: This needs some time
    # Radius is twice the maximum profile length so the intersecting submesh
    # safely covers every sampling point.
    area_radius = r_step * max_samples * 2
    area_geodist = slam.geodesics.local_gdist_matrix(mesh, area_radius)
    #
    profile_samples_x = []
    profile_samples_y = []
    length = len(vert)
    for i in range(length):
        # for every vertex, do the surface profiling.
        vert_i = vert[i]
        vert_norm_i = norm[i]
        # limit the intersection area into the area_radius (on Distmap) from
        # center
        vert_distmap = area_geodist[i].toarray()[0]
        area_geodist_v = np.where(vert_distmap > 0)[0]
        area_geodist_faces = vert2poly_indices(area_geodist_v, poly)
        # submesh restricted to faces touching the geodesic neighborhood;
        # profiling planes are intersected with this smaller mesh only
        intersect_mesh = mesh.submesh(np.array([area_geodist_faces]))[0]
        # randomly select initial direction of rotation R0
        init_rot_dir = np.array([1, 1, 1]) - vert_i
        # get the profile samplings on surface
        sam_prof = surface_profiling_vert(
            vert_i,
            vert_norm_i,
            init_rot_dir,
            rot_angle,
            r_step,
            max_samples,
            intersect_mesh)
        # compute the 2D coordinates (x, y) for all profile points
        sample_x, sample_y = compute_profile_coord_x_y(
            sam_prof, vert[i], norm[i])
        profile_samples_x.append(sample_x)
        profile_samples_y.append(sample_y)
    return np.array(profile_samples_x), np.array(profile_samples_y)
def surface_profiling_vert(
        vertex,
        vert_norm,
        init_rot_dir,
        rot_angle,
        r_step,
        max_samples,
        mesh):
    """
    Implement the profile sampling process for a given vertex.
    NOTE:
    For a given vertex,
        Number of profiles N_p = 360/theta
    For each profiles,
        Number of Sampling points N_s = max_samples
    :param vertex: (3,) float
        Target vertex (center vertex)
    :param vert_norm: (3,) float
        Vertex normal
    :param init_rot_dir: (3, )
        Initial direction of rotation R0
    :param rot_angle: (3,) float
        Degree of rotation angle
    :param r_step: float
        Length of sampling steps
    :param max_samples: int
        Maximum of samples in one profiles
    :param mesh: trimesh object
        Intersecting mesh
    :return: (N_p, N_s, 3) float
        Profiles points in 3D coordinates.
    """
    profile_list = []  # record all the samples x and y
    vertex = np.array(vertex)
    # project the dir_R0 onto the tangent plane
    rot_vec0 = utils.project_vector2tangent_plane(vert_norm, init_rot_dir)[0]
    round_angle = 360
    for i in range(int(round_angle / rot_angle)):
        # set the rotation directions
        rot_angle_alpha = (i * rot_angle) * 1.0 / 360 * 2 * np.pi
        rot_mat_alpha = utils.get_rotate_matrix(
            vert_norm, rot_angle_alpha)
        # rotate the initial tangent direction by alpha around the normal
        rot_vec_alpha = np.dot(rot_vec0, rot_mat_alpha)
        # normal of the cutting plane spanned by vert_norm and the rotated dir
        p_norm = np.cross(vert_norm, rot_vec_alpha)
        # Get the intersection lines
        # the lines contains the rotation direction and the reverse one.
        intersect_lines = trimesh.intersections.mesh_plane(
            mesh, p_norm, vertex)
        # Select the points in the direction of rotation vector
        points_i, _, _ = select_points_orientation(
            intersect_lines, rot_vec_alpha, vertex, vert_norm)
        # Calculate the samples of profiles
        points_profile = compute_profiles_sampling_points(
            points_i, vertex, max_samples, r_step)
        profile_list.append(points_profile)
    return np.array(profile_list)
def second_round_profiling_vert(
        vertex,
        vert_norm,
        init_rot_dir,
        rot_angle,
        r_step,
        max_samples,
        mesh,
        mesh_face_index):
    """
    Implement the profile sampling process to get the feature values of each
    profiling points.
    The function name comes from the description of the method in the article.
    Different from the surface_profiling_vert, the mesh_face_index is
    obligatory.
    :param vertex: (3,) float
        Target vertex (center vertex)
    :param vert_norm: (3,) float
        Vertex normal
    :param init_rot_dir: (3, )
        Initial direction of rotation R0
    :param rot_angle: (3,) float
        Degree of rotation angle
    :param r_step: float
        Length of sampling steps
    :param max_samples: int
        Maximum of samples in one profiles
    :param mesh: trimesh object
        Intersecting mesh
    :param mesh_face_index:
        Indices of polygons of mesh.
        Use to record which polygon the sampling points belongs to.
    :return:
        profile_points: (N_p, N_s, 3, 3) float
            For each profile points contain [p1, p2, sample_points],
            where p1, p2 are the points used to calculate the sampling points.
        profile_intersect_faces: ((N_p, N_s,) int
    """
    profile_points = []  # record all the profiling points and interpolation
    # points
    profile_intersect_faces = []  # record all the faces id that contain the
    # sample points
    vertex = np.array(vertex)
    # project the dir_R0 onto the tangent plane
    rot_vec0 = utils.project_vector2tangent_plane(vert_norm, init_rot_dir)[0]
    round_angle = 360
    for i in range(int(round_angle / rot_angle)):
        # set the rotation directions
        rot_angle_alpha = (i * rot_angle) * 1.0 / 360 * 2 * np.pi
        # NOTE: slam.utils and the module-level alias "utils" refer to the
        # same module (see the imports at the top of the file).
        rot_mat_alpha = slam.utils.get_rotate_matrix(
            vert_norm, rot_angle_alpha)
        rot_vec_alpha = np.dot(rot_vec0, rot_mat_alpha)
        p_norm = np.cross(vert_norm, rot_vec_alpha)
        # Get the intersection lines
        # the lines contains the rotation direction and the reverse one.
        intersect_lines, faces = trimesh.intersections.mesh_plane(
            mesh, p_norm, vertex, return_faces=True)
        # get the global index of faces
        intersect_fm_index = mesh_face_index[faces]
        # Select the points in the direction of rotation vector
        orient_points_i, orient_p_id, ori_lines_id = select_points_orientation(
            intersect_lines, rot_vec_alpha, vertex, vert_norm)
        # faces of the (ordered, orientation-filtered) intersection segments
        orient_face_id = intersect_fm_index[ori_lines_id]
        # Calculate the samples of profiles
        points_interp_profile, cor_faces_index = \
            compute_profiles_sampling_points(
                orient_points_i, vertex, max_samples, r_step, orient_face_id)
        profile_points.append(points_interp_profile)
        profile_intersect_faces.append(cor_faces_index)
    return np.array(profile_points), np.array(profile_intersect_faces)
def compute_profiles_sampling_points(points_intersect, origin, max_samples,
                                     r_step, face_id=None):
    """
    Calculate the sampling points on each profiles.
    :param points_intersect: (n, 3) float
        Intersection points, ordered by distance from the origin.
    :param origin: (3,) float
        origin vertex
    :param max_samples: int
        Maximum of samples in one profiles
    :param r_step: float
        Length of sampling steps
    :param face_id: (n,) int
        Indices of polygons which intersecting points belong to.
        Default is None, it is only used in the second round profiling.
    :return:
        When face_id is None, return
            sampling points on profiles: (max_samples, 3) float
        Otherwise,
            points_interpolate_profile: (max_samples, 3, 3) float
                contains [p1, p2, sample_points]
                where p1, p2 are the points used to calculate the sampling
                points.
            cor_faces_index: (max_samples,) int
                the corresponding faces of profile points; -1 marks samples
                generated from an empty intersection (no face available).
    """
    if len(points_intersect) == 0:
        # the origin point is out of intersection points: pad with zeros so
        # the profile keeps a fixed length of max_samples
        profile_points = list(np.zeros([max_samples, 3]))
        if face_id is None:
            return profile_points
        # BUGFIX: when face_id is given, callers (second_round_profiling_vert)
        # unpack TWO return values; previously a single list was returned
        # here, which broke the caller on empty intersections.
        zero_point = np.zeros(3)
        points_interpolate_profile = [
            [zero_point, zero_point, sample] for sample in profile_points]
        cor_faces_index = [-1] * max_samples  # -1 = no intersecting face
        return points_interpolate_profile, cor_faces_index
    # record the length of segment
    length_sum = np.linalg.norm(points_intersect[0] - origin)
    minued_lenth = 0
    count_i = 0
    # record the i when the sample distance firstly exceed the maximum
    # of intersection
    exceed_index = 0
    exceed_bool = True
    count_max = len(points_intersect)
    profile_points = []
    # Record the two end-points and sampling points
    points_interpolate_profile = []
    # the corresponding faces
    cor_faces_index = []
    for j in range(max_samples):
        sample_dist = (j + 1) * r_step
        if sample_dist <= length_sum and count_i == 0:
            # the point is between center point and the closest one
            point0 = origin
            point1 = points_intersect[0]
        elif sample_dist <= length_sum and count_i != 0:
            point0 = points_intersect[count_i - 1]
            point1 = points_intersect[count_i]
        else:
            minued_lenth = length_sum
            count_i += 1
            # the distance of sample exceed the local maximum
            if count_i == count_max:
                # first time arrive at boundary: clamp to the last segment and
                # keep re-sampling at the last in-range distance
                if exceed_bool:
                    exceed_index = j
                    exceed_bool = False
                count_i -= 1
                sample_dist = (exceed_index + 1) * r_step
                point0 = points_intersect[count_i - 1]
                point1 = points_intersect[count_i]
            else:
                point0 = points_intersect[count_i - 1]
                point1 = points_intersect[count_i]
            length_sum += np.linalg.norm(point1 - point0)
        # linear interpolation inside the [point0, point1] segment; alpha=0
        # guards against degenerate (zero-length) segments
        if np.linalg.norm(point1 - point0) == 0:
            alpha = 0
        else:
            alpha = (sample_dist - minued_lenth) / \
                    np.linalg.norm(point1 - point0)
        sample_point = (1 - alpha) * point0 + alpha * point1
        profile_points.append(sample_point)
        points_interpolate_profile.append([point0, point1, sample_point])
        # save the related intersect mesh faces
        if face_id is not None:
            cor_faces_index.append(face_id[count_i])
    if face_id is None:
        return profile_points
    return points_interpolate_profile, cor_faces_index
def vert2poly_indices(vertex_array, poly_array):
    """
    Find the indices of the polygons (faces) that contain any of the given
    vertices.
    TODO There is a func in the lastest trimesh:
        trimesh.geometry.vertex_face_indices()
    :param vertex_array: (n,) int
        Vertex indices to look up.
    :param poly_array: (m, 3) int
        Polygon (face) array of the mesh.
    :return: (k,) int
        Sorted unique indices of polygons containing at least one of the
        vertices.
    """
    # Vectorized membership test: one np.isin pass over the whole face array
    # instead of one np.where scan per vertex (was O(n_vertices * n_faces)).
    membership = np.isin(np.asarray(poly_array), np.asarray(vertex_array))
    return np.unique(np.where(membership)[0])
def select_points_orientation(intersect_points, r_alpha, origin, norm):
    """
    Select points in a specified orientation,
    and ordered them by distance from the center.
    :param intersect_points: (n, 2, 3) float
        Points of intersecting lines.
    :param r_alpha: (3,) float
        Orientation vector
    :param origin: (3,) float
        Origin point
    :param norm: (3,) float
        Normal of origin point
    :return: orient_points, (n, 3) float
             orient_p_indices, (n,) int
             lines_indices, (n,) int
        Ordered points in the orientation.
        Empty lists are returned when the origin is not among the
        intersection points.
    """
    # flatten (n, 2, 3) segment endpoints into a (2n, 3) point list
    points_i = intersect_points.reshape(intersect_points.size // 3, 3)
    # find the center points
    # a row equal to origin matches on all 3 coordinates (count == 3)
    p_idx, count_coord = np.unique(np.where(points_i == origin)[0],
                                   return_counts=True)
    origin_index = p_idx[np.where(count_coord == 3)[0]]
    if len(origin_index) == 0:
        # the intersection result exclude origin point
        orient_points = []
        orient_p_indices = []
        lines_indices = []
        return orient_points, orient_p_indices, lines_indices
    ordered_points, ordered_p_indices = radial_sort(points_i, origin, norm)
    # keep only points on the r_alpha side of the origin
    orientation_vec = np.dot(ordered_points - origin, r_alpha)
    # NOTE(review): the [::2] step presumably de-duplicates shared segment
    # endpoints (each point appears twice, adjacent after the radial sort)
    # -- confirm this invariant holds for all intersection outputs.
    orient_point_idx = np.where(orientation_vec > 0)[0][::2]
    orient_points = ordered_points[orient_point_idx]
    orient_p_indices = ordered_p_indices[orient_point_idx]
    # find the closest point
    p2o_len = cdist(orient_points, np.array([origin]), metric='euclidean')
    p2o_len = p2o_len.reshape(p2o_len.shape[0])
    ordered_p2o = np.argsort(p2o_len)
    orient_points = orient_points[ordered_p2o]
    orient_p_indices = orient_p_indices[ordered_p2o]
    # get the ordered intersection lines
    # each segment contributed 2 points, so point index // 2 = segment index
    ori_lines_indices = orient_p_indices // 2
    return orient_points, orient_p_indices, ori_lines_indices
def radial_sort(points,
                origin,
                normal):
    """
    NOTE:
    This function is derived from the
    trimesh.points.radial_sort(points_i, origin, norm)
    I overwrite this function to return both the coordinates and indices of
    the points.
    Sorts a set of points radially (by angle) around an
    an axis specified by origin and normal vector.
    Parameters
    --------------
    points : (n, 3) float
      Points in space
    origin : (3,) float
      Origin to sort around
    normal : (3,) float
      Vector to sort around
    Returns
    --------------
    ordered : (n, 3) float
      Same as input points but reordered
    order : (n,) int
      Permutation that sorts the input points by angle
    """
    # create two axis perpendicular to each other and the normal,
    # and project the points onto them
    # NOTE(review): axis0 is only orthogonal to normal when normal[0] == 0;
    # kept unchanged for behavior compatibility with the original/trimesh.
    axis0 = [normal[0], normal[2], -normal[1]]
    axis1 = np.cross(normal, axis0)
    pt_vec = points - origin
    pr0 = np.dot(pt_vec, axis0)
    pr1 = np.dot(pt_vec, axis1)
    # calculate the angles of the points on the axis
    angles = np.arctan2(pr0, pr1)
    # sort once and reuse the permutation (previously argsort ran twice)
    order = np.argsort(angles)
    return points[order], order
def compute_profile_coord_x_y(profile, origin, normal):
    """
    Calculate the 2D coordinates of the profiling points in their rotation
    planes.
    These points are used to generate the feature maps mentioned in the
    articles.
    :param profile: (N_p, N_s, 3) float
        Sampling points of profiles in 3D
    :param origin: (3,) float
        Center vertex of profiles
    :param normal: (3,) float
        Normal of origin
    :return: (N_p, N_s) float
        The coordinate x, y
    """
    n_profiles = len(profile)
    n_samples = len(profile[0])
    # flatten all sampling points and express them relative to the origin
    flat_points = profile.reshape([n_profiles * n_samples, 3])
    rel_vec = flat_points - origin
    # x: unsigned length of the component lying in the tangent plane
    tangent_part = utils.project_vector2tangent_plane(normal, rel_vec)
    x = np.linalg.norm(tangent_part, axis=1).reshape([n_profiles, n_samples])
    # y: length of the component along the normal, signed by which side of
    # the tangent plane the point lies on
    normal_part = utils.project_vector2vector(normal, rel_vec)
    y_sign = np.sign(rel_vec.dot(normal))
    y = (np.linalg.norm(normal_part, axis=1) * y_sign).reshape(
        [n_profiles, n_samples])
    return x, y
def get_texture_value_on_profile(texture, mesh, profiling_samples,
                                 profiling_samples_fid):
    """Interpolate texture values at every profile sampling point.

    Uses barycentric interpolation over the mesh faces that contain each
    sampling point.

    :param texture: slam texture
    :param mesh: trimesh object
    :param profiling_samples: (N, N_p, N_s, 3, 3) float
        N = Number of center vertices for surface profiling.
        N_p = Number of profiles for each center.
        N_s = Number of sampling points on each profiles.
        3 = [p1, p2, sampling points]
        3 = (3,) float
    :param profiling_samples_fid: (N, N_p, N_s) int
        Faces id corresponding to the profile sampling points.
    :return:
        texture_profile: (N, N_p, N_s) float
    """
    # Barycentric weights of every sampling point inside its containing face.
    weights = compute_profile_barycentric_para(
        profiling_samples, mesh, profiling_samples_fid)
    # Interpolate the first texture channel with those weights.
    return compute_profile_texture_barycentric(
        texture.darray[0], mesh, profiling_samples_fid, weights)
def compute_profile_barycentric_para(profile_sample_points, mesh, triangle_id):
    """
    Compute the barycentric parameters of each points on profiles
    :param profile_sample_points: (N, N_p, N_s, 3, 3) float
        N = Number of center vertices for surface profiling.
        N_p = Number of profiles for each center.
        N_s = Number of sampling points on each profiles.
        3 = [p1, p2, sampling points]
        3 = (3,) float
    :param mesh: trimesh object
    :param triangle_id: (N, N_p, N_s) int
        Faces id corresponding to the profile sampling points.
    :return:
        barycentric: (N, N_p, N_s, 3) float
        Barycentric coordinates for all profiles points
    :raises ValueError: if profile_sample_points is not 5-dimensional.
    """
    # Validate the rank early and raise a specific exception type
    # (the original raised a bare Exception, which callers cannot
    # handle selectively).
    if len(profile_sample_points.shape) != 5:
        raise ValueError('Wrong type of profile_sample_points, '
                         'it must be (N, N_p, N_s, 3, 3).')
    vert = mesh.vertices
    poly = mesh.faces
    # Index 2 of the 4th axis holds the actual sampling point
    # (indices 0 and 1 are the p1/p2 endpoints).
    sample_points_profile = profile_sample_points[:, :, :, 2]
    sample_points = sample_points_profile.reshape(
        sample_points_profile.size // 3, 3)
    # Flatten face ids to match the flattened sample points.
    triangle_id = triangle_id.reshape(triangle_id.size)
    triangles_v = vert[poly[triangle_id]]
    barycentric = trimesh.triangles.points_to_barycentric(triangles_v,
                                                          sample_points)
    # Restore the (N, N_p, N_s, 3) layout.
    barycentric = barycentric.reshape(len(sample_points_profile),
                                      len(sample_points_profile[0]),
                                      len(sample_points_profile[0][0]), 3)
    return barycentric
def compute_profile_texture_barycentric(texture, mesh, triangle_id,
                                        barycentric_coord):
    """
    Compute the texture values of each points on profiles
    :param texture: darray of slam texture (one value per mesh vertex)
    :param mesh: trimesh object
    :param triangle_id: (N, N_p, N_s) int
        Faces id corresponding to the profile sampling points.
    :param barycentric_coord: (N, N_p, N_s, 3) float
        Barycentric coordinates for all profiles points
    :return: (N, N_p, N_s) float interpolated texture values
    """
    num_profiles = len(barycentric_coord)
    num_areas = len(barycentric_coord[0])
    num_sides = len(barycentric_coord[0][0])
    poly = mesh.faces
    # Flatten ids/weights so a single vectorized gather covers all points.
    flat_ids = triangle_id.reshape(-1)
    weights = barycentric_coord.reshape(-1, 3)
    # Texture values at the three corners of each containing face.
    corner_values = texture[poly[flat_ids]]
    # Barycentric interpolation = weighted sum over the three corners.
    # Clearer than the original np.dot(values * weights, [1, 1, 1]),
    # which performed the same axis-1 sum.
    texture_profile = (corner_values * weights).sum(axis=1)
    return texture_profile.reshape(num_profiles, num_areas, num_sides)
| 34.472125 | 80 | 0.644211 |
79584b29898b30b30bf18050f26b065f7b273c76 | 1,672 | py | Python | config.py | Mohllal/flasky | 324256dd43826961a85f7432b7af8b692727372e | [
"MIT"
] | null | null | null | config.py | Mohllal/flasky | 324256dd43826961a85f7432b7af8b692727372e | [
"MIT"
] | 4 | 2020-03-24T16:39:58.000Z | 2022-03-08T21:09:29.000Z | config.py | Mohllal/flasky | 324256dd43826961a85f7432b7af8b692727372e | [
"MIT"
] | 2 | 2020-03-26T02:27:48.000Z | 2020-12-29T09:40:01.000Z | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'secretkey'
MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_POSTS_PER_PAGE = os.environ.get('FLASKY_POSTS_PER_PAGE') or 25
FLASKY_FOLLOWERS_PER_PAGE = os.environ.get('FLASKY_FOLLOWERS_PER_PAGE') or 15
FLASKY_COMMENTS_PER_PAGE = os.environ.get('FLASKY_COMMENTS_PER_PAGE') or 30
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
FLASKY_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
    """Development settings: debug on, local SQLite fallback database."""
    DEBUG = True
    # DEV_DATABASE_URL wins; empty/unset falls back to a local SQLite file.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test settings: TESTING flag on, in-memory SQLite fallback."""
    TESTING = True
    # 'sqlite://' (no path) is SQLAlchemy's in-memory database URI.
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite://'
class ProductionConfig(Config):
    """Production settings: DATABASE_URL with a local SQLite fallback."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Registry used by the app factory to select a configuration by name.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| 32.153846 | 81 | 0.703349 |
79584d27bf5293812d00f9c67add595f63890376 | 1,642 | py | Python | ur_robot_driver/tests/utils/ur_msg.py | UniversalRobots/Universal_Robots_Isaac_Driver | 7a0dae53b40437ad5b84906175003e21ebfdc3fe | [
"Apache-2.0"
] | 16 | 2020-09-23T13:27:14.000Z | 2022-01-21T14:58:33.000Z | ur_robot_driver/tests/utils/ur_msg.py | UniversalRobots/Universal_Robots_Isaac_Driver | 7a0dae53b40437ad5b84906175003e21ebfdc3fe | [
"Apache-2.0"
] | 3 | 2021-06-10T08:02:46.000Z | 2022-01-27T10:41:16.000Z | ur_robot_driver/tests/utils/ur_msg.py | UniversalRobots/Universal_Robots_Isaac_Driver | 7a0dae53b40437ad5b84906175003e21ebfdc3fe | [
"Apache-2.0"
] | 3 | 2021-02-24T01:53:48.000Z | 2021-06-03T15:51:08.000Z | import glob
import capnp
from packages.pyalice import Message
def create_ur_msg_dictornary():
    """Load every Cap'n Proto schema from the ur_msg folder.

    Globs all "*.capnp" files and collects their struct schemata into a
    dictionary keyed by struct name.
    """
    schemata = {}
    for path in glob.glob("packages/universal_robots/ur_msg/*.capnp"):
        loaded = capnp.load(path)
        # Keep only struct definitions; capnp exposes them as _StructModule.
        schemata.update(
            (attr, value)
            for attr, value in vars(loaded).items()
            if value.__class__.__name__ == "_StructModule"
        )
    return schemata
def ur_capnp_schema_type_id_dict():
    """Create a dictionary mapping Cap'n Proto type ids to class schemata."""
    capnp_files = glob.glob("packages/universal_robots/ur_msg/*.capnp")
    result = {}
    for capnp_f in capnp_files:
        module = capnp.load(capnp_f)
        for name, obj in module.__dict__.items():
            if obj.__class__.__name__ == "_StructModule":
                type_id = obj.schema.node.id
                # Guard against two schemata sharing one type id. The
                # original asserted `name not in result`, which could never
                # fire because the dict is keyed by type id, not by name.
                assert type_id not in result
                result[type_id] = obj
    return result
# Build both lookup tables once at import time so the helpers below
# can reuse them on every message.
UR_CAPNP_DICT = create_ur_msg_dictornary()
UR_CAPNP_TYPE_ID_DICT = ur_capnp_schema_type_id_dict()
def create_ur_msg(msg_type):
    """Creates a proto message for populating and publishing from specified proto name.

    Raises KeyError if *msg_type* is not a schema name found in ur_msg.
    """
    msg = Message.MessageBuilder()
    msg.proto = UR_CAPNP_DICT[msg_type].new_message()
    return msg
def get_ur_msg(msg):
    """Creates a wrapper for received message (Read-Only).

    Decodes the message's raw Cap'n Proto bytes with the schema matching
    its type id and caches the result on the message object.
    NOTE(review): reaches into private attributes (_message, _proto) of the
    pyalice Message wrapper; if no bytes are present the previous _proto
    value (presumably None) is returned -- confirm against pyalice internals.
    """
    data = msg._message.get_capn_proto_bytes()
    if data:
        msg._proto = UR_CAPNP_TYPE_ID_DICT[msg.type_id].from_bytes(data)
    return msg._proto
79584dc63464947f796395884e01cb066802e51c | 2,947 | py | Python | sicp_1.1.1/scheme.py | cellularmitosis/scheme_interpreters | bd4bbdd114b8a0a5ca0584184788181ae62556b4 | [
"MIT"
] | null | null | null | sicp_1.1.1/scheme.py | cellularmitosis/scheme_interpreters | bd4bbdd114b8a0a5ca0584184788181ae62556b4 | [
"MIT"
] | null | null | null | sicp_1.1.1/scheme.py | cellularmitosis/scheme_interpreters | bd4bbdd114b8a0a5ca0584184788181ae62556b4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# a scheme interpreter which can handle SICP through section 1.1.1 and exercise 1.1.1.
import sys
from functools import reduce
def main():
    """Read the source file named on the command line and print the value
    of each top-level form."""
    for combination in tokenize_combinations(read_source_file(), 0):
        # Parenthesized form: behaves as the print statement on Python 2
        # and the print() function on Python 3 for a single argument.
        print(evaluate(combination))
def read_source_file():
    """Return the entire contents of the source file named in argv[1]."""
    path = sys.argv[1]
    with open(path, "r") as handle:
        return handle.read()
def tokenize_combinations(text, index):
    """Split *text* into a list of top-level forms, starting at *index*.

    Parenthesized forms become nested token lists; bare words become
    strings. Whitespace between forms is skipped.
    """
    forms = []
    length = len(text)
    while index < length:
        current = text[index]
        if current == "(":
            form, index = tokenize_combination(text, index)
            forms.append(form)
            # tokenize_combination leaves index on the closing paren.
            assert text[index] == ")"
        elif current != " " and current != "\n":
            word, index = tokenize_word(text, index)
            forms.append(word)
        index += 1
    return forms
def tokenize_combination(text, index):
    """Tokenize one parenthesized form beginning at *index*.

    Returns (tokens, stop_index) where stop_index points at the closing
    paren (or end of text). Nested forms recurse into nested lists.
    """
    assert text[index] == "("
    index += 1
    tokens = []
    while index < len(text):
        current = text[index]
        if current == ")":
            break
        if current == "(":
            nested, nested_end = tokenize_combination(text, index)
            tokens.append(nested)
            index = nested_end + 1
        elif current in (" ", "\n"):
            index += 1
        else:
            word, index = tokenize_word(text, index)
            if word:
                tokens.append(word)
    assert len(tokens) > 0
    return (tokens, index)
def tokenize_word(text, index):
    """Collect characters until whitespace or a closing paren.

    Returns (word, stop_index) where stop_index is the position of the
    delimiter (or end of text).
    """
    start = index
    while index < len(text) and text[index] not in (" ", "\n", ")"):
        index += 1
    return (text[start:index], index)
def evaluate(combination):
    """Recursively evaluate a tokenized form and return its value.

    Nested sub-forms are evaluated first, then the flattened form is
    parsed into an operator and operands and applied.
    """
    if isinstance(combination, list):
        reduced = [evaluate(token) if isinstance(token, list) else token
                   for token in combination]
    else:
        reduced = combination
    parsed = parse_combination(reduced)
    if isinstance(parsed, list):
        operator, operands = parsed[0], parsed[1:]
        return operator(operands)
    return parsed
def parse_combination(combination):
    """Turn token strings into callables and numbers.

    A list becomes [operator_fn, operand, ...]; a bare token becomes a
    parsed operand.
    """
    if not isinstance(combination, list):
        return parse_operand(combination)
    operator = parse_operator(combination[0])
    operands = [parse_operand(token) for token in combination[1:]]
    return [operator] + operands
def parse_operand(token):
    """Convert a string token to int or float; pass through parsed values."""
    if not isinstance(token, str):
        return token
    # A decimal point marks a float literal; everything else is an int.
    return float(token) if "." in token else int(token)
def parse_operator(token):
    """Map an operator token to its implementation function.

    Raises:
        ValueError: if the token is not one of + - * /.
        (The original used `assert False`, which is silently stripped
        when Python runs with -O.)
    """
    if token == "+":
        return add
    elif token == "-":
        return subtract
    elif token == "*":
        return multiply
    elif token == "/":
        return divide
    raise ValueError("unknown operator: %s" % token)
def add(operands):
    """Return the sum of all operands (left fold with +).

    Relies on `from functools import reduce` at module level, which makes
    this work on both Python 2 (where reduce was a builtin) and Python 3.
    """
    return reduce(lambda x, y: x + y, operands)
def subtract(operands):
    """Fold the operands left-to-right with subtraction."""
    return reduce(lambda acc, value: acc - value, operands)
def multiply(operands):
    """Fold the operands left-to-right with multiplication."""
    return reduce(lambda acc, value: acc * value, operands)
def divide(operands):
    """Fold the operands left-to-right with division."""
    return reduce(lambda acc, value: acc / value, operands)
# Run the interpreter only when invoked as a script.
if __name__ == "__main__":
    main()
| 23.388889 | 86 | 0.685443 |
79584eec7b09f60978e12bfbaa4180149d42e1a7 | 4,959 | py | Python | backend/api/issues/routes.py | accorvin/backlog-ballot | 8428701dfd66bf86a8bd97a795d9fa198f79ee81 | [
"MIT"
] | null | null | null | backend/api/issues/routes.py | accorvin/backlog-ballot | 8428701dfd66bf86a8bd97a795d9fa198f79ee81 | [
"MIT"
] | null | null | null | backend/api/issues/routes.py | accorvin/backlog-ballot | 8428701dfd66bf86a8bd97a795d9fa198f79ee81 | [
"MIT"
] | null | null | null | import traceback
from api import db
from api.models import Issue
from api.issues import bp
from flask import current_app, jsonify, request
def _get_all_issues(page=1, issues_per_page=15):
    """Return one page of issues, newest first, as serialized dicts."""
    query = Issue.query.order_by(Issue.created.desc())
    page_obj = query.paginate(page, issues_per_page, False)
    return [item.serialize for item in page_obj.items]
@bp.route('/exists', methods=['POST'])
def issue_exists():
    """Report whether an issue with the posted JIRA key is already stored.

    Expects JSON body {"jiraKey": "..."}; responds {"exists": bool}.
    """
    post_data = request.get_json()
    # Collect any required keys absent from the payload.
    missing = [key for key in ['jiraKey'] if key not in post_data]
    if missing:
        msg = ('The following required keys about the issue were not '
               'specified: {missing}').format(missing=missing)
        response = {
            'code': 'data_missing',
            'description': msg
        }
        return jsonify(response), 400
    jira_key = post_data['jiraKey']
    exists = Issue.query.filter_by(jira_key=jira_key).count() > 0
    if exists:
        current_app.logger.debug(
            'Issue with key {key} already exists'.format(key=jira_key))
    else:
        current_app.logger.debug(
            'Issue with key {key} does not exist'.format(key=jira_key))
    return jsonify({'exists': exists}), 200
@bp.route('/all', methods=['GET'])
def get_all_issues():
    """Return one page of issues ordered newest first.

    Query args: page (default 1), issues_per_page (default 20).
    """
    page = request.args.get('page', 1, type=int)
    issues_per_page = request.args.get('issues_per_page', 20, type=int)
    current_app.logger.info('Attempting to get issues')
    try:
        query = Issue.query.order_by(Issue.created.desc())
    except Exception:
        detail = 'Error while getting issues: {exc}'.format(
            exc=traceback.format_exc())
        current_app.logger.error(detail)
        return jsonify({'code': 'fail', 'message': detail}), 500
    current_app.logger.info('Successfully fetched issues')
    page_obj = query.paginate(page, issues_per_page, False)
    serialized = [item.serialize for item in page_obj.items]
    return jsonify({'issues': serialized}), 200
@bp.route('/vote', methods=['POST', 'OPTIONS'])
def vote():
    """Increment the vote count on an issue and return the refreshed list.

    Expects JSON body {"issueId": <id>}. Non-POST requests (e.g. CORS
    preflight OPTIONS) receive an empty 200 response.
    """
    if request.method != 'POST':
        response = {}
        return jsonify(response), 200
    post_data = request.get_json()
    if request.method == 'POST' and post_data is None:
        current_app.logger.error('Throwing error')
        response = {
            'code': 'no_data',
            'message': 'No post data was supplied'
        }
        return jsonify(response), 400
    required_keys = [
        'issueId'
    ]
    current_app.logger.debug(post_data)
    # Remove every supplied key; whatever remains is missing.
    for key in required_keys.copy():
        if key in post_data:
            required_keys.remove(key)
    if len(required_keys) > 0:
        msg = ('The following required keys about the issue were not '
               'specified: {missing}').format(missing=required_keys)
        response = {
            'code': 'data_missing',
            'description': msg
        }
        return jsonify(response), 400
    issue_id = post_data['issueId']
    # NOTE(review): raises IndexError (HTTP 500) when the id does not
    # exist; a 404 may be preferable -- confirm desired behavior.
    issue = Issue.query.filter(Issue.id == issue_id)[0]
    current_app.logger.debug('Incrementing vote on issue {0}'.format(issue))
    issue.increment_votes()
    db.session.add(issue)
    db.session.commit()
    current_app.logger.debug('Successfully incremented vote')
    # Defaults to page 1 with 15 issues per page.
    issues = _get_all_issues()
    response = {'issues': issues}
    return jsonify(response), 201
@bp.route('/new', methods=['POST'])
def add_issue():
    """Create a new issue from the posted JSON payload.

    Requires title, description, created, jira_issue_url and jira_key.
    """
    post_data = request.get_json()
    required = ['title', 'description', 'created', 'jira_issue_url',
                'jira_key']
    # Collect any required keys absent from the payload.
    missing = [key for key in required if key not in post_data]
    if missing:
        msg = ('The following required keys about the issue were not '
               'specified: {missing}').format(missing=missing)
        response = {
            'code': 'data_missing',
            'description': msg
        }
        return jsonify(response), 400
    # Persist the new issue.
    issue = Issue(
        title=post_data['title'],
        description=post_data['description'],
        created=post_data['created'],
        jira_issue_url=post_data['jira_issue_url'],
        jira_key=post_data['jira_key']
    )
    db.session.add(issue)
    db.session.commit()
    current_app.logger.info('Successfully saved a new issue')
    response_object = {
        'code': 'success',
        'description': 'Issue stored successfully'
    }
    return jsonify(response_object), 201
| 29.517857 | 76 | 0.623916 |
79584f49448ea81f6103162677e2d5eb9c2c867f | 2,127 | py | Python | wikimedia_thumbor/app.py | wikimedia/operations-software-thumbor-plugins | b30f1594e05118a1d2ed77a886d270866206d08a | [
"MIT"
] | null | null | null | wikimedia_thumbor/app.py | wikimedia/operations-software-thumbor-plugins | b30f1594e05118a1d2ed77a886d270866206d08a | [
"MIT"
] | null | null | null | wikimedia_thumbor/app.py | wikimedia/operations-software-thumbor-plugins | b30f1594e05118a1d2ed77a886d270866206d08a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2016 Wikimedia Foundation
import manhole
import os.path
import tempfile
import thumbor.engines
from thumbor.utils import logger, which
from thumbor.handlers import ContextHandler
from wikimedia_thumbor.core import Extensions
from wikimedia_thumbor.core.app import App as CommunityCoreApp
class App(CommunityCoreApp):
    """Wikimedia-specific Thumbor application.

    Extends the community core App with optional manhole debugging,
    a gifsicle binary lookup, and disabled built-in EXIF parsing.
    """
    def __init__(self, context):
        # Optionally open a manhole debug socket named after the server
        # port so multiple instances can coexist in one temp dir.
        if context.config.get('MANHOLE_DEBUGGING', None):
            logger.debug('Installing manhole')
            socket = 'manhole-%s' % context.server.port
            socket_path = os.path.join(
                tempfile.gettempdir(),
                socket
            )
            manhole.install(socket_path=socket_path)
        # The gifsicle engine needs to work, regardless of
        # USE_GIFSICLE_ENGINE being on or not
        context.server.gifsicle_path = which('gifsicle')
        # T178072 Disable Thumbor's built-in EXIF parsing, which
        # emits logger.error messages constantly because it's trying
        # to parse our truncated buffer. EXIF parsing is done in our
        # imagemagick engine instead.
        thumbor.engines.METADATA_AVAILABLE = False
        super(App, self).__init__(context)
    # We override this to avoid the catch-all ImagingHandler from
    # Thumbor which prevents us from 404ing properly on completely
    # broken URLs.
    def get_handlers(self):
        '''Return a list of tornado web handlers.
        '''
        handlers = []
        for extensions in Extensions.extensions:
            for handler in extensions.handlers:
                # Inject the context if the handler expects it.
                if issubclass(handler[1], ContextHandler):
                    if len(handler) < 3:
                        # Handler tuple has no kwargs dict yet: add one.
                        handler = list(handler)
                        handler.append(dict(context=self.context))
                    else:
                        handler[2]['context'] = self.context
                handlers.append(handler)
        return handlers
| 31.279412 | 68 | 0.633756 |
79584f5d9bef30d92dbd270119a021be67510abb | 319 | py | Python | profiles/forms.py | Shirhussain/Find-my-match | f11f64ebb0f2145d8cd9a352bd2c29bf30c85088 | [
"MIT"
] | null | null | null | profiles/forms.py | Shirhussain/Find-my-match | f11f64ebb0f2145d8cd9a352bd2c29bf30c85088 | [
"MIT"
] | null | null | null | profiles/forms.py | Shirhussain/Find-my-match | f11f64ebb0f2145d8cd9a352bd2c29bf30c85088 | [
"MIT"
] | null | null | null | from django import forms
from .models import UserJob, Profile
class UserJobForm(forms.ModelForm):
    """ModelForm exposing the editable fields of a UserJob."""
    class Meta:
        model = UserJob
        fields = ['position', 'location', 'employer_name']
class ProfileForm(forms.ModelForm):
    """ModelForm exposing the editable fields of a Profile."""
    class Meta:
        model = Profile
        fields = ['picture', 'location']
| 21.266667 | 58 | 0.655172 |
79584f5e238b406decb8f83e2695ebc50bba880a | 1,467 | py | Python | src/azure-cli/azure/cli/command_modules/acr/credential.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 3,287 | 2016-07-26T17:34:33.000Z | 2022-03-31T09:52:13.000Z | src/azure-cli/azure/cli/command_modules/acr/credential.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 19,206 | 2016-07-26T07:04:42.000Z | 2022-03-31T23:57:09.000Z | src/azure-cli/azure/cli/command_modules/acr/credential.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 2,575 | 2016-07-26T06:44:40.000Z | 2022-03-31T22:56:06.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from ._utils import get_registry_by_name
def acr_credential_show(cmd, client, registry_name, resource_group_name=None):
    """List admin credentials for a registry; error if admin user disabled."""
    registry, resource_group_name = get_registry_by_name(
        cmd.cli_ctx, registry_name, resource_group_name)
    # Guard clause: admin user must be enabled before credentials exist.
    if not registry.admin_user_enabled:  # pylint: disable=no-member
        raise admin_not_enabled_error(registry_name)
    return client.list_credentials(resource_group_name, registry_name)
def acr_credential_renew(cmd, client, registry_name, password_name, resource_group_name=None):
    """Regenerate one admin password; error if admin user disabled."""
    registry, resource_group_name = get_registry_by_name(
        cmd.cli_ctx, registry_name, resource_group_name)
    # Guard clause: admin user must be enabled for credential renewal.
    if not registry.admin_user_enabled:  # pylint: disable=no-member
        raise admin_not_enabled_error(registry_name)
    params = {'name': password_name}
    return client.regenerate_credential(resource_group_name, registry_name,
                                        params)
def admin_not_enabled_error(registry_name):
    """Return (not raise) a CLIError telling the user to enable admin first."""
    return CLIError("Run 'az acr update -n {} --admin-enabled true' to enable admin first.".format(registry_name))
| 45.84375 | 114 | 0.694615 |
7958507854ee529598362cf7963e91147f0a8d3e | 590 | py | Python | Session_1/Problem_1/plot_energies.py | jcartus/FHI-AIMS_Tutorials | d91c6865b3192e2e56f4572593255d52fd6191d8 | [
"MIT"
] | null | null | null | Session_1/Problem_1/plot_energies.py | jcartus/FHI-AIMS_Tutorials | d91c6865b3192e2e56f4572593255d52fd6191d8 | [
"MIT"
] | null | null | null | Session_1/Problem_1/plot_energies.py | jcartus/FHI-AIMS_Tutorials | d91c6865b3192e2e56f4572593255d52fd6191d8 | [
"MIT"
] | null | null | null | """This script plots the hydrogen energies with different basis sets. """
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("seaborn")
def fetch_data():
    """Load the semicolon-separated energies table, skipping the header row."""
    return np.loadtxt(fname="energies.csv", skiprows=1, delimiter=";")
def plot(data):
    """Plot energy (column 1) against basis-set index (column 0).

    Tick labels assume four basis sets: Minimal and Tiers 1-3.
    """
    plt.plot(data[:, 0], data[:, 1])
    plt.ylabel("Energy / eV")
    plt.xlabel("Basis Size / arb.")
    plt.xticks(
        ticks=list(range(4)),
        labels=['Minimal', 'Tier1', 'Tier 2', 'Tier 3']
    )
plot(fetch_data())
plt.show()
if __name__ == '__main__':
main()
| 19.666667 | 73 | 0.586441 |
7958508a64e801fdad7d5e90992ebfdf5cad4a94 | 8,556 | py | Python | venv/Lib/site-packages/player/layout_impl.py | mandamg/Exercicios-de-Python-do-Curso-em-Video | 3f818c11c3c10213bebc1dfb6a740adee468ea3a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/player/layout_impl.py | mandamg/Exercicios-de-Python-do-Curso-em-Video | 3f818c11c3c10213bebc1dfb6a740adee468ea3a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/player/layout_impl.py | mandamg/Exercicios-de-Python-do-Curso-em-Video | 3f818c11c3c10213bebc1dfb6a740adee468ea3a | [
"MIT"
] | null | null | null | """ layout implementation """
import sys
import json
import logging
import random
import venusian
from collections import namedtuple
from collections import OrderedDict
from zope.interface import providedBy, Interface
from pyramid.compat import text_, string_types
from pyramid.config.views import DefaultViewMapper
from pyramid.location import lineage
from pyramid.registry import Introspectable
from pyramid.renderers import RendererHelper
from pyramid.interfaces import IRequest, IResponse, IRouteRequest
log = logging.getLogger('player')
# Discriminator / introspection category used for all layout registrations.
LAYOUT_ID = 'player:layout'
# Registered per layout: its name, parent layout name, mapped view,
# original view callable, renderer helper and introspectable.
LayoutInfo = namedtuple(
    'LayoutInfo', 'name layout view original renderer intr')
# Location of the code that registered a layout (for debugging output).
CodeInfo = namedtuple(
    'Codeinfo', 'filename lineno function source module')
class ILayout(Interface):
    """Marker interface under which LayoutInfo adapters are registered."""
def query_layout(root, context, request, name=''):
    """Find the named layout for *context*, walking up its lineage.

    Returns (layout_info, context_it_was_found_on), or (None, None) when
    no registration matches.
    """
    assert IRequest.providedBy(request), "must pass in a request object"
    # Prefer the route-specific request interface when present.
    try:
        iface = request.request_iface
    except AttributeError:
        iface = IRequest
    root = providedBy(root)
    adapters = request.registry.adapters
    # Walk from the context toward the resource root until a layout
    # registration matches.
    for context in lineage(context):
        layout_factory = adapters.lookup(
            (root, iface, providedBy(context)), ILayout, name=name)
        if layout_factory is not None:
            return layout_factory, context
    return None, None
def query_layout_chain(root, context, request, layoutname=''):
    """Resolve *layoutname* and all of its parent layouts.

    Returns a list of (layout_info, context) pairs ordered child-first;
    empty list when the initial layout is not found.
    """
    chain = []
    layout, layoutcontext = query_layout(root, context, request, layoutname)
    if layout is None:
        return chain
    chain.append((layout, layoutcontext))
    # Remember where each named layout was found so a repeated parent name
    # restarts the search above that context (avoids infinite loops).
    contexts = {layoutname: layoutcontext}
    while layout is not None and layout.layout is not None:
        if layout.layout in contexts:
            l_context = contexts[layout.layout].__parent__
        else:
            l_context = context
        layout, layoutcontext = query_layout(
            root, l_context, request, layout.layout)
        if layout is not None:
            chain.append((layout, layoutcontext))
            contexts[layout.name] = layoutcontext
            if layout.layout is None:
                break
    return chain
def add_layout(cfg, name='', context=None, root=None, parent=None,
               renderer=None, route_name=None, use_global_views=True,
               view=None):
    """Registers a layout.
    :param name: Layout name
    :param context: Specific context for this layout.
    :param root: Root object
    :param parent: A parent layout. None means no parent layout.
    :param renderer: A pyramid renderer
    :param route_name: A pyramid route_name. Apply layout only for
        specific route
    :param use_global_views: Apply layout to all routes. even is route
        doesnt use use_global_views.
    :param view: View callable
    Simple example with one default layout and 'page' layout.
    .. code-block:: python
        class PageLayout(object):
            ...
        config.add_layout('page', parent='page', renderer='my_package:template/page.pt')
    To use layout with pyramid view use ``renderer=player.layout('my_pkg:template/page.pt')``
    Example:
    .. code-block:: python
        config.add_view('
            index.html',
            renderer = player.layout('...'))
    in this example '' layout is beeing used. You can specify specific layout
    name for pyramid view ``player.layout('page', 'layout name')``
    """
    # Record where this registration happened for debug output.
    (scope, module,
     f_locals, f_globals, codeinfo) = venusian.getFrameInfo(sys._getframe(2))
    codeinfo = CodeInfo(
        codeinfo[0], codeinfo[1], codeinfo[2], codeinfo[3], module.__name__)
    discr = (LAYOUT_ID, name, context, route_name)
    intr = Introspectable(LAYOUT_ID, discr, name, 'player_layout')
    intr['name'] = name
    intr['context'] = context
    intr['root'] = root
    intr['renderer'] = renderer
    intr['route_name'] = route_name
    intr['parent'] = parent
    intr['use_global_views'] = use_global_views
    intr['view'] = view
    intr['codeinfo'] = codeinfo
    # Normalize the parent name: falsy -> no parent, '.' -> default layout.
    if not parent:
        parent = None
    elif parent == '.':
        parent = ''
    if isinstance(renderer, string_types):
        renderer = RendererHelper(name=renderer, registry=cfg.registry)
    if context is None:
        context = Interface
    def register():
        # Deferred so route request interfaces exist at commit time.
        request_iface = IRequest
        if route_name is not None:
            request_iface = cfg.registry.getUtility(
                IRouteRequest, name=route_name)
            if use_global_views:
                request_iface = Interface
        mapper = getattr(view, '__view_mapper__', DefaultViewMapper)
        mapped_view = mapper()(view)
        info = LayoutInfo(name, parent, mapped_view, view, renderer, intr)
        cfg.registry.registerAdapter(
            info, (root, request_iface, context), ILayout, name)
    cfg.action(discr, register, introspectables=(intr,))
class LayoutRenderer(object):
    """Wraps rendered content in a named layout chain."""
    def __init__(self, layout):
        # Name of the innermost layout to apply.
        self.layout = layout
    def layout_info(self, layout, context, request, content,
                    colors=('green','blue','yellow','gray','black')):
        """Wrap *content* in an HTML comment + colored border for debugging."""
        intr = layout.intr
        view = intr['view']
        if view is not None:
            layout_factory = '%s.%s'%(view.__module__, view.__name__)
        else:
            layout_factory = 'None'
        data = OrderedDict(
            (('name', intr['name']),
             ('parent-layout', intr['parent']),
             ('layout-factory', layout_factory),
             ('python-module', intr['codeinfo'].module),
             ('python-module-location', intr['codeinfo'].filename),
             ('python-module-line', intr['codeinfo'].lineno),
             ('renderer', intr['renderer']),
             ('context', '%s.%s'%(context.__class__.__module__,
                                  context.__class__.__name__)),
             ('context-path', request.resource_url(context)),
             ))
        content = text_('\n<!-- layout:\n%s \n-->\n'\
                        '<div style="border: 2px solid %s">%s</div>')%(
            json.dumps(data, indent=2), random.choice(colors), content)
        return content
    def __call__(self, content, context, request):
        """Render *content* through every layout in the chain, inner first."""
        chain = query_layout_chain(request.root, context, request, self.layout)
        if not chain:
            log.warning(
                "Can't find layout '%s' for context '%s'",
                self.layout, context)
            return content
        value = request.layout_data
        for layout, layoutcontext in chain:
            # A layout view may contribute template data or short-circuit
            # with a full response.
            if layout.view is not None:
                vdata = layout.view(layoutcontext, request)
                if IResponse.providedBy(vdata):
                    return vdata
                if vdata is not None:
                    value.update(vdata)
            system = {'view': getattr(request, '__view__', None),
                      'renderer_info': layout.renderer,
                      'context': layoutcontext,
                      'request': request,
                      'content': content,
                      'wrapped_content': content}
            content = layout.renderer.render(value, system, request)
            if getattr(request, '__layout_debug__', False):
                content = self.layout_info(
                    layout, layoutcontext, request, content)
        return content
def set_layout_data(request, **kw):
    """Merge keyword arguments into the request's layout template data."""
    request.layout_data.update(kw)
class layout(RendererHelper):
    """Pyramid renderer helper that wraps view output in a layout chain.

    Used as ``add_view(..., renderer=player.layout('tmpl.pt', 'name'))``.
    """
    package = None
    renderer = None
    type = 'player:layout'
    def __init__(self, name='', layout=''):
        # name: inner renderer (template) name; layout: layout chain name.
        self.name = name
        self.layout_name = layout
    def render(self, value, system_values, request=None):
        renderer = self.renderer
        context = system_values.get('context', None)
        # Lazily create and cache the LayoutRenderer/registry on first use
        # (AttributeError means they have not been set yet).
        try:
            layout = self.layout
            registry = self.registry
        except AttributeError:
            layout = self.layout = LayoutRenderer(self.layout_name)
            registry = self.registry = request.registry
        if self.name:
            renderer = self.renderer = RendererHelper(
                self.name, registry=registry)
        # Render the inner template first, then wrap it in the layout.
        if renderer:
            value = renderer.render(value, system_values, request)
        return layout(value, context, request)
    def render_to_response(self, value, system_values, request=None):
        """Render and wrap; pass through if a layout view returned a Response."""
        result = self.render(value, system_values, request=request)
        if IResponse.providedBy(result):
            return result
        return self._make_response(result, request)
| 30.776978 | 93 | 0.618396 |
79585102c02e68d0c12fc3f22297f6bac8ceb047 | 1,608 | py | Python | python_modules/libraries/dagster-azure/setup.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-azure/setup.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-azure/setup.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from setuptools import find_packages, setup
def get_version() -> str:
    """Read ``__version__`` out of dagster_azure/version.py."""
    namespace: Dict[str, str] = {}
    with open("dagster_azure/version.py", encoding="utf8") as fp:
        exec(fp.read(), namespace)  # pylint: disable=W0122
    return namespace["__version__"]
if __name__ == "__main__":
    ver = get_version()
    # dont pin dev installs to avoid pip dep resolver issues
    pin = "" if ver == "0+dev" else f"=={ver}"
    setup(
        name="dagster-azure",
        version=ver,
        author="Elementl",
        author_email="hello@elementl.com",
        license="Apache-2.0",
        description="Package for Azure-specific Dagster framework op and resource components.",
        url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-azure",
        classifiers=[
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "License :: OSI Approved :: Apache Software License",
            "Operating System :: OS Independent",
        ],
        packages=find_packages(exclude=["dagster_azure_tests*"]),
        include_package_data=True,
        # Keep the dagster pin in sync with this package's own version.
        install_requires=[
            "azure-core<2.0.0,>=1.7.0",
            "azure-storage-blob<13.0.0,>=12.5.0",
            "azure-storage-file-datalake<13.0.0,>=12.5",
            f"dagster{pin}",
        ],
        entry_points={"console_scripts": ["dagster-azure = dagster_azure.cli.cli:main"]},
        zip_safe=False,
    )
| 35.733333 | 103 | 0.598259 |
7958525f058e04e212af78f58fe9702d105a0e86 | 5,245 | py | Python | my_site/settings.py | a1401358759/my_site | 9ed227f825f1c25c903a10271ea429fba1e1ee73 | [
"MIT"
] | 50 | 2019-02-19T09:57:07.000Z | 2021-11-09T12:02:14.000Z | my_site/settings.py | a1401358759/my_site | 9ed227f825f1c25c903a10271ea429fba1e1ee73 | [
"MIT"
] | 17 | 2019-12-13T07:09:53.000Z | 2021-12-11T03:57:58.000Z | my_site/settings.py | a1401358759/my_site | 9ed227f825f1c25c903a10271ea429fba1e1ee73 | [
"MIT"
] | 11 | 2019-02-19T09:58:08.000Z | 2021-03-28T13:22:20.000Z | """
Django settings for my_site project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from utils.libs.config.logger_settings import LOGGING, LOG_ROOT
from utils.config import (
MYSQL_HOST, MYSQL_PORT, MYSQL_DATABASE, MYSQL_USER, MYSQL_PASSWORD,
REDIS_HOST, REDIS_PORT
)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p-2_9jdgcawck*piav1d(kq-!((g#8#riop01(^5ilnl6f(ram'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG', 'True') == 'True'
ALLOWED_HOSTS = ['*']
LOGIN_URL = '/manager/login/'
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'bootstrap_pagination',
'article',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'utils.dlibs.middleware.request_init.RequestInitMiddleware',
]
# 在Django 3.0.x中XFrameOptionsMiddleware中间件设置X_FRAME_OPTIONS的默认值从SAMEORIGIN更改为DENY
X_FRAME_OPTIONS = 'SAMEORIGIN'
ROOT_URLCONF = 'my_site.urls'
WSGI_APPLICATION = 'my_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Per-connection timeout (seconds) forwarded to the MySQL client driver.
MYSQLDB_CONNECT_TIMEOUT = 1
DATABASES = {
    'default': {
        'CONN_MAX_AGE': 3600,  # reuse DB connections for up to an hour
        'ENGINE': 'django.db.backends.mysql',
        'NAME': MYSQL_DATABASE,
        'USER': MYSQL_USER,
        'PASSWORD': MYSQL_PASSWORD,
        'HOST': MYSQL_HOST,
        # BUG FIX: Django reads the upper-case 'PORT' key; the previous
        # lower-case 'port' entry was silently ignored, so connections always
        # went to the driver's default port instead of MYSQL_PORT.
        'PORT': MYSQL_PORT,
        'TEST': {
            'CHARSET': 'utf8mb4',
            'COLLATION': 'utf8mb4_general_ci'
        },
        'OPTIONS': {
            'connect_timeout': MYSQLDB_CONNECT_TIMEOUT,
            'charset': 'utf8mb4'
        }
    }
}
# Session settings (cache-backed sessions, 24h cookie lifetime below)
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # 是否将session有效期设置为到浏览器关闭为止
SESSION_COOKIE_AGE = 24 * 60 * 60 # 当上例为False时,此项生效,单位为秒
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache', # django-redis==4.11.0支持Django3.0+
'LOCATION': f'redis://{REDIS_HOST}:{REDIS_PORT}/1',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': '',
'CONNECTION_POOL_KWARGS': {
'max_connections': 100
},
}
},
}
# celery config
from config.celery_conf import *
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static/'),
# ]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# cors headers config
CORS_ORIGIN_ALLOW_ALL = True
# Redirect every file-based logging handler into LOG_ROOT/logs/, keeping only
# the configured file's basename.  Only the handler dicts are needed, so
# iterate .values() — the key from the original .items() loop was never used.
for handler in LOGGING['handlers'].values():
    if handler.get('filename'):
        handler['filename'] = os.path.join(LOG_ROOT, "logs", os.path.basename(handler['filename']))
| 28.977901 | 99 | 0.682555 |
79585311fc6bb6f0adae7eb7844cadffacb1cf89 | 393 | py | Python | climbing-stairs/climbing-stairs.py | rams1996/Dynamic-Programming | 61e7b7d1576202215b5d0159372830c356f8dea8 | [
"MIT"
] | null | null | null | climbing-stairs/climbing-stairs.py | rams1996/Dynamic-Programming | 61e7b7d1576202215b5d0159372830c356f8dea8 | [
"MIT"
] | null | null | null | climbing-stairs/climbing-stairs.py | rams1996/Dynamic-Programming | 61e7b7d1576202215b5d0159372830c356f8dea8 | [
"MIT"
] | null | null | null | class Solution:
def climbStairs(self, n: int) -> int:
dp={}
def dfs(total,c):
if total in dp:
return dp[total]
if total==n:
return 1
if total>n:
return 0
a=dfs(total+1,c+1)
b=dfs(total+2,c+1)
dp[total]=a+b
return a+b
return dfs(0,0)
| 24.5625 | 41 | 0.402036 |
7958532390422ce3c5cac766ca8aa3c95b54fe73 | 955 | py | Python | thornode_client/test/test_liquidity_provider.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | [
"MIT"
] | null | null | null | thornode_client/test/test_liquidity_provider.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | [
"MIT"
] | null | null | null | thornode_client/test/test_liquidity_provider.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | [
"MIT"
] | null | null | null | # coding: utf-8
"""
THORChain API
This documentation outlines the API for THORChain. NOTE: This document is a **work in progress**. # noqa: E501
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import thornode_client
from thornode_client.models.liquidity_provider import LiquidityProvider # noqa: E501
from thornode_client.rest import ApiException
class TestLiquidityProvider(unittest.TestCase):
    """LiquidityProvider unit test stubs (swagger-codegen generated)."""
    def setUp(self):
        # No fixtures needed yet; stub retained for future test data.
        pass
    def tearDown(self):
        pass
    def testLiquidityProvider(self):
        """Test LiquidityProvider"""
        # FIXME: construct object with mandatory attributes with example values
        # model = thornode_client.models.liquidity_provider.LiquidityProvider()  # noqa: E501
        pass
# Allow running this test module directly with `python test_liquidity_provider.py`.
if __name__ == '__main__':
    unittest.main()
| 23.292683 | 116 | 0.712042 |
795854137411597bb41fb26a7e11d953a58699b0 | 2,190 | py | Python | ai/common/env_runner.py | alexgorin/gymai | 2f32fcc8cbd3cce5ba43afe2293776b9e7f5ffa3 | [
"MIT"
] | null | null | null | ai/common/env_runner.py | alexgorin/gymai | 2f32fcc8cbd3cce5ba43afe2293776b9e7f5ffa3 | [
"MIT"
] | null | null | null | ai/common/env_runner.py | alexgorin/gymai | 2f32fcc8cbd3cce5ba43afe2293776b9e7f5ffa3 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Tuple
import gym
from ai.common.agent import Agent
@dataclass
class EnvRunner:
    """Drives an :class:`Agent` through episodes of a Gym environment.

    The episode loop lives in :meth:`run`; subclasses customise behaviour by
    overriding the ``on_*`` hooks (logging, training updates, rendering, ...)
    without re-implementing the loop itself.
    """
    env: gym.Env
    agent: Agent
    render: bool = False
    # One (steps survived, total reward) tuple per episode; rebuilt by run().
    episode_reward_history: List[Tuple[int, float]] = field(default_factory=list, init=False)
    def run(self, episode_count: int, steps_count: int) -> List[Tuple[int, float]]:
        """Run up to ``episode_count`` episodes of at most ``steps_count`` steps.

        Returns ``episode_reward_history``: a list of
        ``(steps survived, total episode reward)`` tuples.
        """
        self.on_start()
        done, step_index = False, 0
        self.episode_reward_history = []
        for episode_index in range(episode_count):
            episode_reward = 0
            observation = self.env.reset()
            self.on_episode_start(episode_index, observation)
            for step_index in range(steps_count):
                action, next_observation, reward, done, info = self.on_step(episode_index, step_index, observation)
                episode_reward += reward
                if done:
                    self.on_done(episode_index, step_index, observation, action, next_observation, reward, done, info)
                    break
                else:
                    observation = next_observation
            # step_index holds the last executed step, so step_index + 1 is
            # the number of steps taken this episode.
            self.episode_reward_history.append((step_index + 1, episode_reward))
            # NOTE(review): the hook receives steps_count=step_index (the last
            # index), while the history records step_index + 1 — confirm which
            # convention is intended.
            self.on_episode_finish(episode_index, done, steps_count=step_index)
        self.on_finish()
        return self.episode_reward_history
    def on_start(self):
        # Hook: called once before the first episode.
        pass
    def on_finish(self):
        # Hook: called once after the last episode; closes the environment.
        self.env.close()
    def on_episode_start(self, episode_index, observation):
        # Hook: called after env.reset() with the initial observation.
        pass
    def on_episode_finish(self, episode_index, done, steps_count):
        # Hook: default implementation just reports episode length.
        if done:
            print(f"Episode {episode_index} finished after {steps_count} steps")
        else:
            print(f"Episode {episode_index} lasted longer than {steps_count + 1} steps")
    def on_step(self, episode_index, step_index, observation):
        """Render (optionally), query the agent, and advance the env one step."""
        if self.render:
            self.env.render()
        action = self.agent.act(observation)
        next_observation, reward, done, info = self.env.step(action)
        return action, next_observation, reward, done, info
    def on_done(self, episode_index, step_index, observation, action, next_observation, reward, done, info):
        # Hook: called once when the environment signals episode termination.
        pass
| 35.322581 | 118 | 0.648402 |
795854524a66e8ea45fa1a27c0efd0a40190535a | 1,557 | py | Python | reference_parsing/model_dev/BER_error_calculation.py | ScholarIndex/LinkedBooks | 0cae008427ed1eb34a882e9d85f24b42b3ee3a28 | [
"MIT"
] | null | null | null | reference_parsing/model_dev/BER_error_calculation.py | ScholarIndex/LinkedBooks | 0cae008427ed1eb34a882e9d85f24b42b3ee3a28 | [
"MIT"
] | 6 | 2020-03-20T18:10:01.000Z | 2021-09-29T17:31:17.000Z | reference_parsing/model_dev/BER_error_calculation.py | ScholarIndex/LinkedBooks | 0cae008427ed1eb34a882e9d85f24b42b3ee3a28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Balanced Error Rate error functions
"""
__author__ = """Giovanni Colavizza"""
import numpy as np
def BER(yn, ynhat):
    """
    Implementation of Balanced Error Rate.
    :param yn: ground truth, a sequence of label sequences
    :param ynhat: predicted values, nested the same way as yn
    :return: mean per-class error rate; 1.0 when no class occurs in yn
    """
    # Flatten the nested label sequences into 1-d arrays.
    yn = np.array([label for seq in yn for label in seq])
    ynhat = np.array([label for seq in ynhat for label in seq])
    classes = set(list(yn) + list(ynhat))  # set of unique classes
    error = 0.0
    num_classes = 0
    for cls in classes:
        support = np.sum(yn == cls)
        # BUG FIX: the original guard was `len(np.array(yn == cls)) != 0`,
        # which is true for EVERY class whenever yn is non-empty (len of a
        # boolean mask is len(yn)).  Classes present only in the predictions
        # then divided by zero support and poisoned the score with nan/inf.
        # Guard on the support itself, mirroring BER_vector below.
        if support != 0:
            error += np.sum((yn == cls) & (yn != ynhat)) / float(support)
            num_classes += 1
    if num_classes == 0:
        return 1.0
    return error / num_classes
def BER_vector(yn, ynhat):
    """
    Implementation of Balanced Error Rate, returns a vector with errors for each class
    :param yn: ground truth
    :param ynhat: predicted values
    :return: error score vector, scores for each class
    """
    # Flatten the nested label sequences into flat lists, then arrays.
    flat_truth = []
    flat_pred = []
    for chunk in yn:
        flat_truth.extend(chunk)
    for chunk in ynhat:
        flat_pred.extend(chunk)
    truth = np.array(flat_truth)
    pred = np.array(flat_pred)
    error = []
    classes = []
    # Only classes that actually occur in the ground truth get a score.
    for label in set(list(truth) + list(pred)):
        support = np.sum(truth == label)
        if support != 0:
            misses = np.sum(np.array(truth == label) * np.array(truth != pred))
            error.append(misses / float(support))
            classes.append(label)
    return error, classes
795854f0c0a0c3d6057255d60e176be6e8ad8bde | 12,789 | py | Python | conda_build/inspect_pkg.py | isuruf/conda-build | 9f163925f5d03a46e921162892bf4c6bc86b1072 | [
"BSD-3-Clause"
] | null | null | null | conda_build/inspect_pkg.py | isuruf/conda-build | 9f163925f5d03a46e921162892bf4c6bc86b1072 | [
"BSD-3-Clause"
] | 1 | 2019-10-08T15:03:56.000Z | 2019-10-08T15:03:56.000Z | conda_build/inspect_pkg.py | awwad/conda-build | b0be80283ec2e3ef7e49b5da923b1438e74e27b5 | [
"BSD-3-Clause"
] | null | null | null | # (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import json
from operator import itemgetter
from os.path import abspath, join, dirname, exists, basename
import os
import re
import sys
import tempfile
from conda_build.os_utils.ldd import get_linkages, get_package_obj_files, get_untracked_obj_files
from conda_build.os_utils.liefldd import codefile_type
from conda_build.os_utils.macho import get_rpaths, human_filetype
from conda_build.utils import (groupby, getter, comma_join, rm_rf, package_has_file, get_logger,
ensure_list)
from conda_build.conda_interface import (iteritems, specs_from_args, is_linked, linked_data, linked,
get_index)
from conda_build.conda_interface import display_actions, install_actions
from conda_build.conda_interface import memoized
@memoized
def dist_files(prefix, dist):
    """Return the set of files recorded for ``dist`` inside ``prefix``.

    Memoized because which_package() probes the same distributions
    repeatedly; returns an empty set when the dist is not linked.
    """
    meta = is_linked(prefix, dist)
    return set(meta['files']) if meta else set()
def which_package(in_prefix_path, prefix):
    """
    given the path of a conda installed file iterate over
    the conda packages the file came from. Usually the iteration yields
    only one package.
    """
    for dist in linked(prefix):
        # Package manifests always use '/' separators, so normalise the
        # queried path before the membership test (matters on Windows).
        if in_prefix_path.replace(os.sep, '/') in dist_files(prefix, dist):
            yield dist
def print_object_info(info, key):
    """Render per-file object metadata grouped by the value of ``key``.

    ``info`` is a list of dicts (one per file).  Entries are grouped by
    ``key``, listed per file name, skipping None values and the grouping
    key itself.
    """
    output_string = ""
    gb = groupby(key, info)
    for header in sorted(gb, key=str):
        output_string += header + "\n"
        for f_info in sorted(gb[header], key=getter('filename')):
            for data in sorted(f_info):
                if data == key:
                    continue
                if f_info[data] is None:
                    continue
                output_string += '  %s: %s\n' % (data, f_info[data])
            # Blank line between entries that printed more than one attribute.
            if len([i for i in f_info if f_info[i] is not None and i != key]) > 1:
                output_string += '\n'
        output_string += '\n'
    return output_string
class _untracked_package:
    """Sentinel type standing in for files that belong to no conda package."""
    def __str__(self):
        return "<untracked>"
# Shared singleton; comparisons against this sentinel are used throughout
# this module wherever "untracked" is a valid pseudo-package.
untracked_package = _untracked_package()
def check_install(packages, platform=None, channel_urls=(), prepend=True,
                  minimal_hint=False):
    """Dry-run an install of ``packages`` into a throwaway prefix.

    Builds the channel index, resolves the install actions, prints them,
    and returns the actions dict.  The scratch prefix is always removed.
    Resolver errors propagate to the caller.
    """
    prefix = tempfile.mkdtemp('conda')
    try:
        specs = specs_from_args(packages)
        index = get_index(channel_urls=channel_urls, prepend=prepend,
                          platform=platform, prefix=prefix)
        actions = install_actions(prefix, index, specs, pinned=False,
                                  minimal_hint=minimal_hint)
        display_actions(actions, index)
        return actions
    finally:
        # Clean up the scratch prefix whether resolution succeeded or not.
        # (The original trailing `return None` after this block was
        # unreachable — the try body always returns or raises — and has
        # been removed.)
        rm_rf(prefix)
def print_linkages(depmap, show_files=False):
    """Format a linkage map {package: [(lib, path[, binary]), ...]} as text.

    Regular packages are listed alphabetically; the pseudo-entries
    'system' and 'not found' always come last.
    """
    # Print system and not found last
    dist_depmap = {}
    for k, v in depmap.items():
        # Normalise Dist-like keys to their plain name so sorting works.
        if hasattr(k, 'dist_name'):
            k = k.dist_name
        dist_depmap[k] = v
    depmap = dist_depmap
    k = sorted(set(depmap.keys()) - {'system', 'not found'})
    all_deps = k if 'not found' not in depmap.keys() else k + ['system', 'not found']
    output_string = ""
    for dep in all_deps:
        output_string += "%s:\n" % dep
        if show_files:
            for lib, path, binary in sorted(depmap[dep]):
                output_string += "    %s (%s) from %s\n" % (lib, path, binary)
        else:
            # Collapse duplicate (lib, path) pairs when binaries are hidden.
            for lib, path in sorted(set(map(itemgetter(0, 1), depmap[dep]))):
                output_string += "    %s (%s)\n" % (lib, path)
        output_string += "\n"
    return output_string
def replace_path(binary, path, prefix):
    """Resolve a dynamic-library ``path`` referenced by ``binary`` to an
    absolute path, expanding the macOS @rpath / @loader_path placeholders.

    Returns 'not found' (or 'NO LC_RPATH FOUND') when the reference cannot
    be resolved.  Only Linux and macOS are handled; on any other platform
    the function falls through and implicitly returns None.
    """
    if sys.platform.startswith('linux'):
        return abspath(path)
    elif sys.platform.startswith('darwin'):
        # A bare filename refers to the binary itself.
        if path == basename(binary):
            return abspath(join(prefix, binary))
        if '@rpath' in path:
            rpaths = get_rpaths(join(prefix, binary))
            if not rpaths:
                return "NO LC_RPATH FOUND"
            else:
                # Try each LC_RPATH entry until one yields an existing file.
                for rpath in rpaths:
                    path1 = path.replace("@rpath", rpath)
                    path1 = path1.replace('@loader_path', join(prefix, dirname(binary)))
                    if exists(abspath(join(prefix, path1))):
                        path = path1
                        break
                else:
                    return 'not found'
        path = path.replace('@loader_path', join(prefix, dirname(binary)))
        if path.startswith('/'):
            return abspath(path)
        return 'not found'
def test_installable(channel='defaults'):
    """Dry-run install every package in ``channel`` on all platforms.

    Returns True when every package resolved; failures are logged and
    accumulated rather than aborting the sweep.
    """
    success = True
    log = get_logger(__name__)
    # Matches 'pyXY' build-string tags, e.g. py27 / py36.
    has_py = re.compile(r'py(\d)(\d)')
    for platform in ['osx-64', 'linux-32', 'linux-64', 'win-32', 'win-64']:
        log.info("######## Testing platform %s ########", platform)
        channels = [channel]
        index = get_index(channel_urls=channels, prepend=False, platform=platform)
        for _, rec in iteritems(index):
            # If we give channels at the command line, only look at
            # packages from those channels (not defaults).
            if channel != 'defaults' and rec.get('schannel', 'defaults') == 'defaults':
                continue
            name = rec['name']
            if name in {'conda', 'conda-build'}:
                # conda can only be installed in the root environment
                continue
            if name.endswith('@'):
                # this is a 'virtual' feature record that conda adds to the index for the solver
                # and should be ignored here
                continue
            # Don't fail just because the package is a different version of Python
            # than the default. We should probably check depends rather than the
            # build string.
            build = rec['build']
            match = has_py.search(build)
            assert match if 'py' in build else True, build
            if match:
                # Pin the matching Python so the solver doesn't reject the pkg.
                additional_packages = ['python=%s.%s' % (match.group(1), match.group(2))]
            else:
                additional_packages = []
            version = rec['version']
            log.info('Testing %s=%s', name, version)
            try:
                install_steps = check_install([name + '=' + version] + additional_packages,
                                              channel_urls=channels, prepend=False,
                                              platform=platform)
                success &= bool(install_steps)
            except KeyboardInterrupt:
                raise
            # sys.exit raises an exception that doesn't subclass from Exception
            except BaseException as e:
                success = False
                log.error("FAIL: %s %s on %s with %s (%s)", name, version,
                          platform, additional_packages, e)
    return success
def _installed(prefix):
    """Map package name -> Dist for everything linked into ``prefix``."""
    return {
        record['name']: dist
        for dist, record in iteritems(linked_data(prefix))
    }
def _underlined_text(text):
return str(text) + '\n' + '-' * len(str(text)) + '\n\n'
def inspect_linkages(packages, prefix=sys.prefix, untracked=False,
                     all_packages=False, show_files=False, groupby="package", sysroot=""):
    """Report which packages provide the shared libraries each package links.

    Builds a per-package map of {dependency-package: [(lib, path, binary)]}
    and renders it grouped either by package or by dependency.  Only
    implemented on Linux and macOS; exits via sys.exit() otherwise.
    """
    pkgmap = {}
    installed = _installed(prefix)
    if not packages and not untracked and not all_packages:
        raise ValueError("At least one package or --untracked or --all must be provided")
    if all_packages:
        packages = sorted(installed.keys())
    if untracked:
        # The sentinel is handled specially in the loop below.
        packages.append(untracked_package)
    for pkg in ensure_list(packages):
        if pkg == untracked_package:
            dist = untracked_package
        elif pkg not in installed:
            sys.exit("Package %s is not installed in %s" % (pkg, prefix))
        else:
            dist = installed[pkg]
        if not sys.platform.startswith(('linux', 'darwin')):
            sys.exit("Error: conda inspect linkages is only implemented in Linux and OS X")
        if dist == untracked_package:
            obj_files = get_untracked_obj_files(prefix)
        else:
            obj_files = get_package_obj_files(dist, prefix)
        linkages = get_linkages(obj_files, prefix, sysroot)
        depmap = defaultdict(list)
        pkgmap[pkg] = depmap
        # Ensure the pseudo-entries exist even when empty so printing is stable.
        depmap['not found'] = []
        depmap['system'] = []
        for binary in linkages:
            for lib, path in linkages[binary]:
                path = replace_path(binary, path, prefix) if path not in {'',
                                                                          'not found'} else path
                if path.startswith(prefix):
                    # Library lives inside the prefix: attribute it to a package.
                    in_prefix_path = re.sub('^' + prefix + '/', '', path)
                    deps = list(which_package(in_prefix_path, prefix))
                    if len(deps) > 1:
                        deps_str = [str(dep) for dep in deps]
                        get_logger(__name__).warn("Warning: %s comes from multiple "
                                                  "packages: %s", path, comma_join(deps_str))
                    if not deps:
                        if exists(path):
                            depmap['untracked'].append((lib, path.split(prefix +
                                '/', 1)[-1], binary))
                        else:
                            depmap['not found'].append((lib, path.split(prefix +
                                '/', 1)[-1], binary))
                    for d in deps:
                        depmap[d].append((lib, path.split(prefix + '/',
                            1)[-1], binary))
                elif path == 'not found':
                    depmap['not found'].append((lib, path, binary))
                else:
                    # Anything outside the prefix is treated as a system library.
                    depmap['system'].append((lib, path, binary))
    output_string = ""
    if groupby == 'package':
        for pkg in packages:
            output_string += _underlined_text(pkg)
            output_string += print_linkages(pkgmap[pkg], show_files=show_files)
    elif groupby == 'dependency':
        # {pkg: {dep: [files]}} -> {dep: {pkg: [files]}}
        inverted_map = defaultdict(lambda: defaultdict(list))
        for pkg in pkgmap:
            for dep in pkgmap[pkg]:
                if pkgmap[pkg][dep]:
                    inverted_map[dep][pkg] = pkgmap[pkg][dep]
        # print system and not found last
        k = sorted(set(inverted_map.keys()) - {'system', 'not found'})
        for dep in k + ['system', 'not found']:
            output_string += _underlined_text(dep)
            output_string += print_linkages(inverted_map[dep], show_files=show_files)
    else:
        raise ValueError("Unrecognized groupby: %s" % groupby)
    # Python 2 compatibility: decode byte strings before returning.
    if hasattr(output_string, 'decode'):
        output_string = output_string.decode('utf-8')
    return output_string
def inspect_objects(packages, prefix=sys.prefix, groupby='package'):
    """Report Mach-O metadata (file type, rpaths) for each package's objects.

    macOS only; exits via sys.exit() on other platforms.  Output is grouped
    by ``groupby`` via print_object_info().
    """
    installed = _installed(prefix)
    output_string = ""
    for pkg in ensure_list(packages):
        if pkg == untracked_package:
            dist = untracked_package
        elif pkg not in installed:
            raise ValueError("Package %s is not installed in %s" % (pkg, prefix))
        else:
            dist = installed[pkg]
        output_string += _underlined_text(pkg)
        if not sys.platform.startswith('darwin'):
            sys.exit("Error: conda inspect objects is only implemented in OS X")
        if dist == untracked_package:
            obj_files = get_untracked_obj_files(prefix)
        else:
            obj_files = get_package_obj_files(dist, prefix)
        info = []
        for f in obj_files:
            f_info = {}
            path = join(prefix, f)
            filetype = codefile_type(path)
            # Only Mach-O files get a human-readable filetype label.
            if filetype == 'machofile':
                f_info['filetype'] = human_filetype(path)
            f_info['rpath'] = ':'.join(get_rpaths(path))
            f_info['filename'] = f
            info.append(f_info)
        output_string += print_object_info(info, groupby)
    # Python 2 compatibility: decode byte strings before returning.
    if hasattr(output_string, 'decode'):
        output_string = output_string.decode('utf-8')
    return output_string
def get_hash_input(packages):
    """Extract and parse info/hash_input.json from each package archive.

    Keys are archive basenames minus the 8-character '.tar.bz2' suffix; the
    value is {'recipe': <parsed json>} or a placeholder string when the
    archive carries no hash_input.json.
    """
    hash_inputs = {}
    for package in ensure_list(packages):
        name = os.path.basename(package)[:-8]
        raw = package_has_file(package, 'info/hash_input.json')
        if raw:
            hash_inputs[name] = {'recipe': json.loads(raw)}
        else:
            hash_inputs[name] = "<no hash_input.json in file>"
    return hash_inputs
| 37.949555 | 100 | 0.570412 |
7958553ac4e5bd69c83f78c91705554bafebb2a3 | 1,750 | py | Python | mesonui/mesonuilib/mesonbuild/wrap.py | michaelbadcrumble/meson-ui | e2db2b87872d87c101001b87fe48c5172bb8d5ff | [
"Apache-2.0"
] | 24 | 2019-10-14T21:17:27.000Z | 2020-05-28T11:35:14.000Z | mesonui/mesonuilib/mesonbuild/wrap.py | michaelbrockus/meson-ui | e2db2b87872d87c101001b87fe48c5172bb8d5ff | [
"Apache-2.0"
] | 52 | 2019-11-30T18:35:25.000Z | 2020-04-25T22:09:35.000Z | mesonui/mesonuilib/mesonbuild/wrap.py | michaelbadcrumble/meson-ui | e2db2b87872d87c101001b87fe48c5172bb8d5ff | [
"Apache-2.0"
] | 2 | 2019-10-15T01:11:18.000Z | 2019-11-03T06:36:08.000Z | #!/usr/bin/env python3
#
# author : Michael Brockus.
# contact: <mailto:michaelbrockus@gmail.com>
# license: Apache 2.0 :http://www.apache.org/licenses/LICENSE-2.0
#
# copyright 2020 The Meson-UI development team
#
import subprocess
class MesonWrap:
    """Thin wrapper around the ``meson wrap`` subcommands.

    Every method shells out to ``meson wrap <subcommand> [args]`` and
    returns the command's captured stdout as a string.
    """
    def __init__(self):
        super().__init__()

    def _run(self, *wrap_args) -> str:
        # All six subcommands share the exact same invocation pattern, so
        # funnel them through one helper instead of repeating the Popen
        # boilerplate (the original duplicated these three lines per method).
        run_cmd = ['meson', 'wrap', *wrap_args]
        process = subprocess.Popen(run_cmd, encoding='utf8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return process.communicate()[0]

    def update(self, wrap_args) -> str:
        """Run ``meson wrap update <wrap_args>`` and return its stdout."""
        return self._run('update', wrap_args)

    def search(self, wrap_args) -> str:
        """Run ``meson wrap search <wrap_args>`` and return its stdout."""
        return self._run('search', wrap_args)

    def info(self, wrap_args) -> str:
        """Run ``meson wrap info <wrap_args>`` and return its stdout."""
        return self._run('info', wrap_args)

    def install(self, wrap_args) -> str:
        """Run ``meson wrap install <wrap_args>`` and return its stdout."""
        return self._run('install', wrap_args)

    def list_wraps(self) -> str:
        """Run ``meson wrap list`` and return its stdout."""
        return self._run('list')

    def status(self) -> str:
        """Run ``meson wrap status`` and return its stdout."""
        return self._run('status')
7958559017215d4078254ce79ccacd1397480721 | 370 | py | Python | multiple-languages/python/ros-cdk-ram-1.0.3/src/ros_cdk_ram/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 15 | 2020-11-10T02:00:28.000Z | 2022-02-07T19:28:10.000Z | multiple-languages/python/ros-cdk-ram-1.0.3/src/ros_cdk_ram/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 23 | 2021-02-02T04:37:02.000Z | 2022-03-31T06:41:06.000Z | multiple-languages/python/ros-cdk-ram-1.0.3/src/ros_cdk_ram/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 4 | 2021-01-13T05:48:43.000Z | 2022-03-15T11:26:48.000Z | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import constructs._jsii
import ros_cdk_core._jsii
# Load the bundled JSII assembly for this package/version so the generated
# proxy classes can resolve their runtime types.
__jsii_assembly__ = jsii.JSIIAssembly.load(
    "@alicloud/ros-cdk-ram", "1.0.3", __name__[0:-6], "ros-cdk-ram@1.0.3.jsii.tgz"
)
# Only the assembly handle is part of this helper module's public API.
__all__ = [
    "__jsii_assembly__",
]
publication.publish()
| 16.086957 | 82 | 0.759459 |
795858ae611463a52d07be350e64de140fa839c6 | 4,503 | py | Python | autoencoder/trainer.py | Silver-L/keras_projects | 92124dc2ec4adc4c0509d0ee0655ebd39b8ba937 | [
"MIT"
] | null | null | null | autoencoder/trainer.py | Silver-L/keras_projects | 92124dc2ec4adc4c0509d0ee0655ebd39b8ba937 | [
"MIT"
] | null | null | null | autoencoder/trainer.py | Silver-L/keras_projects | 92124dc2ec4adc4c0509d0ee0655ebd39b8ba937 | [
"MIT"
] | null | null | null | """
* @Trainer
* @Author: Zhihui Lu
* @Date: 2018/07/21
"""
import os
import numpy as np
import argparse
import dataIO as io
from random import randint
import matplotlib.pyplot as plt
import pickle
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from keras.optimizers import Adam
from keras import losses
from keras.callbacks import ModelCheckpoint
from model import Autoencoder
def main():
    """Train the autoencoder on data listed in the CLI-provided text files.

    Loads train/validation volumes and their ground truth, builds the
    Autoencoder for the observed volume shape, and fits it with periodic
    checkpointing before plotting the loss history.
    """
    parser = argparse.ArgumentParser(description='py, train_data_txt, train_data_ture_txt, validation_data_txt, outdir')
    parser.add_argument('--train_data_txt', '-i1', default='',
                        help='train data list')
    parser.add_argument('--train_ground_truth_txt', '-i2',
                        default='',
                        help='train ground truth list')
    parser.add_argument('--validation_data_txt', '-i3', default='',
                        help='validation data list')
    parser.add_argument('--validation_ground_truth_txt', '-i4',
                        default='',
                        help='validation ground truth list')
    parser.add_argument('--outdir', '-i5', default='', help='outdir')
    args = parser.parse_args()
    # check folder
    if not (os.path.exists(args.outdir)):
        os.mkdir(args.outdir)
    # define
    batch_size = 3
    epoch = 2500
    # load train data (expand a trailing channel axis for the 3-D conv net)
    train_data = io.load_matrix_data(args.train_data_txt, 'float32')
    train_data = np.expand_dims(train_data, axis=4)
    # load train ground truth
    train_truth = io.load_matrix_data(args.train_ground_truth_txt, 'float32')
    train_truth = np.expand_dims(train_truth, axis=4)
    # load validation data
    val_data = io.load_matrix_data(args.validation_data_txt, 'float32')
    val_data = np.expand_dims(val_data, axis=4)
    # load validation ground truth
    val_truth = io.load_matrix_data(args.validation_ground_truth_txt, 'float32')
    val_truth = np.expand_dims(val_truth, axis=4)
    print(' number of training: {}'.format(len(train_data)))
    print('size of traning: {}'.format(train_data.shape))
    print(' number of validation: {}'.format(len(val_data)))
    print('size of validation: {}'.format(val_data.shape))
    # Spatial dimensions (axes 1-3) drive the network architecture.
    image_size = []
    image_size.extend([list(train_data.shape)[1], list(train_data.shape)[2], list(train_data.shape)[3]])
    # set network
    network = Autoencoder(*image_size)
    model = network.model()
    model.summary()
    model.compile(optimizer='Nadam', loss=losses.mean_squared_error, metrics=['mse'])
    # set data_set (note: train_data/val_data are rebound to generators here)
    train_steps, train_data = batch_iter(train_data, train_truth, batch_size)
    valid_steps, val_data = batch_iter(val_data, val_truth, batch_size)
    # fit network, checkpointing weights every epoch
    model_checkpoint = ModelCheckpoint(os.path.join(args.outdir, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
                                       verbose=1)
    history = model.fit_generator(train_data, steps_per_epoch=train_steps, epochs=epoch, validation_data=val_data,
                                  validation_steps=valid_steps, verbose=1, callbacks=[model_checkpoint])
    plot_history(history, args.outdir)
# data generator
def batch_iter(data, labels, batch_size, shuffle=True):
    """Yield endless (X, y) mini-batches over ``data``/``labels``.

    Returns ``(steps_per_epoch, generator)``.  When ``shuffle`` is True the
    two arrays are re-permuted in unison at the start of every epoch.
    """
    sample_count = len(data)
    steps_per_epoch = int((sample_count - 1) / batch_size) + 1

    def _epochs():
        while True:
            # Re-shuffle (or not) once per pass over the data.
            if shuffle:
                order = np.random.permutation(np.arange(sample_count))
                epoch_data, epoch_labels = data[order], labels[order]
            else:
                epoch_data, epoch_labels = data, labels
            for step in range(steps_per_epoch):
                lo = step * batch_size
                hi = min(lo + batch_size, sample_count)
                yield epoch_data[lo:hi], epoch_labels[lo:hi]

    return steps_per_epoch, _epochs()
# plot loss
def plot_history(history, path):
    """Plot training/validation loss curves and pickle the legend handle.

    :param history: Keras ``History`` object returned by ``model.fit*``.
    :param path: directory that receives ``loss.pickle``.
    """
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    # NOTE(review): plt.legend() returns a Legend object, not a Figure, so
    # the pickle stores the legend handle — confirm whether the whole figure
    # (plt.gcf()) was intended.
    legend = plt.legend(['loss', 'val_loss'], loc='lower right')
    # BUG FIX: the file handle was previously opened and never closed; use a
    # context manager so the pickle is flushed and the descriptor released.
    with open(os.path.join(path, 'loss.pickle'), 'wb') as pickle_file:
        pickle.dump(legend, pickle_file)
    plt.show()
if __name__ == '__main__':
main()
| 33.355556 | 120 | 0.650899 |
79585aa28af20826d157fe3d13a487a9c0e17d4e | 1,374 | py | Python | CTCI Solutions/arrays/checkPermutation.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | null | null | null | CTCI Solutions/arrays/checkPermutation.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | 1 | 2020-07-19T15:40:25.000Z | 2020-07-19T15:40:25.000Z | CTCI Solutions/arrays/checkPermutation.py | kamwithak/competitiveProgramming | ab4433568081900212a8a987d7bf8cb78d2698d1 | [
"MIT"
] | null | null | null | '''
PROBLEM STATEMENT:
Check Permutation:
Given two strings, write a method to decide if one is a permutation of the other.
'''
'''
ANS:
We can either sort the strings individually, and compare if the resulting strings are equivalent
Although not optimal, it is clean and easy to understand.
Or, we can count the number of occurences for each character, and check if that is equivalent
'''
'''
Time: O(n*log(n)), depending on sorting implementation. For ex) BubbleSort would be O(n^2)
Space: O(n), array
'''
def checkPermutation1(str1: str, str2: str) -> bool:
    """Return True when str2 is a permutation of str1 (sort-and-compare).

    Time: O(n log n) from sorting; Space: O(n) for the sorted copies.
    """
    # Strings of different lengths can never be permutations of each other.
    return len(str1) == len(str2) and sorted(str1) == sorted(str2)
#print(checkPermutation1("cba", "abc"))
'''
Time: O(n), using a hashtable for constant time look up
Space: O(n), atmost n key-value pairs
'''
def checkPermutation2(str1: str, str2: str) -> bool:
    """Return True when str2 is a permutation of str1.

    Time: O(n) with a single counting pass per string; Space: O(k) for the
    frequency tables.  Uses collections.Counter instead of the original
    pair of hand-rolled counting dicts — same semantics, less code.
    """
    from collections import Counter
    if len(str1) != len(str2):
        return False
    return Counter(str1) == Counter(str2)
#print(checkPermutation2("cba", "cba"))
| 26.423077 | 97 | 0.596798 |
79585b15689f5bb3e964ca74debf043aaa55e681 | 1,389 | py | Python | two_factor_authentication/models.py | Parveen3300/Reans | 6dfce046b01099284a8c945a04600ed83e5099a4 | [
"Apache-2.0"
] | null | null | null | two_factor_authentication/models.py | Parveen3300/Reans | 6dfce046b01099284a8c945a04600ed83e5099a4 | [
"Apache-2.0"
] | null | null | null | two_factor_authentication/models.py | Parveen3300/Reans | 6dfce046b01099284a8c945a04600ed83e5099a4 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
# import Customer Models
from customer.models import CustomerProfile
class OTP(models.Model):
    """One-time password issued for phone/email customer verification.

    Tracks the code itself, which channel it was sent over, its delivery
    lifecycle, and an expiry timestamp.
    """
    # The numeric one-time code.
    otp = models.PositiveIntegerField()
    # Set once the customer successfully submits this code.
    is_verified = models.BooleanField(default=False)
    # Channel the code was delivered over.
    auth_type = models.CharField(max_length=20, choices=[
        ('phone', 'Phone Number'), ('email', 'Email ID')
    ])
    # Delivery/consumption lifecycle of the code.
    otp_status = models.CharField(max_length=20,
                                  verbose_name='OTP Status',
                                  choices=[('delivered', 'Delivered'),
                                           ('not_delivered', 'Not Delivered'),
                                           ('successful', 'Successful'),
                                           ('expired', 'Expired')])
    created_at = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Created At'
    )
    # Moment after which the code must no longer be accepted.
    expired_datetime = models.DateTimeField(verbose_name="Expired At")
    # Optional link to the customer the code was issued for.
    customer = models.ForeignKey(
        CustomerProfile,
        on_delete=models.CASCADE,
        null=True,
        blank=True
    )
    class Meta:
        verbose_name = ' OTP Management'
        verbose_name_plural = ' OTP Management'
        db_table = 'otp_management'
        ordering = ['-created_at']
    def __str__(self):
        return str(self.otp)
79585b22be64eb9cc61e1e079cbc653221cdc844 | 7,344 | py | Python | tests/test_create_glb.py | IBM/cis-integration | fbf1f5c2df57b846499856ca927d2de15b4b375c | [
"Apache-2.0"
] | 4 | 2021-08-04T16:58:18.000Z | 2022-02-16T03:28:46.000Z | tests/test_create_glb.py | IBM/cis-integration | fbf1f5c2df57b846499856ca927d2de15b4b375c | [
"Apache-2.0"
] | 16 | 2021-07-13T13:50:31.000Z | 2021-08-13T15:31:32.000Z | tests/test_create_glb.py | IBM/cis-integration | fbf1f5c2df57b846499856ca927d2de15b4b375c | [
"Apache-2.0"
] | null | null | null | from _pytest.monkeypatch import resolve
import pytest
import os
from dotenv import load_dotenv
from pathlib import Path
from requests.models import Response
from src.ce.create_glb import GLB
from ibm_cloud_sdk_core.detailed_response import DetailedResponse
from ibm_cloud_networking_services import GlobalLoadBalancerPoolsV0
from ibm_cloud_networking_services import GlobalLoadBalancerV1
from ibm_cloud_networking_services import GlobalLoadBalancerMonitorV1
# custom class to be a mock Global Load Balancer Monitor object
# will override the GlobalLoadBalancerMonitorV1 object in create_glb.py
class MockGlobalLoadBalancerMonitorV1:
    """Stand-in for GlobalLoadBalancerMonitorV1 used by the monitor tests."""

    def create_load_balancer_monitor(self, description, crn, type, expected_codes, follow_redirects):
        # Echo the request parameters back as a fake monitor record.
        fake_monitor = {
            "id": "testId",
            "description": description,
            "type": type,
            "expected_codes": expected_codes,
            "follow_redirects": follow_redirects,
        }
        return DetailedResponse(response={"result": fake_monitor})
def test_create_load_balancer_monitor(monkeypatch):
    """create_load_balancer_monitor() should build the default health check."""
    # Replace the SDK factory so GLB receives our mock monitor client.
    monkeypatch.setattr(
        GlobalLoadBalancerMonitorV1, "new_instance",
        lambda *args, **kwargs: MockGlobalLoadBalancerMonitorV1())

    creator = glb_creator()
    result = creator.create_load_balancer_monitor()["result"]
    assert result["description"] == "default health check"
    assert result["type"] == "https"
    assert result["expected_codes"] == "2xx"
    assert result["follow_redirects"] == True
# custom class to be a mock Global Load Balancer Pools object
# will override the GlobalLoadBalancerPoolsV0 object in create_glb.py
class MockGlobalLoadBalancerPoolsV0:
    """Stand-in for GlobalLoadBalancerPoolsV0 with no pre-existing pools."""

    def list_all_load_balancer_pools(self):
        # An empty pool list forces the create (not edit) code path.
        return DetailedResponse(response={"result": []})

    def create_load_balancer_pool(self, name, origins, enabled, monitor):
        fake_pool = {
            "id": "testId",
            "name": name,
            "enabled": enabled,
            "monitor": monitor,
            "origins": origins,
        }
        return DetailedResponse(response={"result": fake_pool})
def test_create_origin_pool(monkeypatch):
    """create_origin_pool() should create the enabled default pool."""
    # Replace the SDK factory so GLB receives our mock pools client.
    monkeypatch.setattr(
        GlobalLoadBalancerPoolsV0, "new_instance",
        lambda *args, **kwargs: MockGlobalLoadBalancerPoolsV0())

    creator = glb_creator()
    result = creator.create_origin_pool()["result"]
    assert result["name"] == 'default-pool'
    assert result["enabled"] == True
    assert result["origins"] == [{"name": 'default-origin', "address": "test.com", "enabled": True, "weight": 1}]
class MockGlobalLoadBalancerV1:
    """Stand-in for GlobalLoadBalancerV1 with no pre-existing load balancers."""

    def set_service_url(self, url):
        # The mock has no real endpoint; accept and ignore the URL.
        pass

    def list_all_load_balancers(self):
        # An empty list forces the create (not edit) code path.
        return DetailedResponse(response={"result": []})

    def create_load_balancer(self, name, default_pools, fallback_pool, enabled=True, proxied=True):
        fake_glb = {
            "id": "testId",
            "name": name,
            "enabled": enabled,
            "default_pools": default_pools,
            "fallback_pool": fallback_pool,
            "proxied": proxied,
        }
        return DetailedResponse(response={"result": fake_glb})
def test_create_global_load_balancer(monkeypatch):
    """create_global_load_balancer() should create an enabled, proxied GLB."""
    # Replace the SDK factory so GLB receives our mock client.
    monkeypatch.setattr(
        GlobalLoadBalancerV1, "new_instance",
        lambda *args, **kwargs: MockGlobalLoadBalancerV1())

    creator = glb_creator()
    result = creator.create_global_load_balancer()["result"]
    assert result["name"] == "test.com"
    assert result["enabled"] == True
    assert result["proxied"] == True
# custom class to be a mock Global Load Balancer object
# will override the GlobalLoadBalancerV1 object in create_glb.py
class MockExistingGlobalLoadBalancerV1:
    """Stand-in for GlobalLoadBalancerV1 that already holds one load balancer."""

    def __init__(self):
        # Seed one record so the edit (not create) code path is exercised.
        self.load_balancers_list = [{
            "id": "testId",
            "created_on": "testDate",
            "modified_on": "testDate",
            "name": "test.com",
        }]

    def set_service_url(self, url):
        # The mock has no real endpoint; accept and ignore the URL.
        pass

    def list_all_load_balancers(self):
        return DetailedResponse(response={"result": self.load_balancers_list})

    def edit_load_balancer(self, global_load_balancer_id, name, default_pools,
                           fallback_pool, enabled, proxied):
        # Update the matching record in place and return it.
        for entry in self.load_balancers_list:
            if entry['id'] != global_load_balancer_id:
                continue
            entry.update(name=name, default_pools=default_pools,
                         fallback_pool=fallback_pool, enabled=enabled,
                         proxied=proxied)
            return DetailedResponse(response={"result": entry})
def test_edit_global_load_balancer(monkeypatch):
    """An existing load balancer should be edited rather than recreated."""
    # Replace the SDK factory so GLB sees a pre-existing load balancer.
    monkeypatch.setattr(
        GlobalLoadBalancerV1, "new_instance",
        lambda *args, **kwargs: MockExistingGlobalLoadBalancerV1())

    response = glb_creator().create_global_load_balancer()
    edited = response.result["result"]
    assert edited["name"] == "test.com"
    assert edited["enabled"] == True
    assert edited["proxied"] == True
# custom class to be a mock Global Load Balancer Pools object
# will override the GlobalLoadBalancerPoolsV0 object in create_glb.py
class MockExistingGlobalLoadBalancerPoolsV0:
    """Stand-in for GlobalLoadBalancerPoolsV0 that already holds one pool."""

    def __init__(self):
        # Seed one pool so the edit (not create) code path is exercised.
        self.origin_pool_list = [{
            "id": "testId",
            "created_on": "testDate",
            "modified_on": "testDate",
            "name": "default-pool",
        }]

    def list_all_load_balancer_pools(self):
        return DetailedResponse(response={"result": self.origin_pool_list})

    def edit_load_balancer_pool(self, origin_pool_id, name, origins, enabled, monitor):
        # Update the matching pool in place and return it.
        for entry in self.origin_pool_list:
            if entry['id'] != origin_pool_id:
                continue
            entry.update(name=name, origins=origins, monitor=monitor,
                         enabled=enabled)
            return DetailedResponse(response={"result": entry})
def test_edit_origin_pool(monkeypatch):
    """An existing origin pool should be edited rather than recreated."""
    # Replace the SDK factory so GLB sees a pre-existing pool.
    monkeypatch.setattr(
        GlobalLoadBalancerPoolsV0, "new_instance",
        lambda *args, **kwargs: MockExistingGlobalLoadBalancerPoolsV0())

    response = glb_creator().create_origin_pool()
    edited = response.result["result"]
    assert edited["name"] == 'default-pool'
    assert edited["enabled"] == True
    assert edited["origins"] == [{"name": 'default-origin', "address": "test.com", "enabled": True, "weight": 1}]
def glb_creator():
    """Build a GLB helper wired with placeholder credentials and domain."""
    test_settings = {
        "crn": "testString",
        "zone_identifier": "testString",
        "api_endpoint": "test-endpoint.com",
        "domain": "test.com",
    }
    return GLB(**test_settings)
| 40.131148 | 189 | 0.706836 |
79585b421ee1d5d3bfa07877e33bec8f3b26aed9 | 19 | py | Python | grappelli/__init__.py | sistemasbuho/django-grappelli | 1ccd6a2b1fe0dc92034005533f02052ceb270a2d | [
"BSD-3-Clause"
] | 1 | 2017-10-24T03:26:48.000Z | 2017-10-24T03:26:48.000Z | grappelli/__init__.py | sistemasbuho/django-grappelli | 1ccd6a2b1fe0dc92034005533f02052ceb270a2d | [
"BSD-3-Clause"
] | 1 | 2018-12-23T01:42:55.000Z | 2018-12-23T11:09:52.000Z | grappelli/__init__.py | sistemasbuho/django-grappelli | 1ccd6a2b1fe0dc92034005533f02052ceb270a2d | [
"BSD-3-Clause"
] | 1 | 2018-12-11T22:44:44.000Z | 2018-12-11T22:44:44.000Z | VERSION = '2.10.1'
| 9.5 | 18 | 0.578947 |
79585bade038761c22b1329362aa1e10466fdc94 | 34,221 | py | Python | logscraper/tests/test_logsender.py | openstack/ci-log-processing | 1990b7c08f337035ce1136a4e091050bd7bd37ea | [
"Apache-2.0"
] | null | null | null | logscraper/tests/test_logsender.py | openstack/ci-log-processing | 1990b7c08f337035ce1136a4e091050bd7bd37ea | [
"Apache-2.0"
] | null | null | null | logscraper/tests/test_logsender.py | openstack/ci-log-processing | 1990b7c08f337035ce1136a4e091050bd7bd37ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (C) 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import io
from logscraper import logsender
from logscraper.tests import base
from opensearchpy.exceptions import TransportError
from ruamel.yaml import YAML
from unittest import mock
buildinfo = """
_id: 17428524
branch: master
build_args:
checkpoint_file: /tmp/results-checkpoint
debug: false
directory: /tmp/logscraper
download: true
follow: false
gearman_port: 4730
gearman_server: null
insecure: true
job_name: null
logstash_url: null
max_skipped: 500
workers: 32
zuul_api_url:
- https://zuul.opendev.org/api/tenant/openstack
buildset:
uuid: 52b29e0e716a4436bd20eed47fa396ce
change: 829161
duration: 1707.0
end_time: '2022-02-28T10:07:36'
error_detail: null
event_id: dda0cbf9caaa496b9127a7646b8a28a8
event_timestamp: '2022-02-28T09:32:08'
final: true
held: false
job_name: openstack-tox-py39
log_url: https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/
newrev: null
nodeset: fedora-35
patchset: '3'
pipeline: check
project: openstack/neutron
provides: []
ref: refs/changes/61/829161/3
ref_url: https://review.opendev.org/829161
result: SUCCESS
start_time: '2022-02-28T09:39:09'
tenant: openstack
uuid: 38bf2cdc947643c9bb04f11f40a0f211
voting: true
"""
inventory_info = """
all:
hosts:
fedora-35:
ansible_connection: ssh
ansible_host: 127.0.0.1
ansible_port: 22
ansible_python_interpreter: auto
ansible_user: zuul
ara_compress_html: false
ara_report_path: ara-report
ara_report_type: html
bindep_profile: test py39
enable_fips: false
nodepool:
az: null
cloud: rax
external_id: 3b2da968-7ec3-4356-b12c-b55b574902f8
host_id: ed82a4a59ac22bf396288f0b93bf1c658af932130f9d336aad528f21
interface_ip: 127.0.0.2
label: fedora-35
private_ipv4: 127.0.0.3
private_ipv6: null
provider: rax-dfw
public_ipv4: 127.0.0.2
public_ipv6: ''
region: DFW
python_version: 3.9
tox_constraints_file: 'requirements/upper-constraints.txt'
tox_environment:
NOSE_HTML_OUT_FILE: nose_results.html
NOSE_WITH_HTML_OUTPUT: 1
NOSE_WITH_XUNIT: 1
tox_envlist: py39
vars:
ara_compress_html: false
ara_report_path: ara-report
ara_report_type: html
bindep_profile: test py39
enable_fips: false
python_version: 3.9
tox_constraints_file: 'requirements/upper-constraints.txt'
tox_environment:
NOSE_HTML_OUT_FILE: nose_results.html
NOSE_WITH_HTML_OUTPUT: 1
NOSE_WITH_XUNIT: 1
tox_envlist: py39
zuul:
_inheritance_path:
- 'some_path'
- 'some_path_2'
attempts: 1
branch: master
build: 38bf2cdc947643c9bb04f11f40a0f211
buildset: 52b29e0e716a4436bd20eed47fa396ce
change: '829161'
change_url: https://review.opendev.org/829161
child_jobs: []
event_id: dda0cbf9caaa496b9127a7646b8a28a8
executor:
hostname: ze07.opendev.org
inventory_file: /var/lib/zuul/builds/build/ansible/inventory.yaml
log_root: /var/lib/zuul/builds/build/work/logs
result_data_file: /var/lib/zuul/builds/build/work/results.json
src_root: /var/lib/zuul/builds/build/work/src
work_root: /var/lib/zuul/builds/build/work
items:
- branch: master
change: '828673'
change_url: https://review.opendev.org/828673
patchset: '4'
project:
canonical_hostname: opendev.org
canonical_name: opendev.org/openstack/neutron
name: openstack/neutron
short_name: neutron
src_dir: src/opendev.org/openstack/neutron
- branch: master
change: '829161'
change_url: https://review.opendev.org/829161
patchset: '3'
project:
canonical_hostname: opendev.org
canonical_name: opendev.org/openstack/neutron
name: openstack/neutron
short_name: neutron
src_dir: src/opendev.org/openstack/neutron
job: openstack-tox-py39
jobtags: []
message: Q3YmM0Y2QzNzhkMWZhOWE5ODYK
patchset: '3'
pipeline: check
playbook_context:
playbook_projects:
trusted/project_0/opendev.org/opendev/base-jobs:
canonical_name: opendev.org/opendev/base-jobs
checkout: master
commit: 19dc53290a26b20d5c2c5b1bb25f029c4b04a716
trusted/project_1/opendev.org/zuul/zuul-jobs:
canonical_name: opendev.org/zuul/zuul-jobs
checkout: master
commit: e160f59e0e76c7e8625ec2d174b044a7c92cd32e
untrusted/project_0/opendev.org/zuul/zuul-jobs:
canonical_name: opendev.org/zuul/zuul-jobs
checkout: master
commit: e160f59e0e76c7e8625ec2d174b044a7c92cd32e
untrusted/project_1/opendev.org/opendev/base-jobs:
canonical_name: opendev.org/opendev/base-jobs
checkout: master
commit: 19dc53290a26b20d5c2c5b1bb25f029c4b04a716
playbooks:
- path: untrusted/project/opendev/zuul/zuul-jobs/playbooks/tox/run.yaml
roles:
- checkout: master
checkout_description: zuul branch
link_name: ansible/playbook_0/role_0/base-jobs
link_target: untrusted/project_1/opendev.org/opendev/base-jobs
role_path: ansible/playbook_0/role_0/base-jobs/roles
- checkout: master
checkout_description: playbook branch
link_name: ansible/playbook_0/role_1/zuul-jobs
link_target: untrusted/project_0/opendev.org/zuul/zuul-jobs
role_path: ansible/playbook_0/role_1/zuul-jobs/roles
post_review: false
project:
canonical_hostname: opendev.org
canonical_name: opendev.org/openstack/neutron
name: openstack/neutron
short_name: neutron
src_dir: src/opendev.org/openstack/neutron
projects:
opendev.org/openstack/neutron:
canonical_hostname: opendev.org
canonical_name: opendev.org/openstack/neutron
checkout: master
checkout_description: zuul branch
commit: 7be5a0aff1123b381674191f3baa1ec9c128e0f3
name: openstack/neutron
required: false
short_name: neutron
src_dir: src/opendev.org/openstack/neutron
opendev.org/openstack/requirements:
canonical_hostname: opendev.org
canonical_name: opendev.org/openstack/requirements
checkout: master
checkout_description: zuul branch
commit: 48fb5c24764d91833d8ca7084ee9f183785becd6
name: openstack/requirements
required: true
short_name: requirements
src_dir: src/opendev.org/openstack/requirements
ref: refs/changes/61/829161/3
resources: {}
tenant: openstack
timeout: 3600
voting: true
"""
# Expected output of logsender.makeFields() for the two fixtures above;
# also reused as the per-document field set in the send_to_es tests.
parsed_fields = {
    'build_node': 'zuul-executor',
    'build_name': 'openstack-tox-py39',
    'build_status': 'SUCCESS',
    'project': 'openstack/neutron',
    'voting': 1,
    'build_set': '52b29e0e716a4436bd20eed47fa396ce',
    'build_queue': 'check',
    'build_ref': 'refs/changes/61/829161/3',
    'build_branch': 'master',
    'build_change': 829161,
    'build_patchset': '3',
    'build_newrev': 'UNKNOWN',
    'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
    'node_provider': 'local',
    'hosts_id': ['ed82a4a59ac22bf396288f0b93bf1c658af932130f9d336aad528f21'],
    'log_url':
    'https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/',
    'tenant': 'openstack',
    'zuul_executor': 'ze07.opendev.org'
}
def _parse_get_yaml(text):
    """Parse *text* as YAML and return the resulting data structure."""
    return YAML().load(text)
class _MockedPoolMapAsyncResult:
def __init__(self, func, iterable):
self.func = func
self.iterable = iterable
self.wait = mock.Mock()
# mocked results
self._value = [self.func(i) for i in iterable]
def get(self, timeout=0):
return self._value
class FakeArgs(object):
    """Minimal argparse-Namespace substitute holding logsender CLI options."""

    def __init__(self, config=None, directory=None, host=None, port=None,
                 username=None, password=None, index_prefix=None, index=None,
                 doc_type=None, insecure=None, follow=None, workers=None,
                 chunk_size=None, skip_debug=None, keep=None, debug=None,
                 wait_time=None):
        # Copy every keyword argument onto the instance under the same name,
        # mirroring what argparse does when it builds a Namespace.
        for option, value in list(locals().items()):
            if option != 'self':
                setattr(self, option, value)
class TestSender(base.TestCase):
    @mock.patch('logscraper.logsender.get_file_info')
    @mock.patch('logscraper.logsender.remove_directory')
    @mock.patch('logscraper.logsender.send_to_es')
    @mock.patch('logscraper.logsender.get_build_information')
    @mock.patch('logscraper.logsender.get_es_client')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
                directory="/tmp/testdir", doc_type='_doc',
                config='config.yaml'))
    def test_send(self, mock_args, mock_es_client, mock_build_info,
                  mock_send_to_es, mock_remove_dir, mock_info):
        """send() forwards per-file fields (log_url/filename/tags appended)
        to send_to_es and removes the build directory on success."""
        build_uuid = '38bf2cdc947643c9bb04f11f40a0f211'
        build_files = ['job-result.txt']
        directory = '/tmp/testdir'
        index = 'logstash-index'
        mock_build_info.return_value = parsed_fields
        mock_es_client.return_value = 'fake_client_object'
        tags = ['test', 'info']
        mock_info.return_value = ('job-result.txt', tags)
        # parsed_fields plus the per-file additions made by send():
        # file-specific log_url, filename and tags.
        expected_fields = {
            'build_node': 'zuul-executor', 'build_name': 'openstack-tox-py39',
            'build_status': 'SUCCESS', 'project': 'openstack/neutron',
            'voting': 1, 'build_set': '52b29e0e716a4436bd20eed47fa396ce',
            'build_queue': 'check', 'build_ref': 'refs/changes/61/829161/3',
            'build_branch': 'master', 'build_change': 829161,
            'build_patchset': '3', 'build_newrev': 'UNKNOWN',
            'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
            'node_provider': 'local', 'hosts_id':
            ['ed82a4a59ac22bf396288f0b93bf1c658af932130f9d336aad528f21'],
            'log_url': 'https://somehost/829161/3/check/openstack-tox-py39/'
            '38bf2cd/job-result.txt',
            'tenant': 'openstack', 'zuul_executor': 'ze07.opendev.org',
            'filename': 'job-result.txt',
            'tags': tags
        }
        args = logsender.get_arguments()
        mock_send_to_es.return_value = True
        logsender.send((build_uuid, build_files), args, directory, index)
        # Successful send: the scraped directory must be cleaned up.
        self.assertTrue(mock_remove_dir.called)
        mock_send_to_es.assert_called_with(
            "%s/%s/job-result.txt" % (directory, build_uuid), expected_fields,
            'fake_client_object', index, None, '_doc', None)
    @mock.patch('logscraper.logsender.get_file_info')
    @mock.patch('logscraper.logsender.remove_directory')
    @mock.patch('logscraper.logsender.send_to_es')
    @mock.patch('logscraper.logsender.get_build_information')
    @mock.patch('logscraper.logsender.get_es_client')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
                directory="/tmp/testdir", keep=True, doc_type="_doc"))
    def test_send_keep_dir(self, mock_args, mock_es_client, mock_build_info,
                           mock_send_to_es, mock_remove_dir, mock_info):
        """With --keep set, send() must never remove the build directory."""
        build_uuid = '38bf2cdc947643c9bb04f11f40a0f211'
        build_files = ['job-result.txt']
        directory = '/tmp/testdir'
        index = 'logstash-index'
        args = logsender.get_arguments()
        mock_info.return_value = ('somefile.txt', ['somefile.txt'])
        # No matter what the ES send status is, the directory is kept.
        mock_send_to_es.return_value = None
        logsender.send((build_uuid, build_files), args, directory, index)
        self.assertFalse(mock_remove_dir.called)
    @mock.patch('logscraper.logsender.get_file_info')
    @mock.patch('logscraper.logsender.remove_directory')
    @mock.patch('logscraper.logsender.send_to_es')
    @mock.patch('logscraper.logsender.get_build_information')
    @mock.patch('logscraper.logsender.get_es_client')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
                directory="/tmp/testdir", keep=False, doc_type="_doc"))
    def test_send_error_keep_dir(self, mock_args, mock_es_client,
                                 mock_build_info, mock_send_to_es,
                                 mock_remove_dir, mock_info):
        """Even without --keep, a failed ES send leaves the directory in
        place so the data can be retried later."""
        build_uuid = '38bf2cdc947643c9bb04f11f40a0f211'
        build_files = ['job-result.txt']
        directory = '/tmp/testdir'
        index = 'logstash-index'
        args = logsender.get_arguments()
        mock_info.return_value = ('somefile.txt', ['somefile.txt'])
        # send_to_es returning None signals a failed push.
        mock_send_to_es.return_value = None
        logsender.send((build_uuid, build_files), args, directory, index)
        self.assertFalse(mock_remove_dir.called)
    @mock.patch('logscraper.logsender.get_file_info')
    @mock.patch('logscraper.logsender.doc_iter')
    @mock.patch('logscraper.logsender.logline_iter')
    @mock.patch('opensearchpy.helpers.bulk')
    @mock.patch('logscraper.logsender.open_file')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
                directory="/tmp/testdir", index="myindex", workers=1,
                chunk_size=1000, doc_type="zuul",
                config='config.yaml', skip_debug=False))
    def test_send_to_es(self, mock_args, mock_text, mock_bulk, mock_doc_iter,
                        mock_logline_chunk, mock_file_info):
        """send_to_es() should push the prepared documents with a single
        bulk call when everything fits in one chunk."""
        build_file = 'job-result.txt'
        es_fields = parsed_fields
        es_client = mock.Mock()
        args = logsender.get_arguments()
        # Three timestamped console lines make up the fake log file.
        text = ["2022-02-28 09:39:09.596010 | Job console starting...",
                "2022-02-28 09:39:09.610160 | Updating repositories",
                "2022-02-28 09:39:09.996235 | Preparing job workspace"]
        mock_text.return_value = io.StringIO("\n".join(text))
        # One ES document per log line: shared build fields plus
        # per-line @timestamp and message.
        es_doc = [{
            '_index': 'myindex',
            '_type': 'zuul',
            '_source': {
                'build_node': 'zuul-executor',
                'build_name': 'openstack-tox-py39',
                'build_status': 'SUCCESS',
                'project': 'openstack/neutron',
                'voting': 1,
                'build_set': '52b29e0e716a4436bd20eed47fa396ce',
                'build_queue': 'check',
                'build_ref': 'refs/changes/61/829161/3',
                'build_branch': 'master',
                'build_change': 829161,
                'build_patchset': '3',
                'build_newrev': 'UNKNOWN',
                'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
                'node_provider': 'local',
                'hosts_id':
                ['ed82a4a59ac22bf396288f0b93bf1c658af932130f9d336aad528f21'],
                'log_url':
                'https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/',
                'tenant': 'openstack',
                'zuul_executor': 'ze07.opendev.org',
                '@timestamp': '2022-02-28T09:39:09.596000',
                'message': ' Job console starting...'
            }
        }, {
            '_index': 'myindex',
            '_type': 'zuul',
            '_source': {
                'build_node': 'zuul-executor',
                'build_name': 'openstack-tox-py39',
                'build_status': 'SUCCESS',
                'project': 'openstack/neutron',
                'voting': 1,
                'build_set': '52b29e0e716a4436bd20eed47fa396ce',
                'build_queue': 'check',
                'build_ref': 'refs/changes/61/829161/3',
                'build_branch': 'master',
                'build_change': 829161,
                'build_patchset': '3',
                'build_newrev': 'UNKNOWN',
                'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
                'node_provider': 'local',
                'hosts_id':
                ['ed82a4a59ac22bf396288f0b93bf1c658af932130f9d336aad528f21'],
                'log_url':
                'https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/',
                'tenant': 'openstack',
                'zuul_executor': 'ze07.opendev.org',
                '@timestamp': '2022-02-28T09:39:09.610000',
                'message': ' Updating repositories'
            }
        }, {
            '_index': 'myindex',
            '_type': 'zuul',
            '_source': {
                'build_node': 'zuul-executor',
                'build_name': 'openstack-tox-py39',
                'build_status': 'SUCCESS',
                'project': 'openstack/neutron',
                'voting': 1,
                'build_set': '52b29e0e716a4436bd20eed47fa396ce',
                'build_queue': 'check',
                'build_ref': 'refs/changes/61/829161/3',
                'build_branch': 'master',
                'build_change': 829161,
                'build_patchset': '3',
                'build_newrev': 'UNKNOWN',
                'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
                'node_provider': 'local',
                'hosts_id':
                ['ed82a4a59ac22bf396288f0b93bf1c658af932130f9d336aad528f21'],
                'log_url':
                'https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/',
                'tenant': 'openstack',
                'zuul_executor': 'ze07.opendev.org',
                '@timestamp': '2022-02-28T09:39:09.996000',
                'message': ' Preparing job workspace'
            }
        }]
        mock_doc_iter.return_value = es_doc
        logsender.send_to_es(build_file, es_fields, es_client, args.index,
                             args.chunk_size, args.doc_type, args.skip_debug)
        # All three docs fit in one chunk -> exactly one bulk request.
        self.assertEqual(1, mock_bulk.call_count)
    @mock.patch('logscraper.logsender.get_file_info')
    @mock.patch('logscraper.logsender.doc_iter')
    @mock.patch('logscraper.logsender.logline_iter')
    @mock.patch('opensearchpy.helpers.bulk')
    @mock.patch('logscraper.logsender.open_file')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
                directory="/tmp/testdir", index="myindex", workers=1,
                chunk_size=1000, doc_type="zuul",
                config='test.yaml', skip_debug=False))
    def test_send_to_es_error(self, mock_args, mock_text, mock_bulk,
                              mock_logline, mock_doc_iter, mock_file_info):
        """A TransportError raised by the bulk helper makes send_to_es()
        return None (the failure signal checked by send())."""
        build_file = 'job-result.txt'
        es_fields = parsed_fields
        es_client = mock.Mock()
        args = logsender.get_arguments()
        text = ["2022-02-28 09:39:09.596010 | Job console starting...",
                "2022-02-28 09:39:09.610160 | Updating repositories",
                "2022-02-28 09:39:09.996235 | Preparing job workspace"]
        mock_text.return_value = io.StringIO("\n".join(text))
        es_doc = [{
            '_index': 'myindex',
            '_type': 'zuul',
            '_source': {
                'build_node': 'zuul-executor',
                'build_name': 'openstack-tox-py39',
                'build_status': 'SUCCESS',
                'project': 'openstack/neutron',
                'voting': 1,
                'build_set': '52b29e0e716a4436bd20eed47fa396ce',
                'build_queue': 'check',
                'build_ref': 'refs/changes/61/829161/3',
                'build_branch': 'master',
                'build_change': 829161,
                'build_patchset': '3',
                'build_newrev': 'UNKNOWN',
                'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
                'node_provider': 'local',
                'log_url':
                'https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/',
                'tenant': 'openstack',
                'zuul_executor': 'ze07.opendev.org',
                '@timestamp': '2022-02-28T09:39:09.596000',
                'message': ' Job console starting...'
            }
        }]
        mock_doc_iter.return_value = es_doc
        # Simulate an OpenSearch server-side failure on the bulk request.
        mock_bulk.side_effect = TransportError(500, "InternalServerError", {
            "error": {
                "root_cause": [{
                    "type": "error",
                    "reason": "error reason"
                }]
            }
        })
        send_status = logsender.send_to_es(build_file, es_fields, es_client,
                                           args.index, args.chunk_size,
                                           args.doc_type, args.skip_debug)
        self.assertIsNone(send_status)
    @mock.patch('json.load')
    @mock.patch('logscraper.logsender.get_file_info')
    @mock.patch('opensearchpy.helpers.bulk')
    @mock.patch('logscraper.logsender.open_file')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
                directory="/tmp/testdir", index="myindex", workers=1,
                chunk_size=1000, doc_type="zuul",
                config='test.yaml', skip_debug=False))
    def test_send_to_es_json(self, mock_args, mock_text, mock_bulk,
                             mock_file_info, mock_json_load):
        """A .json build file becomes a single document whose message is the
        whole JSON body and whose @timestamp comes from report.timestamp."""
        build_file = 'performance.json'
        es_fields = parsed_fields
        es_client = mock.Mock()
        args = logsender.get_arguments()
        text = {
            "transient": {
                "cluster.index_state_management.coordinator.sweep_period": "1m"
            },
            "report": {
                "timestamp": "2022-04-18T19:51:55.394370",
                "hostname": "ubuntu-focal-rax-dfw-0029359041"
            }
        }
        mock_json_load.return_value = text
        mock_text.new_callable = mock.mock_open(read_data=str(text))
        es_doc = {
            '_index': 'myindex',
            '_source': {
                '@timestamp': '2022-04-18T19:51:55',
                'build_branch': 'master', 'build_change': 829161,
                'build_name': 'openstack-tox-py39', 'build_newrev': 'UNKNOWN',
                'build_node': 'zuul-executor', 'build_patchset': '3',
                'build_queue': 'check',
                'build_ref': 'refs/changes/61/829161/3',
                'build_set': '52b29e0e716a4436bd20eed47fa396ce',
                'build_status': 'SUCCESS',
                'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
                'hosts_id':
                ['ed82a4a59ac22bf396288f0b93bf1c658af932130f9d336aad528f21'],
                'log_url':
                'https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/',
                'message': '{"transient": '
                '{"cluster.index_state_management.coordinator.sweep_period": '
                '"1m"}, "report": {"timestamp": '
                '"2022-04-18T19:51:55.394370", "hostname": '
                '"ubuntu-focal-rax-dfw-0029359041"}}',
                'node_provider': 'local', 'project': 'openstack/neutron',
                'tenant': 'openstack', 'voting': 1,
                'zuul_executor': 'ze07.opendev.org'
            }, '_type': 'zuul'
        }
        logsender.send_to_es(build_file, es_fields, es_client, args.index,
                             args.chunk_size, args.doc_type, args.skip_debug)
        # Inspect the document stream actually handed to the bulk helper.
        self.assertEqual(es_doc, list(mock_bulk.call_args.args[1])[0])
        self.assertEqual(1, mock_bulk.call_count)
    @mock.patch('logscraper.logsender.get_file_info')
    @mock.patch('logscraper.logsender.doc_iter')
    @mock.patch('logscraper.logsender.logline_iter')
    @mock.patch('opensearchpy.helpers.bulk')
    @mock.patch('logscraper.logsender.open_file')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
                directory="/tmp/testdir", index="myindex", workers=1,
                chunk_size=1000, doc_type="zuul",
                config='test.yaml', skip_debug=True))
    def test_send_to_es_skip_debug(self, mock_args, mock_text, mock_bulk,
                                   mock_logline, mock_doc_iter,
                                   mock_file_info):
        """With skip_debug=True, DEBUG lines are dropped and only the
        non-debug document reaches the bulk helper."""
        build_file = 'job-result.txt'
        es_fields = parsed_fields
        es_client = mock.Mock()
        args = logsender.get_arguments()
        # Only the first line is non-debug; the other two must be skipped.
        text = ["2022-02-28 09:39:09.596010 | Job console starting...",
                "2022-02-28 09:39:09.610160 | DEBUG Updating repositories",
                "2022-02-28 09:39:09.996235 | DEBUG Preparing job workspace"]
        mock_text.return_value = io.StringIO("\n".join(text))
        es_doc = [{
            '_index': 'myindex',
            '_source': {
                '@timestamp': '2022-02-28T09:39:09.596000',
                'build_branch': 'master',
                'build_change': 829161,
                'build_name': 'openstack-tox-py39',
                'build_newrev': 'UNKNOWN',
                'build_node': 'zuul-executor',
                'build_patchset': '3',
                'build_queue': 'check',
                'build_ref': 'refs/changes/61/829161/3',
                'build_set': '52b29e0e716a4436bd20eed47fa396ce',
                'build_status': 'SUCCESS',
                'build_uuid': '38bf2cdc947643c9bb04f11f40a0f211',
                'log_url':
                'https://somehost/829161/3/check/openstack-tox-py39/38bf2cd/',
                'message': ' Job console starting...',
                'node_provider': 'local',
                'project': 'openstack/neutron',
                'tenant': 'openstack',
                'voting': 1,
                'zuul_executor': 'ze07.opendev.org'},
            '_type': 'zuul'}]
        mock_doc_iter.return_value = es_doc
        logsender.send_to_es(build_file, es_fields, es_client, args.index,
                             args.chunk_size, args.doc_type, args.skip_debug)
        self.assertEqual(es_doc, list(mock_bulk.call_args.args[1]))
        self.assertEqual(1, mock_bulk.call_count)
    @mock.patch('logscraper.logsender.logline_iter')
    def test_doc_iter(self, mock_logline):
        """doc_iter() wraps each (timestamp, line) pair into a bulk-ready ES
        document with the shared fields merged in."""
        # Input: (parsed timestamp, raw log line) tuples as produced by
        # logline_iter().
        text = [(datetime.datetime(2022, 2, 28, 9, 39, 9, 596000),
                 '2022-02-28 09:39:09.596010 | Job console starting...\n'),
                (datetime.datetime(2022, 2, 28, 9, 39, 9, 610000),
                 '2022-02-28 09:39:09.610160 | Updating repositories\n')]
        expected_chunk = [{
            '_index': 'someindex',
            '_source': {
                '@timestamp': '2022-02-28T09:39:09.596000',
                'field': 'test',
                'message': 'Job console starting...'
            },
            '_type': '_doc'
        }, {
            '_index': 'someindex',
            '_source': {
                '@timestamp': '2022-02-28T09:39:09.610000',
                'field': 'test',
                'message': 'Updating repositories'
            },
            '_type': '_doc'
        }]
        chunk_text = list(logsender.doc_iter(
            text, 'someindex', {'field': 'test'}, '_doc'))
        self.assertEqual(expected_chunk, chunk_text)
    def test_logline_iter(self):
        """logline_iter() yields (parsed timestamp, raw line) pairs; every
        line except the last keeps its trailing newline."""
        text = """2022-02-28 09:39:09.596 | Job console starting...
2022-02-28 09:39:09.610 | Updating repositories
2022-02-28 09:39:09.996 | Preparing job workspace"""
        expected_data = [
            (datetime.datetime(2022, 2, 28, 9, 39, 9, 596000),
             '2022-02-28 09:39:09.596 | Job console starting...\n'),
            (datetime.datetime(2022, 2, 28, 9, 39, 9, 610000),
             '2022-02-28 09:39:09.610 | Updating repositories\n'),
            (datetime.datetime(2022, 2, 28, 9, 39, 9, 996000),
             '2022-02-28 09:39:09.996 | Preparing job workspace')
        ]
        skip_debug = False
        # Patch builtins.open so the iterator reads our in-memory text.
        readed_data = mock.mock_open(read_data=text)
        with mock.patch('builtins.open', readed_data) as mocked_open_file:
            generated_text = list(logsender.logline_iter('nofile', skip_debug))
            self.assertEqual(expected_data, generated_text)
            self.assertTrue(mocked_open_file.called)
    @mock.patch('json.load')
    @mock.patch('logscraper.logsender.open_file')
    def test_json_iter(self, mock_open_file, mock_json_load):
        """json_iter() yields (timestamp from report.timestamp, whole JSON
        body rendered with double quotes) for a JSON build file."""
        text = {
            "transient": {
                "cluster.index_state_management.coordinator.sweep_period": "1m"
            },
            "report": {
                "timestamp": "2022-04-18T19:51:55.394370",
                "hostname": "ubuntu-focal-rax-dfw-0029359041"
            }
        }
        mock_json_load.return_value = text
        result = logsender.json_iter('somefile')
        self.assertEqual(datetime.datetime(2022, 4, 18, 19, 51, 55),
                         list(result)[0][0])
        # Fresh iterator: the previous one was consumed by list() above.
        result = logsender.json_iter('somefile')
        self.assertEqual(str(text).replace("\'", "\""), list(result)[0][1])
    @mock.patch('logscraper.logsender.read_yaml_file',
                side_effect=[_parse_get_yaml(buildinfo),
                             _parse_get_yaml(inventory_info)])
    def test_makeFields(self, mock_read_yaml_file):
        """makeFields() combines the buildinfo and inventory fixtures into
        exactly the parsed_fields document."""
        buildinfo_yaml = logsender.get_build_info('fake_dir')
        inventory_info_yaml = logsender.get_inventory_info('other_fake_dir')
        generated_info = logsender.makeFields(inventory_info_yaml,
                                              buildinfo_yaml)
        self.assertEqual(parsed_fields, generated_info)
def test_get_message(self):
line_1 = "28-02-2022 09:44:58.839036 | Some message"
line_2 = "2022-02-28 09:44:58.839036 | Other message | other log info"
self.assertEqual("Some message", logsender.get_message(line_1))
self.assertEqual("Other message | other log info",
logsender.get_message(line_2))
def test_get_timestamp(self):
for (line, expected) in [
("2022-02-28 09:44:58.839036 | Other message",
datetime.datetime(2022, 2, 28, 9, 44, 58, 839036)),
("2022-03-21T08:39:18.220547Z | Last metadata expiration",
datetime.datetime(2022, 3, 21, 8, 39, 18, 220547)),
("Mar 31 04:50:23.795709 nested-virt some log",
datetime.datetime(2022, 3, 31, 4, 50, 23, 795700)),
("Mar 21 09:33:23 fedora-rax-dfw-0028920567 sudo[2786]: zuul ",
datetime.datetime(datetime.date.today().year, 3, 21, 9, 33, 23)),
("2022-03-23T13:09:08.644Z|00040|connmgr|INFO|br-int: added",
datetime.datetime(2022, 3, 23, 13, 9, 8)),
("Friday 25 February 2022 09:27:51 +0000 (0:00:00.056)",
datetime.datetime(2022, 2, 25, 9, 27, 51)),
]:
got = logsender.get_timestamp(line)
self.assertEqual(expected, got)
    @mock.patch('ruamel.yaml.YAML.load')
    @mock.patch('logscraper.logsender.open_file')
    def test_get_file_info(self, mock_open_file, mock_yaml):
        # Config maps known log file names to the tags they are indexed with.
        config = {'files': [{
            'name': 'job-output.txt',
            'tags': ['console', 'console.html']
        }, {'name': 'logs/undercloud/var/log/extra/logstash.txt',
            'tags': ['console', 'postpci']}]}
        # For configured files the configured name and tags are returned,
        # with the file's own basename appended to the tag list.
        expected_output_1 = ('logs/undercloud/var/log/extra/logstash.txt',
                             ['console', 'postpci', 'logstash.txt'])
        expected_output_2 = ('job-output.txt',
                             ['console', 'console.html', 'job-output.txt'])
        # Files absent from the config fall back to just their basename.
        expected_output_3 = ('somejob.txt', ['somejob.txt'])
        mock_yaml.return_value = config
        self.assertEqual(expected_output_1, logsender.get_file_info(
            config, './9e7bbfb1a4614bc4be06776658fa888f/logstash.txt'))
        self.assertEqual(expected_output_2, logsender.get_file_info(
            config, './9e7bbfb1a4614bc4be06776658fa888f/job-output.txt'))
        self.assertEqual(expected_output_3, logsender.get_file_info(
            config, './9e7bbfb1a4614bc4be06776658fa888f/somejob.txt'))
@mock.patch('logscraper.logsender.get_es_client')
@mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
index_prefix="my-index-", workers=2))
def test_get_index(self, mock_args, mock_es_client):
args = logsender.get_arguments()
expected_index = ("my-index-%s" %
datetime.datetime.today().strftime('%Y.%m.%d'))
index = logsender.get_index(args)
self.assertEqual(expected_index, index)
    @mock.patch('logscraper.logsender.send')
    @mock.patch('logscraper.logsender.get_index')
    @mock.patch('argparse.ArgumentParser.parse_args', return_value=FakeArgs(
        directory="/tmp/testdir", workers=2, index='myindex'))
    def test_prepare_and_send(self, mock_args, mock_index, mock_send):
        args = logsender.get_arguments()
        ready_directories = {'builduuid': ['job-result.txt']}
        mock_index.return_value = args.index
        # Replace Pool.starmap_async with a synchronous fake so the worker
        # function runs inline and its calls can be asserted on directly.
        with mock.patch(
                'multiprocessing.pool.Pool.starmap_async',
                lambda self, func, iterable, chunksize=None,
                callback=None,
                error_callback=None: _MockedPoolMapAsyncResult(func, iterable),
        ):
            logsender.prepare_and_send(ready_directories, args)
            self.assertTrue(mock_send.called)
            # Each build directory becomes one send() work item carrying the
            # (uuid, files) pair plus args, directory and index.
            mock_send.assert_called_with((('builduuid', ['job-result.txt']),
                                          args, args.directory, args.index))
| 42.616438 | 79 | 0.602028 |
79585c01bda232f81e868ce0914f10776f98b381 | 1,397 | py | Python | setup.py | EFS-OpenSource/calibration-framework | 7b306e4bbe6361d411b209759b7ba3d016bd0d17 | [
"Apache-2.0"
] | 2 | 2021-09-01T13:21:22.000Z | 2022-03-23T07:13:12.000Z | setup.py | EFS-OpenSource/calibration-framework | 7b306e4bbe6361d411b209759b7ba3d016bd0d17 | [
"Apache-2.0"
] | null | null | null | setup.py | EFS-OpenSource/calibration-framework | 7b306e4bbe6361d411b209759b7ba3d016bd0d17 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
import setuptools

# Use the README as the PyPI long description.
with open("README.rst", "r") as fh:
    long_description = fh.read()

# Package metadata collected in one place and splatted into setup().
_METADATA = dict(
    name="netcal",
    version="1.2.0",
    author="Fabian Kueppers",
    author_email="fabian.kueppers@hs-ruhrwest.de",
    description="Python Framework to calibrate confidence estimates of classifiers like Neural Networks",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url="https://github.com/fabiankueppers/calibration-framework",
    packages=setuptools.find_packages(),
    install_requires=['numpy>=1.17', 'scipy>=1.3', 'matplotlib>=3.1', 'scikit-learn>=0.21', 'torch>=1.4', 'torchvision>=0.5.0', 'tqdm>=4.40', 'pyro-ppl>=1.3', 'tikzplotlib>=0.9.8', 'tensorboard>=2.2'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Development Status :: 5 - Production/Stable",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)

setuptools.setup(**_METADATA)
| 43.65625 | 203 | 0.697208 |
79585d2b07f316e82ca019ead48f6bbd5d6a34bc | 68,901 | py | Python | nova/virt/ironic/driver.py | andymcc/nova | f77f6d33f6b10ec6ce7bc2aa8aa99366e682059e | [
"Apache-2.0"
] | null | null | null | nova/virt/ironic/driver.py | andymcc/nova | f77f6d33f6b10ec6ce7bc2aa8aa99366e682059e | [
"Apache-2.0"
] | null | null | null | nova/virt/ironic/driver.py | andymcc/nova | f77f6d33f6b10ec6ce7bc2aa8aa99366e682059e | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver wrapping the Ironic API, such that Nova may provision
bare metal resources.
"""
import base64
import gzip
import shutil
import tempfile
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import importutils
import six
import six.moves.urllib.parse as urlparse
from tooz import hashring as hash_ring
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova.console import type as console_type
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import objects
from nova.objects import fields as obj_fields
from nova import servicegroup
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper
from nova.virt.ironic import ironic_states
from nova.virt.ironic import patcher
from nova.virt import netutils
# Lazily imported ironicclient module (see IronicDriver.__init__).
ironic = None

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF

# Translation between ironic power states and the generic nova
# power_state constants reported to the compute manager.
_POWER_STATE_MAP = {
    ironic_states.POWER_ON: power_state.RUNNING,
    ironic_states.NOSTATE: power_state.NOSTATE,
    ironic_states.POWER_OFF: power_state.SHUTDOWN,
}

# Provision states from which a node must be actively unprovisioned.
_UNPROVISION_STATES = (ironic_states.ACTIVE, ironic_states.DEPLOYFAIL,
                       ironic_states.ERROR, ironic_states.DEPLOYWAIT,
                       ironic_states.DEPLOYING)

# Subset of node fields requested from the ironic API. 'resource_class' is
# included because _node_resource() reads node.resource_class, and a node
# fetched via _get_node() (e.g. on a _node_from_cache() miss) would
# otherwise lack that attribute.
_NODE_FIELDS = ('uuid', 'power_state', 'target_power_state', 'provision_state',
                'target_provision_state', 'last_error', 'maintenance',
                'properties', 'instance_uuid', 'resource_class')

# Console state checking interval in seconds
_CONSOLE_STATE_CHECKING_INTERVAL = 1

# Number of hash ring partitions per service
# 5 should be fine for most deployments, as an experimental feature.
_HASH_RING_PARTITIONS = 2 ** 5
def map_power_state(state):
    """Map an ironic power state onto the nova power_state constant.

    Unknown states are logged and reported as NOSTATE.
    """
    mapped = _POWER_STATE_MAP.get(state)
    if mapped is None:
        LOG.warning(_LW("Power state %s not found."), state)
        return power_state.NOSTATE
    return mapped
def _get_nodes_supported_instances(cpu_arch=None):
    """Return the supported-instances tuple list for a node.

    :param cpu_arch: canonicalized CPU architecture, or None/empty.
    :returns: a single (arch, BAREMETAL, HVM) entry, or [] when no
        architecture is known.
    """
    if cpu_arch:
        return [(cpu_arch,
                 obj_fields.HVType.BAREMETAL,
                 obj_fields.VMMode.HVM)]
    return []
def _log_ironic_polling(what, node, instance):
    """Emit a debug line describing a node's state while polling ironic."""
    def _quoted(state):
        # Render None as-is; wrap every other state in double quotes.
        return None if state is None else '"%s"' % state

    LOG.debug('Still waiting for ironic node %(node)s to %(what)s: '
              'power_state=%(power_state)s, '
              'target_power_state=%(tgt_power_state)s, '
              'provision_state=%(prov_state)s, '
              'target_provision_state=%(tgt_prov_state)s',
              {'what': what,
               'node': node.uuid,
               'power_state': _quoted(node.power_state),
               'tgt_power_state': _quoted(node.target_power_state),
               'prov_state': _quoted(node.provision_state),
               'tgt_prov_state': _quoted(node.target_provision_state)},
              instance=instance)
class IronicDriver(virt_driver.ComputeDriver):
    """Hypervisor driver for Ironic - bare metal provisioning."""

    # Optional driver capabilities advertised to the compute manager; none
    # of these are supported for bare metal nodes.
    capabilities = {"has_imagecache": False,
                    "supports_recreate": False,
                    "supports_migrate_to_same_host": False,
                    "supports_attach_interface": False
                    }
    def __init__(self, virtapi, read_only=False):
        """Initialize the ironic driver.

        :param virtapi: interface back to the compute manager.
        :param read_only: unused; kept for driver-interface compatibility.
        """
        super(IronicDriver, self).__init__(virtapi)
        # Import ironicclient lazily, once per process, so the module is
        # only required when this driver is actually used.
        global ironic
        if ironic is None:
            ironic = importutils.import_module('ironicclient')
            # NOTE(deva): work around a lack of symbols in the current version.
            if not hasattr(ironic, 'exc'):
                ironic.exc = importutils.import_module('ironicclient.exc')
            if not hasattr(ironic, 'client'):
                ironic.client = importutils.import_module(
                    'ironicclient.client')
        self.firewall_driver = firewall.load_driver(
            default='nova.virt.firewall.NoopFirewallDriver')
        # Cache of ironic node objects keyed by node UUID, and the time the
        # cache was last rebuilt (see _refresh_cache()).
        self.node_cache = {}
        self.node_cache_time = 0
        self.servicegroup_api = servicegroup.API()
        self._refresh_hash_ring(nova_context.get_admin_context())
        self.ironicclient = client_wrapper.IronicClientWrapper()
def _get_node(self, node_uuid):
"""Get a node by its UUID."""
return self.ironicclient.call('node.get', node_uuid,
fields=_NODE_FIELDS)
def _validate_instance_and_node(self, instance):
"""Get the node associated with the instance.
Check with the Ironic service that this instance is associated with a
node, and return the node.
"""
try:
return self.ironicclient.call('node.get_by_instance_uuid',
instance.uuid, fields=_NODE_FIELDS)
except ironic.exc.NotFound:
raise exception.InstanceNotFound(instance_id=instance.uuid)
def _node_resources_unavailable(self, node_obj):
"""Determine whether the node's resources are in an acceptable state.
Determines whether the node's resources should be presented
to Nova for use based on the current power, provision and maintenance
state. This is called after _node_resources_used, so any node that
is not used and not in AVAILABLE should be considered in a 'bad' state,
and unavailable for scheduling. Returns True if unacceptable.
"""
bad_power_states = [
ironic_states.ERROR, ironic_states.NOSTATE]
# keep NOSTATE around for compatibility
good_provision_states = [
ironic_states.AVAILABLE, ironic_states.NOSTATE]
return (node_obj.maintenance or
node_obj.power_state in bad_power_states or
node_obj.provision_state not in good_provision_states or
(node_obj.provision_state in good_provision_states and
node_obj.instance_uuid is not None))
    def _node_resources_used(self, node_obj):
        """Determine whether the node's resources are currently used.

        Determines whether the node's resources should be considered used
        or not. A node is used when it is either in the process of putting
        a new instance on the node, has an instance on the node, or is in
        the process of cleaning up from a deleted instance. Returns True if
        used.

        If we report resources as consumed for a node that does not have an
        instance on it, the resource tracker will notice there's no instances
        consuming resources and try to correct us. So only nodes with an
        instance attached should report as consumed here.
        """
        # The instance association is the single marker for "used" per the
        # rationale in the docstring above.
        return node_obj.instance_uuid is not None
def _parse_node_properties(self, node):
"""Helper method to parse the node's properties."""
properties = {}
for prop in ('cpus', 'memory_mb', 'local_gb'):
try:
properties[prop] = int(node.properties.get(prop, 0))
except (TypeError, ValueError):
LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer.'),
{'uuid': node.uuid, 'prop': prop})
properties[prop] = 0
raw_cpu_arch = node.properties.get('cpu_arch', None)
try:
cpu_arch = obj_fields.Architecture.canonicalize(raw_cpu_arch)
except exception.InvalidArchitectureName:
cpu_arch = None
if not cpu_arch:
LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid)
properties['cpu_arch'] = cpu_arch
properties['raw_cpu_arch'] = raw_cpu_arch
properties['capabilities'] = node.properties.get('capabilities')
return properties
def _parse_node_instance_info(self, node, props):
"""Helper method to parse the node's instance info.
If a property cannot be looked up via instance_info, use the original
value from the properties dict. This is most likely to be correct;
it should only be incorrect if the properties were changed directly
in Ironic while an instance was deployed.
"""
instance_info = {}
# add this key because it's different in instance_info for some reason
props['vcpus'] = props['cpus']
for prop in ('vcpus', 'memory_mb', 'local_gb'):
original = props[prop]
try:
instance_info[prop] = int(node.instance_info.get(prop,
original))
except (TypeError, ValueError):
LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer but its value '
'is "%(value)s".'),
{'uuid': node.uuid, 'prop': prop,
'value': node.instance_info.get(prop)})
instance_info[prop] = original
return instance_info
    def _node_resource(self, node):
        """Helper method to create resource dict from node stats.

        :param node: an ironic node object.
        :returns: a dict in the shape expected by the resource tracker
            (vcpus, memory, disk, stats, supported_instances, ...).
        """
        properties = self._parse_node_properties(node)
        vcpus = properties['cpus']
        memory_mb = properties['memory_mb']
        local_gb = properties['local_gb']
        raw_cpu_arch = properties['raw_cpu_arch']
        cpu_arch = properties['cpu_arch']
        nodes_extra_specs = {}
        # NOTE(deva): In Havana and Icehouse, the flavor was required to link
        # to an arch-specific deploy kernel and ramdisk pair, and so the flavor
        # also had to have extra_specs['cpu_arch'], which was matched against
        # the ironic node.properties['cpu_arch'].
        # With Juno, the deploy image(s) may be referenced directly by the
        # node.driver_info, and a flavor no longer needs to contain any of
        # these three extra specs, though the cpu_arch may still be used
        # in a heterogeneous environment, if so desired.
        # NOTE(dprince): we use the raw cpu_arch here because extra_specs
        # filters aren't canonicalized
        nodes_extra_specs['cpu_arch'] = raw_cpu_arch
        # NOTE(gilliard): To assist with more precise scheduling, if the
        # node.properties contains a key 'capabilities', we expect the value
        # to be of the form "k1:v1,k2:v2,etc.." which we add directly as
        # key/value pairs into the node_extra_specs to be used by the
        # ComputeCapabilitiesFilter
        capabilities = properties['capabilities']
        if capabilities:
            for capability in str(capabilities).split(','):
                parts = capability.split(':')
                if len(parts) == 2 and parts[0] and parts[1]:
                    nodes_extra_specs[parts[0].strip()] = parts[1]
                else:
                    LOG.warning(_LW("Ignoring malformed capability '%s'. "
                                    "Format should be 'key:val'."), capability)
        vcpus_used = 0
        memory_mb_used = 0
        local_gb_used = 0
        if self._node_resources_used(node):
            # Node is in the process of deploying, is deployed, or is in
            # the process of cleaning up from a deploy. Report all of its
            # resources as in use.
            instance_info = self._parse_node_instance_info(node, properties)
            # Use instance_info instead of properties here is because the
            # properties of a deployed node can be changed which will count
            # as available resources.
            vcpus_used = vcpus = instance_info['vcpus']
            memory_mb_used = memory_mb = instance_info['memory_mb']
            local_gb_used = local_gb = instance_info['local_gb']
        # Always checking allows us to catch the case where Nova thinks there
        # are available resources on the Node, but Ironic does not (because it
        # is not in a usable state): https://launchpad.net/bugs/1503453
        if self._node_resources_unavailable(node):
            # The node's current state is such that it should not present any
            # of its resources to Nova
            vcpus = 0
            memory_mb = 0
            local_gb = 0
        dic = {
            'hypervisor_hostname': str(node.uuid),
            'hypervisor_type': self._get_hypervisor_type(),
            'hypervisor_version': self._get_hypervisor_version(),
            'resource_class': node.resource_class,
            # The Ironic driver manages multiple hosts, so there are
            # likely many different CPU models in use. As such it is
            # impossible to provide any meaningful info on the CPU
            # model of the "host"
            'cpu_info': None,
            'vcpus': vcpus,
            'vcpus_used': vcpus_used,
            'local_gb': local_gb,
            'local_gb_used': local_gb_used,
            'disk_available_least': local_gb - local_gb_used,
            'memory_mb': memory_mb,
            'memory_mb_used': memory_mb_used,
            'supported_instances': _get_nodes_supported_instances(cpu_arch),
            'stats': nodes_extra_specs,
            'numa_topology': None,
        }
        return dic
    def _start_firewall(self, instance, network_info):
        # Set up, prepare and apply the instance's firewall filtering via
        # the configured firewall driver.
        self.firewall_driver.setup_basic_filtering(instance, network_info)
        self.firewall_driver.prepare_instance_filter(instance, network_info)
        self.firewall_driver.apply_instance_filter(instance, network_info)
    def _stop_firewall(self, instance, network_info):
        # Remove the filtering that _start_firewall() put in place.
        self.firewall_driver.unfilter_instance(instance, network_info)
def _add_instance_info_to_node(self, node, instance, image_meta, flavor,
preserve_ephemeral=None):
patch = patcher.create(node).get_deploy_patch(instance,
image_meta,
flavor,
preserve_ephemeral)
# Associate the node with an instance
patch.append({'path': '/instance_uuid', 'op': 'add',
'value': instance.uuid})
try:
# FIXME(lucasagomes): The "retry_on_conflict" parameter was added
# to basically causes the deployment to fail faster in case the
# node picked by the scheduler is already associated with another
# instance due bug #1341420.
self.ironicclient.call('node.update', node.uuid, patch,
retry_on_conflict=False)
except ironic.exc.BadRequest:
msg = (_("Failed to add deploy parameters on node %(node)s "
"when provisioning the instance %(instance)s")
% {'node': node.uuid, 'instance': instance.uuid})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def _remove_instance_info_from_node(self, node, instance):
patch = [{'path': '/instance_info', 'op': 'remove'},
{'path': '/instance_uuid', 'op': 'remove'}]
try:
self.ironicclient.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest as e:
LOG.warning(_LW("Failed to remove deploy parameters from node "
"%(node)s when unprovisioning the instance "
"%(instance)s: %(reason)s"),
{'node': node.uuid, 'instance': instance.uuid,
'reason': six.text_type(e)})
    def _cleanup_deploy(self, node, instance, network_info):
        # Unplug the instance's VIFs and tear down its firewall rules,
        # undoing the setup performed for a (possibly failed) deployment.
        self._unplug_vifs(node, instance, network_info)
        self._stop_firewall(instance, network_info)
    def _wait_for_active(self, instance):
        """Wait for the node to be marked as ACTIVE in Ironic.

        Intended as a loopingcall callback: raises LoopingCallDone once the
        deploy succeeds, InstanceDeployFailure when the deploy failed or the
        instance was deleted/errored in nova, and InstanceNotFound when
        ironic is deleting (or has already deleted) the node's instance.
        """
        instance.refresh()
        # Abort if the instance was deleted or errored out while we waited.
        if (instance.task_state == task_states.DELETING or
            instance.vm_state in (vm_states.ERROR, vm_states.DELETED)):
            raise exception.InstanceDeployFailure(
                _("Instance %s provisioning was aborted") % instance.uuid)
        node = self._validate_instance_and_node(instance)
        if node.provision_state == ironic_states.ACTIVE:
            # job is done
            LOG.debug("Ironic node %(node)s is now ACTIVE",
                      dict(node=node.uuid), instance=instance)
            raise loopingcall.LoopingCallDone()
        if node.target_provision_state in (ironic_states.DELETED,
                                           ironic_states.AVAILABLE):
            # ironic is trying to delete it now
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        if node.provision_state in (ironic_states.NOSTATE,
                                    ironic_states.AVAILABLE):
            # ironic already deleted it
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        if node.provision_state == ironic_states.DEPLOYFAIL:
            # ironic failed to deploy
            msg = (_("Failed to provision instance %(inst)s: %(reason)s")
                   % {'inst': instance.uuid, 'reason': node.last_error})
            raise exception.InstanceDeployFailure(msg)
        _log_ironic_polling('become ACTIVE', node, instance)
def _wait_for_power_state(self, instance, message):
"""Wait for the node to complete a power state change."""
node = self._validate_instance_and_node(instance)
if node.target_power_state == ironic_states.NOSTATE:
raise loopingcall.LoopingCallDone()
_log_ironic_polling(message, node, instance)
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function.
:param host: the hostname of the compute host.
"""
return
    def _get_hypervisor_type(self):
        """Get hypervisor type.

        :returns: the fixed hypervisor type string 'ironic'.
        """
        return 'ironic'
    def _get_hypervisor_version(self):
        """Returns the version of the Ironic API service endpoint.

        :returns: the major version number taken from
            client_wrapper.IRONIC_API_VERSION.
        """
        return client_wrapper.IRONIC_API_VERSION[0]
def instance_exists(self, instance):
"""Checks the existence of an instance.
Checks the existence of an instance. This is an override of the
base method for efficiency.
:param instance: The instance object.
:returns: True if the instance exists. False if not.
"""
try:
self._validate_instance_and_node(instance)
return True
except exception.InstanceNotFound:
return False
def _get_node_list(self, **kwargs):
"""Helper function to return the list of nodes.
If unable to connect ironic server, an empty list is returned.
:returns: a list of raw node from ironic
"""
try:
node_list = self.ironicclient.call("node.list", **kwargs)
except exception.NovaException:
node_list = []
return node_list
def list_instances(self):
"""Return the names of all the instances provisioned.
:returns: a list of instance names.
"""
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
node_list = self._get_node_list(associated=True, limit=0)
context = nova_context.get_admin_context()
return [objects.Instance.get_by_uuid(context,
i.instance_uuid).name
for i in node_list]
def list_instance_uuids(self):
"""Return the UUIDs of all the instances provisioned.
:returns: a list of instance UUIDs.
"""
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
return list(n.instance_uuid
for n in self._get_node_list(associated=True, limit=0))
def node_is_available(self, nodename):
"""Confirms a Nova hypervisor node exists in the Ironic inventory.
:param nodename: The UUID of the node.
:returns: True if the node exists, False if not.
"""
# NOTE(comstud): We can cheat and use caching here. This method
# just needs to return True for nodes that exist. It doesn't
# matter if the data is stale. Sure, it's possible that removing
# node from Ironic will cause this method to return True until
# the next call to 'get_available_nodes', but there shouldn't
# be much harm. There's already somewhat of a race.
if not self.node_cache:
# Empty cache, try to populate it.
self._refresh_cache()
if nodename in self.node_cache:
return True
# NOTE(comstud): Fallback and check Ironic. This case should be
# rare.
try:
self._get_node(nodename)
return True
except ironic.exc.NotFound:
return False
def _refresh_hash_ring(self, ctxt):
service_list = objects.ServiceList.get_all_computes_by_hv_type(
ctxt, self._get_hypervisor_type())
services = set()
for svc in service_list:
is_up = self.servicegroup_api.service_is_up(svc)
if is_up:
services.add(svc.host)
# NOTE(jroll): always make sure this service is in the list, because
# only services that have something registered in the compute_nodes
# table will be here so far, and we might be brand new.
services.add(CONF.host)
self.hash_ring = hash_ring.HashRing(services,
partitions=_HASH_RING_PARTITIONS)
    def _refresh_cache(self):
        """Rebuild the node cache and the hash ring for this service."""
        # NOTE(lucasagomes): limit == 0 is an indicator to continue
        # pagination until there're no more values to be returned.
        ctxt = nova_context.get_admin_context()
        self._refresh_hash_ring(ctxt)
        instances = objects.InstanceList.get_uuids_by_host(ctxt, CONF.host)
        node_cache = {}
        for node in self._get_node_list(detail=True, limit=0):
            # NOTE(jroll): we always manage the nodes for instances we manage
            if node.instance_uuid in instances:
                node_cache[node.uuid] = node
            # NOTE(jroll): check if the node matches us in the hash ring, and
            # does not have an instance_uuid (which would imply the node has
            # an instance managed by another compute service).
            # Note that this means nodes with an instance that was deleted in
            # nova while the service was down, and not yet reaped, will not be
            # reported until the periodic task cleans it up.
            elif (node.instance_uuid is None and
                  CONF.host in
                  self.hash_ring.get_nodes(node.uuid.encode('utf-8'))):
                node_cache[node.uuid] = node
        # Publish the newly built cache and record when it was built.
        self.node_cache = node_cache
        self.node_cache_time = time.time()
def get_available_nodes(self, refresh=False):
"""Returns the UUIDs of Ironic nodes managed by this compute service.
We use consistent hashing to distribute Ironic nodes between all
available compute services. The subset of nodes managed by a given
compute service is determined by the following rules:
* any node with an instance managed by the compute service
* any node that is mapped to the compute service on the hash ring
* no nodes with instances managed by another compute service
The ring is rebalanced as nova-compute services are brought up and
down. Note that this rebalance does not happen at the same time for
all compute services, so a node may be managed by multiple compute
services for a small amount of time.
:param refresh: Boolean value; If True run update first. Ignored by
this driver.
:returns: a list of UUIDs
"""
# NOTE(jroll) we refresh the cache every time this is called
# because it needs to happen in the resource tracker
# periodic task. This task doesn't pass refresh=True,
# unfortunately.
self._refresh_cache()
node_uuids = list(self.node_cache.keys())
LOG.debug("Returning %(num_nodes)s available node(s)",
dict(num_nodes=len(node_uuids)))
return node_uuids
    def get_inventory(self, nodename):
        """Return a dict, keyed by resource class, of inventory information for
        the supplied node.

        :param nodename: UUID of the node.
        :returns: inventory keyed by resource class; empty dict when the
            node reports no usable vcpus (i.e. is unavailable).
        """
        node = self._node_from_cache(nodename)
        info = self._node_resource(node)
        # TODO(jaypipes): Completely remove the reporting of VCPU, MEMORY_MB,
        # and DISK_GB resource classes in early Queens when Ironic nodes will
        # *always* return the custom resource class that represents the
        # baremetal node class in an atomic, singular unit.
        if info['vcpus'] == 0:
            # NOTE(jaypipes): The driver can return 0-valued vcpus when the
            # node is "disabled". In the future, we should detach inventory
            # accounting from the concept of a node being disabled or not. The
            # two things don't really have anything to do with each other.
            return {}
        result = {
            obj_fields.ResourceClass.VCPU: {
                'total': info['vcpus'],
                'reserved': 0,
                'min_unit': 1,
                'max_unit': info['vcpus'],
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            obj_fields.ResourceClass.MEMORY_MB: {
                'total': info['memory_mb'],
                'reserved': 0,
                'min_unit': 1,
                'max_unit': info['memory_mb'],
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            obj_fields.ResourceClass.DISK_GB: {
                'total': info['local_gb'],
                'reserved': 0,
                'min_unit': 1,
                'max_unit': info['local_gb'],
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
        }
        # A bare metal node is also exposed as a single unit of its custom
        # resource class, when one is set on the node.
        rc_name = info.get('resource_class')
        if rc_name is not None:
            # TODO(jaypipes): Raise an exception in Queens if Ironic doesn't
            # report a resource class for the node
            norm_name = obj_fields.ResourceClass.normalize_name(rc_name)
            if norm_name is not None:
                result[norm_name] = {
                    'total': 1,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 1,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                }
        return result
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: the UUID of the node.
:returns: a dictionary describing resources.
"""
# NOTE(comstud): We can cheat and use caching here. This method is
# only called from a periodic task and right after the above
# get_available_nodes() call is called.
if not self.node_cache:
# Well, it's also called from init_host(), so if we have empty
# cache, let's try to populate it.
self._refresh_cache()
node = self._node_from_cache(nodename)
return self._node_resource(node)
def _node_from_cache(self, nodename):
"""Returns a node from the cache, retrieving the node from Ironic API
if the node doesn't yet exist in the cache.
"""
cache_age = time.time() - self.node_cache_time
if nodename in self.node_cache:
LOG.debug("Using cache for node %(node)s, age: %(age)s",
{'node': nodename, 'age': cache_age})
return self.node_cache[nodename]
else:
LOG.debug("Node %(node)s not found in cache, age: %(age)s",
{'node': nodename, 'age': cache_age})
node = self._get_node(nodename)
self.node_cache[nodename] = node
return node
    def get_info(self, instance):
        """Get the current state and resource usage for this instance.

        If the instance is not found, an InstanceInfo with NOSTATE power
        state (and zeroed resources) is returned.

        :param instance: the instance object.
        :returns: a InstanceInfo object
        """
        try:
            node = self._validate_instance_and_node(instance)
        except exception.InstanceNotFound:
            return hardware.InstanceInfo(
                state=map_power_state(ironic_states.NOSTATE))
        properties = self._parse_node_properties(node)
        memory_kib = properties['memory_mb'] * 1024
        if memory_kib == 0:
            LOG.warning(_LW("Warning, memory usage is 0 for "
                            "%(instance)s on baremetal node %(node)s."),
                        {'instance': instance.uuid,
                         'node': instance.node})
        num_cpu = properties['cpus']
        if num_cpu == 0:
            LOG.warning(_LW("Warning, number of cpus is 0 for "
                            "%(instance)s on baremetal node %(node)s."),
                        {'instance': instance.uuid,
                         'node': instance.node})
        return hardware.InstanceInfo(state=map_power_state(node.power_state),
                                     max_mem_kb=memory_kib,
                                     mem_kb=memory_kib,
                                     num_cpu=num_cpu)
    def deallocate_networks_on_reschedule(self, instance):
        """Does the driver want networks deallocated on reschedule?

        :param instance: the instance object.
        :returns: Boolean value. If True deallocate networks on reschedule.
            Always True for this driver.
        """
        return True
    def _get_network_metadata(self, node, network_info):
        """Gets a more complete representation of the instance network info.

        This data is exposed as network_data.json in the metadata service and
        the config drive.

        :param node: The node object.
        :param network_info: Instance network information.
        :returns: the network metadata dict, with links enriched with MAC
            addresses and bond (portgroup) details from ironic.
        """
        base_metadata = netutils.get_network_metadata(network_info)
        # TODO(vdrok): change to doing a single "detailed vif list" call,
        # when added to ironic API, response to that will contain all
        # necessary information. Then we will be able to avoid looking at
        # internal_info/extra fields.
        ports = self.ironicclient.call("node.list_ports",
                                       node.uuid, detail=True)
        portgroups = self.ironicclient.call("portgroup.list", node=node.uuid,
                                            detail=True)
        # Index ports and portgroups by the neutron VIF they are attached to.
        vif_id_to_objects = {'ports': {}, 'portgroups': {}}
        for collection, name in ((ports, 'ports'), (portgroups, 'portgroups')):
            for p in collection:
                vif_id = (p.internal_info.get('tenant_vif_port_id') or
                          p.extra.get('vif_port_id'))
                if vif_id:
                    vif_id_to_objects[name][vif_id] = p
        additional_links = []
        for link in base_metadata['links']:
            vif_id = link['vif_id']
            if vif_id in vif_id_to_objects['portgroups']:
                pg = vif_id_to_objects['portgroups'][vif_id]
                pg_ports = [p for p in ports if p.portgroup_uuid == pg.uuid]
                link.update({'type': 'bond', 'bond_mode': pg.mode,
                             'bond_links': []})
                # If address is set on the portgroup, an (ironic) vif-attach
                # call has already updated neutron with the port address;
                # reflect it here. Otherwise, an address generated by neutron
                # will be used instead (code is elsewhere to handle this case).
                if pg.address:
                    link.update({'ethernet_mac_address': pg.address})
                for prop in pg.properties:
                    # These properties are the bonding driver options described
                    # at https://www.kernel.org/doc/Documentation/networking/bonding.txt # noqa
                    # cloud-init checks the same way, parameter name has to
                    # start with bond
                    key = prop if prop.startswith('bond') else 'bond_%s' % prop
                    link[key] = pg.properties[prop]
                for port in pg_ports:
                    # This won't cause any duplicates to be added. A port
                    # cannot be in more than one port group for the same
                    # node.
                    additional_links.append({
                        'id': port.uuid,
                        'type': 'phy', 'ethernet_mac_address': port.address,
                    })
                    link['bond_links'].append(port.uuid)
            elif vif_id in vif_id_to_objects['ports']:
                p = vif_id_to_objects['ports'][vif_id]
                # Ironic updates neutron port's address during attachment
                link.update({'ethernet_mac_address': p.address,
                             'type': 'phy'})
        base_metadata['links'].extend(additional_links)
        return base_metadata
def _generate_configdrive(self, context, instance, node, network_info,
                          extra_md=None, files=None):
    """Generate a config drive.

    Builds a config drive image for the instance, gzip-compresses it and
    returns the base64-encoded result suitable for passing to Ironic's
    provisioning call.

    :param instance: The instance object.
    :param node: The node object.
    :param network_info: Instance network information.
    :param extra_md: Optional, extra metadata to be added to the
        configdrive.
    :param files: Optional, a list of paths to files to be added to
        the configdrive.
    :returns: base64-encoded bytes of the gzipped config drive.
    """
    if not extra_md:
        extra_md = {}
    # network_metadata comes from the Ironic ports/portgroups so that
    # bond/link details are exposed to the guest via the config drive.
    i_meta = instance_metadata.InstanceMetadata(instance,
        content=files, extra_md=extra_md, network_info=network_info,
        network_metadata=self._get_network_metadata(node, network_info),
        request_context=context)

    # Build the drive into a temp file, then stream it through gzip into
    # a second temp file before encoding; both files are cleaned up by
    # the context managers.
    with tempfile.NamedTemporaryFile() as uncompressed:
        with configdrive.ConfigDriveBuilder(instance_md=i_meta) as cdb:
            cdb.make_drive(uncompressed.name)
        with tempfile.NamedTemporaryFile() as compressed:
            # compress config drive
            with gzip.GzipFile(fileobj=compressed, mode='wb') as gzipped:
                uncompressed.seek(0)
                shutil.copyfileobj(uncompressed, gzipped)
            # base64 encode config drive
            compressed.seek(0)
            return base64.b64encode(compressed.read())
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Deploy an instance.

    :param context: The security context.
    :param instance: The instance object.
    :param image_meta: Image dict returned by nova.image.glance
        that defines the image from which to boot this instance.
    :param injected_files: User files to inject into instance.
    :param admin_password: Administrator password to set in
        instance.
    :param network_info: Instance network information.
    :param block_device_info: Instance block device
        information. Ignored by this driver.
    """
    LOG.debug('Spawn called for instance', instance=instance)

    # The compute manager is meant to know the node uuid, so missing uuid
    # is a significant issue. It may mean we've been passed the wrong data.
    node_uuid = instance.get('node')
    if not node_uuid:
        raise ironic.exc.BadRequest(
            _("Ironic node uuid not supplied to "
              "driver for instance %s.") % instance.uuid)

    node = self._get_node(node_uuid)
    flavor = instance.flavor

    self._add_instance_info_to_node(node, instance, image_meta, flavor)

    # NOTE(Shrews): The default ephemeral device needs to be set for
    # services (like cloud-init) that depend on it being returned by the
    # metadata server. Addresses bug https://launchpad.net/bugs/1324286.
    if flavor.ephemeral_gb:
        instance.default_ephemeral_device = '/dev/sda1'
        instance.save()

    # validate we are ready to do the deploy
    validate_chk = self.ironicclient.call("node.validate", node_uuid)
    if (not validate_chk.deploy.get('result')
            or not validate_chk.power.get('result')):
        # something is wrong. undo what we have done
        self._cleanup_deploy(node, instance, network_info)
        raise exception.ValidationError(_(
            "Ironic node: %(id)s failed to validate."
            " (deploy: %(deploy)s, power: %(power)s)")
            % {'id': node.uuid,
               'deploy': validate_chk.deploy,
               'power': validate_chk.power})

    # prepare for the deploy
    try:
        self._plug_vifs(node, instance, network_info)
        self._start_firewall(instance, network_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error preparing deploy for instance "
                          "%(instance)s on baremetal node %(node)s."),
                      {'instance': instance.uuid,
                       'node': node_uuid})
            self._cleanup_deploy(node, instance, network_info)

    # Config drive
    configdrive_value = None
    if configdrive.required_by(instance):
        extra_md = {}
        if admin_password:
            extra_md['admin_pass'] = admin_password

        try:
            configdrive_value = self._generate_configdrive(
                context, instance, node, network_info, extra_md=extra_md,
                files=injected_files)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = (_LE("Failed to build configdrive: %s") %
                       six.text_type(e))
                LOG.error(msg, instance=instance)
                self._cleanup_deploy(node, instance, network_info)

        LOG.info(_LI("Config drive for instance %(instance)s on "
                     "baremetal node %(node)s created."),
                 {'instance': instance['uuid'], 'node': node_uuid})

    # trigger the node deploy
    try:
        self.ironicclient.call("node.set_provision_state", node_uuid,
                               ironic_states.ACTIVE,
                               configdrive=configdrive_value)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # BUG FIX: previously this built a (format-string, args) tuple
            # named ``msg`` and passed the tuple itself to LOG.error(), so
            # the %(inst)s/%(reason)s placeholders were never interpolated.
            # Pass the format string and arguments separately so the
            # logging machinery interpolates them lazily.
            LOG.error(_LE("Failed to request Ironic to provision instance "
                          "%(inst)s: %(reason)s"),
                      {'inst': instance.uuid,
                       'reason': six.text_type(e)})
            self._cleanup_deploy(node, instance, network_info)

    # Poll Ironic until the node reaches ACTIVE (or deploy fails).
    timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                 instance)
    try:
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
        LOG.info(_LI('Successfully provisioned Ironic node %s'),
                 node.uuid, instance=instance)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error deploying instance %(instance)s on "
                          "baremetal node %(node)s."),
                      {'instance': instance.uuid,
                       'node': node_uuid})
def _unprovision(self, instance, node):
    """This method is called from destroy() to unprovision
    already provisioned node after required checks.

    Requests the "deleted" provision state from Ironic, then polls the
    node until it reports an unprovisioned state (or the instance has
    disappeared), giving up after CONF.ironic.api_max_retries attempts.
    """
    try:
        self.ironicclient.call("node.set_provision_state", node.uuid,
                               "deleted")
    except Exception as e:
        # if the node is already in a deprovisioned state, continue
        # This should be fixed in Ironic.
        # TODO(deva): This exception should be added to
        #             python-ironicclient and matched directly,
        #             rather than via __name__.
        if getattr(e, '__name__', None) != 'InstanceDeployFailure':
            raise

    # using a dict because this is modified in the local method
    data = {'tries': 0}

    def _wait_for_provision_state():
        # Polling callback: raises LoopingCallDone when the node is gone
        # or unprovisioned, NovaException when the retry budget runs out.
        try:
            node = self._validate_instance_and_node(instance)
        except exception.InstanceNotFound:
            LOG.debug("Instance already removed from Ironic",
                      instance=instance)
            raise loopingcall.LoopingCallDone()
        if node.provision_state in (ironic_states.NOSTATE,
                                    ironic_states.CLEANING,
                                    ironic_states.CLEANWAIT,
                                    ironic_states.CLEANFAIL,
                                    ironic_states.AVAILABLE):
            # From a user standpoint, the node is unprovisioned. If a node
            # gets into CLEANFAIL state, it must be fixed in Ironic, but we
            # can consider the instance unprovisioned.
            LOG.debug("Ironic node %(node)s is in state %(state)s, "
                      "instance is now unprovisioned.",
                      dict(node=node.uuid, state=node.provision_state),
                      instance=instance)
            raise loopingcall.LoopingCallDone()

        if data['tries'] >= CONF.ironic.api_max_retries + 1:
            msg = (_("Error destroying the instance on node %(node)s. "
                     "Provision state still '%(state)s'.")
                   % {'state': node.provision_state,
                      'node': node.uuid})
            LOG.error(msg)
            raise exception.NovaException(msg)
        else:
            data['tries'] += 1

        _log_ironic_polling('unprovision', node, instance)

    # wait for the state transition to finish
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state)
    timer.start(interval=CONF.ironic.api_retry_interval).wait()
def destroy(self, context, instance, network_info,
            block_device_info=None, destroy_disks=True):
    """Destroy the specified instance, if it can be found.

    :param context: The security context.
    :param instance: The instance object.
    :param network_info: Instance network information.
    :param block_device_info: Instance block device
        information. Ignored by this driver.
    :param destroy_disks: Indicates if disks should be
        destroyed. Ignored by this driver.
    """
    LOG.debug('Destroy called for instance', instance=instance)
    try:
        node = self._validate_instance_and_node(instance)
    except exception.InstanceNotFound:
        LOG.warning(_LW("Destroy called on non-existing instance %s."),
                    instance.uuid)
        # NOTE(deva): if nova.compute.ComputeManager._delete_instance()
        #             is called on a non-existing instance, the only way
        #             to delete it is to return from this method
        #             without raising any exceptions.
        return

    if node.provision_state in _UNPROVISION_STATES:
        # Node was (at least partially) deployed: tear it down through
        # Ironic's provisioning state machine.
        self._unprovision(instance, node)
    else:
        # NOTE(hshiina): if spawn() fails before ironic starts
        #                provisioning, instance information should be
        #                removed from ironic node.
        self._remove_instance_info_from_node(node, instance)

    self._cleanup_deploy(node, instance, network_info)
    LOG.info(_LI('Successfully unprovisioned Ironic node %s'),
             node.uuid, instance=instance)
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot the specified instance.

    NOTE: Unlike the libvirt driver, this method does not delete
          and recreate the instance; it preserves local state.

    For a SOFT reboot, a soft power cycle is requested first; if the
    hardware driver rejects it with BadRequest, we fall back to a hard
    reboot.

    :param context: The security context.
    :param instance: The instance object.
    :param network_info: Instance network information. Ignored by
        this driver.
    :param reboot_type: Either a HARD or SOFT reboot.
    :param block_device_info: Info pertaining to attached volumes.
        Ignored by this driver.
    :param bad_volumes_callback: Function to handle any bad volumes
        encountered. Ignored by this driver.
    """
    LOG.debug('Reboot(type %s) called for instance',
              reboot_type, instance=instance)
    node = self._validate_instance_and_node(instance)

    hard = True
    if reboot_type == 'SOFT':
        try:
            self.ironicclient.call("node.set_power_state", node.uuid,
                                   'reboot', soft=True)
            hard = False
        except ironic.exc.BadRequest as exc:
            LOG.info(_LI('Soft reboot is not supported by ironic hardware '
                         'driver. Falling back to hard reboot: %s'),
                     exc,
                     instance=instance)

    if hard:
        self.ironicclient.call("node.set_power_state", node.uuid, 'reboot')

    # Block until Ironic reports the power cycle finished.
    timer = loopingcall.FixedIntervalLoopingCall(
        self._wait_for_power_state, instance, 'reboot')
    timer.start(interval=CONF.ironic.api_retry_interval).wait()
    LOG.info(_LI('Successfully rebooted(type %(type)s) Ironic node '
                 '%(node)s'),
             {'type': ('HARD' if hard else 'SOFT'),
              'node': node.uuid},
             instance=instance)
def power_off(self, instance, timeout=0, retry_interval=0):
    """Power off the specified instance.

    NOTE: Unlike the libvirt driver, this method does not delete
          and recreate the instance; it preserves local state.

    :param instance: The instance object.
    :param timeout: time to wait for node to shutdown. If it is set,
        soft power off is attempted before hard power off.
    :param retry_interval: How often to signal node while waiting
        for it to shutdown. Ignored by this driver. Retrying depends on
        Ironic hardware driver.
    """
    LOG.debug('Power off called for instance', instance=instance)
    node = self._validate_instance_and_node(instance)

    if timeout:
        # Attempt a graceful (soft) power off first; any failure, error
        # or timeout falls through to the hard power off below.
        try:
            self.ironicclient.call("node.set_power_state", node.uuid,
                                   'off', soft=True, timeout=timeout)

            timer = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_power_state, instance, 'soft power off')
            timer.start(interval=CONF.ironic.api_retry_interval).wait()
            node = self._validate_instance_and_node(instance)
            if node.power_state == ironic_states.POWER_OFF:
                LOG.info(_LI('Successfully soft powered off Ironic node '
                             '%s'),
                         node.uuid, instance=instance)
                return
            LOG.info(_LI("Failed to soft power off instance "
                         "%(instance)s on baremetal node %(node)s "
                         "within the required timeout %(timeout)d "
                         "seconds due to error: %(reason)s. "
                         "Attempting hard power off."),
                     {'instance': instance.uuid,
                      'timeout': timeout,
                      'node': node.uuid,
                      'reason': node.last_error},
                     instance=instance)
        except ironic.exc.ClientException as e:
            LOG.info(_LI("Failed to soft power off instance "
                         "%(instance)s on baremetal node %(node)s "
                         "due to error: %(reason)s. "
                         "Attempting hard power off."),
                     {'instance': instance.uuid,
                      'node': node.uuid,
                      'reason': e},
                     instance=instance)

    # Hard power off: the default path, and the fallback when a soft
    # power off was attempted but did not complete.
    self.ironicclient.call("node.set_power_state", node.uuid, 'off')
    timer = loopingcall.FixedIntervalLoopingCall(
        self._wait_for_power_state, instance, 'power off')
    timer.start(interval=CONF.ironic.api_retry_interval).wait()
    LOG.info(_LI('Successfully hard powered off Ironic node %s'),
             node.uuid, instance=instance)
def power_on(self, context, instance, network_info,
             block_device_info=None):
    """Power on the Ironic node backing an instance and wait for it.

    NOTE: Unlike the libvirt driver, this method does not delete
          and recreate the instance; it preserves local state.

    :param context: The security context.
    :param instance: The instance object.
    :param network_info: Instance network information. Ignored by
        this driver.
    :param block_device_info: Instance block device
        information. Ignored by this driver.
    """
    LOG.debug('Power on called for instance', instance=instance)
    ironic_node = self._validate_instance_and_node(instance)
    self.ironicclient.call("node.set_power_state", ironic_node.uuid, 'on')

    # Poll until Ironic reports the node powered on.
    poller = loopingcall.FixedIntervalLoopingCall(
        self._wait_for_power_state, instance, 'power on')
    poller.start(interval=CONF.ironic.api_retry_interval).wait()
    LOG.info(_LI('Successfully powered on Ironic node %s'),
             ironic_node.uuid, instance=instance)
def trigger_crash_dump(self, instance):
    """Inject an NMI into the node so the guest can dump crash data.

    How the guest OS reacts in detail depends on its own configuration.

    :param instance: The instance where the crash dump should be triggered.
    :return: None
    """
    LOG.debug('Trigger crash dump called for instance', instance=instance)
    ironic_node = self._validate_instance_and_node(instance)
    self.ironicclient.call("node.inject_nmi", ironic_node.uuid)
    LOG.info(_LI('Successfully triggered crash dump into Ironic node %s'),
             ironic_node.uuid, instance=instance)
def refresh_security_group_rules(self, security_group_id):
    """Reload one security group's rules from the data store.

    Invoked when the rules of a security group are updated; delegates
    entirely to the firewall driver.

    :param security_group_id: The security group id.
    """
    firewall = self.firewall_driver
    firewall.refresh_security_group_rules(security_group_id)
def refresh_instance_security_rules(self, instance):
    """Reload the security-group rules that apply to an instance.

    Invoked when the instance is added to or removed from a security
    group, or when a group it belongs to gains or loses a rule;
    delegates entirely to the firewall driver.

    :param instance: The instance object.
    """
    firewall = self.firewall_driver
    firewall.refresh_instance_security_rules(instance)
def ensure_filtering_rules_for_instance(self, instance, network_info):
    """Install firewall filtering rules for an instance.

    :param instance: The instance object.
    :param network_info: Instance network information.
    """
    firewall = self.firewall_driver
    # Basic filtering first, then the instance-specific filter chain.
    firewall.setup_basic_filtering(instance, network_info)
    firewall.prepare_instance_filter(instance, network_info)
def unfilter_instance(self, instance, network_info):
    """Tear down firewall filtering for an instance.

    :param instance: The instance object.
    :param network_info: Instance network information.
    """
    firewall = self.firewall_driver
    firewall.unfilter_instance(instance, network_info)
def _plug_vifs(self, node, instance, network_info):
    """Attach each VIF in network_info to the given Ironic node."""
    # Render network_info up front: reading it can block on the thread
    # it wraps, and we must not block while holding the logging lock.
    rendered = str(network_info)
    LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
              {'uuid': instance.uuid, 'network_info': rendered})

    for vif in network_info:
        port_id = six.text_type(vif['id'])
        try:
            self.ironicclient.call("node.vif_attach", node.uuid, port_id,
                                   retry_on_conflict=False)
        except ironic.exc.Conflict:
            # NOTE (vsaienko) VIF already attached: nothing to do.
            continue
        except ironic.exc.BadRequest as err:
            msg = (_("Cannot attach VIF %(vif)s to the node %(node)s due "
                     "to error: %(err)s") % {'vif': port_id,
                                             'node': node.uuid, 'err': err})
            LOG.error(msg)
            raise exception.VirtualInterfacePlugException(msg)
def _unplug_vifs(self, node, instance, network_info):
    """Detach each VIF in network_info from the given Ironic node."""
    # Render network_info up front: reading it can block on the thread
    # it wraps, and we must not block while holding the logging lock.
    rendered = str(network_info)
    LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s",
              {'uuid': instance.uuid, 'network_info': rendered})

    if not network_info:
        return
    for vif in network_info:
        port_id = six.text_type(vif['id'])
        try:
            self.ironicclient.call("node.vif_detach", node.uuid,
                                   port_id)
        except ironic.exc.BadRequest:
            # Already detached (or never attached) - nothing to undo.
            LOG.debug("VIF %(vif)s isn't attached to Ironic node %(node)s",
                      {'vif': port_id, 'node': node.uuid})
def plug_vifs(self, instance, network_info):
    """Plug VIFs into networks.

    Looks up the Ironic node backing the instance and attaches every
    VIF in network_info to it.

    :param instance: The instance object.
    :param network_info: Instance network information.
    """
    ironic_node = self._get_node(instance.node)
    self._plug_vifs(ironic_node, instance, network_info)
def unplug_vifs(self, instance, network_info):
    """Unplug VIFs from networks.

    Looks up the Ironic node backing the instance and detaches every
    VIF in network_info from it.

    :param instance: The instance object.
    :param network_info: Instance network information.
    """
    ironic_node = self._get_node(instance.node)
    self._unplug_vifs(ironic_node, instance, network_info)
def rebuild(self, context, instance, image_meta, injected_files,
            admin_password, bdms, detach_block_devices,
            attach_block_devices, network_info=None,
            recreate=False, block_device_info=None,
            preserve_ephemeral=False):
    """Rebuild/redeploy an instance.

    This version of rebuild() allows for supporting the option to
    preserve the ephemeral partition. We cannot call spawn() from
    here because it will attempt to set the instance_uuid value
    again, which is not allowed by the Ironic API. It also requires
    the instance to not have an 'active' provision state, but we
    cannot safely change that. Given that, we implement only the
    portions of spawn() we need within rebuild().

    :param context: The security context.
    :param instance: The instance object.
    :param image_meta: Image object returned by nova.image.glance
        that defines the image from which to boot this instance. Ignored
        by this driver.
    :param injected_files: User files to inject into instance. Ignored
        by this driver.
    :param admin_password: Administrator password to set in
        instance. Ignored by this driver.
    :param bdms: block-device-mappings to use for rebuild. Ignored
        by this driver.
    :param detach_block_devices: function to detach block devices. See
        nova.compute.manager.ComputeManager:_rebuild_default_impl for
        usage. Ignored by this driver.
    :param attach_block_devices: function to attach block devices. See
        nova.compute.manager.ComputeManager:_rebuild_default_impl for
        usage. Ignored by this driver.
    :param network_info: Instance network information. Ignored by
        this driver.
    :param recreate: Boolean value; if True the instance is
        recreated on a new hypervisor - all the cleanup of old state is
        skipped. Ignored by this driver.
    :param block_device_info: Instance block device
        information. Ignored by this driver.
    :param preserve_ephemeral: Boolean value; if True the ephemeral
        must be preserved on rebuild.
    """
    LOG.debug('Rebuild called for instance', instance=instance)

    instance.task_state = task_states.REBUILD_SPAWNING
    instance.save(expected_task_state=[task_states.REBUILDING])

    node_uuid = instance.node
    node = self._get_node(node_uuid)

    # Push (possibly updated) instance/image info onto the node before
    # asking Ironic to redeploy it.
    self._add_instance_info_to_node(node, instance, image_meta,
                                    instance.flavor, preserve_ephemeral)

    # Trigger the node rebuild/redeploy.
    try:
        self.ironicclient.call("node.set_provision_state",
                               node_uuid, ironic_states.REBUILD)
    except (exception.NovaException,         # Retry failed
            ironic.exc.InternalServerError,  # Validations
            ironic.exc.BadRequest) as e:     # Maintenance
        msg = (_("Failed to request Ironic to rebuild instance "
                 "%(inst)s: %(reason)s") % {'inst': instance.uuid,
                                            'reason': six.text_type(e)})
        raise exception.InstanceDeployFailure(msg)

    # Although the target provision state is REBUILD, it will actually go
    # to ACTIVE once the redeploy is finished.
    timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                 instance)
    timer.start(interval=CONF.ironic.api_retry_interval).wait()
    LOG.info(_LI('Instance was successfully rebuilt'), instance=instance)
def network_binding_host_id(self, context, instance):
    """Return the binding:host_id to use for Neutron port creation.

    Always returns None: with Ironic, port binding must happen late.
    Ironic issues a port-update call to Neutron once the node's
    switchport information is known, and Neutron binds the port then.

    NOTE: the late binding is important for security. If an ML2
    mechanism connected the tenant network to the baremetal machine
    before deployment finished (e.g. at port-create time), the tenant
    could reach the deploy agent, which may contain firmware blobs or
    secrets. A null binding:host_id tells Neutron not to connect the
    port yet.

    :param context: request context
    :param instance: nova.objects.instance.Instance that the network
        ports will be associated with
    :returns: None
    """
    # NOTE(vsaienko) Ironic will set binding:host_id later with port-update
    # call when updating mac address or setting binding:profile
    # to tell Neutron to bind the port.
    return None
def _get_node_console_with_reset(self, instance):
    """Acquire console information for an instance.

    If the console is enabled, the console will be re-enabled
    before returning.

    :param instance: nova instance
    :return: a dictionary with below values
        { 'node': ironic node
          'console_info': node console info }
    :raise ConsoleNotAvailable: if console is unavailable
        for the instance
    """
    node = self._validate_instance_and_node(instance)
    node_uuid = node.uuid

    def _get_console():
        """Request ironicclient to acquire node console."""
        try:
            return self.ironicclient.call('node.get_console', node_uuid)
        except (exception.NovaException,         # Retry failed
                ironic.exc.InternalServerError,  # Validations
                ironic.exc.BadRequest) as e:     # Maintenance
            LOG.error(_LE('Failed to acquire console information for '
                          'instance %(inst)s: %(reason)s'),
                      {'inst': instance.uuid,
                       'reason': e})
            raise exception.ConsoleNotAvailable()

    def _wait_state(state):
        """Wait for the expected console mode to be set on node."""
        console = _get_console()
        if console['console_enabled'] == state:
            raise loopingcall.LoopingCallDone(retvalue=console)

        _log_ironic_polling('set console mode', node, instance)

        # Return False to start backing off
        return False

    def _enable_console(mode):
        """Request ironicclient to enable/disable node console."""
        try:
            self.ironicclient.call('node.set_console_mode', node_uuid,
                                   mode)
        except (exception.NovaException,         # Retry failed
                ironic.exc.InternalServerError,  # Validations
                ironic.exc.BadRequest) as e:     # Maintenance
            LOG.error(_LE('Failed to set console mode to "%(mode)s" '
                          'for instance %(inst)s: %(reason)s'),
                      {'mode': mode,
                       'inst': instance.uuid,
                       'reason': e})
            raise exception.ConsoleNotAvailable()

        # Waiting for the console state to change (disabled/enabled)
        try:
            timer = loopingcall.BackOffLoopingCall(_wait_state, state=mode)
            return timer.start(
                starting_interval=_CONSOLE_STATE_CHECKING_INTERVAL,
                timeout=CONF.ironic.serial_console_state_timeout,
                jitter=0.5).wait()
        except loopingcall.LoopingCallTimeOut:
            LOG.error(_LE('Timeout while waiting for console mode to be '
                          'set to "%(mode)s" on node %(node)s'),
                      {'mode': mode,
                       'node': node_uuid})
            raise exception.ConsoleNotAvailable()

    # Acquire the console
    console = _get_console()

    # NOTE: Resetting console is a workaround to force acquiring
    # console when it has already been acquired by another user/operator.
    # IPMI serial console does not support multi session, so
    # resetting console will deactivate any active one without
    # warning the operator.
    if console['console_enabled']:
        try:
            # Disable console
            _enable_console(False)
            # Then re-enable it
            console = _enable_console(True)
        except exception.ConsoleNotAvailable:
            # NOTE: We try to do recover on failure.
            #       But if recover fails, the console may remain in
            #       "disabled" state and cause any new connection
            #       will be refused.
            console = _enable_console(True)

    if console['console_enabled']:
        return {'node': node,
                'console_info': console['console_info']}
    else:
        LOG.debug('Console is disabled for instance %s',
                  instance.uuid)
        raise exception.ConsoleNotAvailable()
def get_serial_console(self, context, instance):
    """Acquire serial console information.

    :param context: request context
    :param instance: nova instance
    :return: ConsoleSerial object
    :raise ConsoleTypeUnavailable: if serial console is unavailable
        for the instance
    """
    LOG.debug('Getting serial console', instance=instance)
    try:
        result = self._get_node_console_with_reset(instance)
    except exception.ConsoleNotAvailable:
        raise exception.ConsoleTypeUnavailable(console_type='serial')

    node = result['node']
    console_info = result['console_info']

    # Only the "socat" console type exposes a TCP endpoint usable by
    # Nova's serial console; everything else is rejected.
    if console_info["type"] != "socat":
        LOG.warning(_LW('Console type "%(type)s" (of ironic node '
                        '%(node)s) does not support Nova serial console'),
                    {'type': console_info["type"],
                     'node': node.uuid},
                    instance=instance)
        raise exception.ConsoleTypeUnavailable(console_type='serial')

    # Parse and check the console url
    url = urlparse.urlparse(console_info["url"])
    try:
        scheme = url.scheme
        hostname = url.hostname
        port = url.port
        if not (scheme and hostname and port):
            raise AssertionError()
    except (ValueError, AssertionError):
        # NOTE(review): ValueError presumably guards url.port raising on
        # a malformed port component - confirm against urlparse behavior.
        LOG.error(_LE('Invalid Socat console URL "%(url)s" '
                      '(ironic node %(node)s)'),
                  {'url': console_info["url"],
                   'node': node.uuid},
                  instance=instance)
        raise exception.ConsoleTypeUnavailable(console_type='serial')

    if scheme == "tcp":
        return console_type.ConsoleSerial(host=hostname,
                                          port=port)
    else:
        LOG.warning(_LW('Socat serial console only supports "tcp". '
                        'This URL is "%(url)s" (ironic node %(node)s).'),
                    {'url': console_info["url"],
                     'node': node.uuid},
                    instance=instance)
        raise exception.ConsoleTypeUnavailable(console_type='serial')
| 44.054348 | 96 | 0.59616 |
79585d52e8413293c5cb00325344e420b534f4ee | 2,287 | py | Python | clicktocall/app.py | Here4Reentry/baltimore-ringdown | e4f18cfa457dadad4089a2f4dc10d00b54ce580f | [
"MIT"
] | null | null | null | clicktocall/app.py | Here4Reentry/baltimore-ringdown | e4f18cfa457dadad4089a2f4dc10d00b54ce580f | [
"MIT"
] | null | null | null | clicktocall/app.py | Here4Reentry/baltimore-ringdown | e4f18cfa457dadad4089a2f4dc10d00b54ce580f | [
"MIT"
] | null | null | null | from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
from flask import url_for
from twilio import twiml
from twilio.rest import TwilioRestClient
# Declare and configure application.
# Twilio credentials and other settings are loaded from local_settings.py.
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('local_settings.py')
# Route for Click to Call demo page.
@app.route('/')
def index():
    """Render the click-to-call demo page."""
    template_kwargs = {'configuration_error': None}
    return render_template('index.html', **template_kwargs)
# Voice Request URL
@app.route('/call', methods=['POST'])
def call():
    """Kick off an outbound click-to-call via the Twilio REST API.

    Expects a ``phoneNumber`` value in the POSTed form data. Returns JSON
    containing either a confirmation ``message`` or an ``error``.
    """
    # Get phone number we need to call
    phone_number = request.form.get('phoneNumber', None)
    # Robustness fix: reject a missing/empty number instead of sending a
    # doomed request to Twilio and surfacing its raw error.
    if not phone_number:
        return jsonify({'error': 'A phoneNumber form value is required.'})

    try:
        twilio_client = TwilioRestClient(app.config['TWILIO_ACCOUNT_SID'],
                                         app.config['TWILIO_AUTH_TOKEN'])
    except KeyError as e:
        # Bug fix: only a missing config key is a "missing configuration
        # variable". The previous broad `except Exception` mislabeled
        # every client-construction failure with this message.
        msg = 'Missing configuration variable: {0}'.format(e)
        return jsonify({'error': msg})

    try:
        twilio_client.calls.create(from_=app.config['TWILIO_CALLER_ID'],
                                   to=phone_number,
                                   url=url_for('.outbound',
                                               _external=True))
    except KeyError as e:
        # TWILIO_CALLER_ID missing from configuration.
        msg = 'Missing configuration variable: {0}'.format(e)
        return jsonify({'error': msg})
    except Exception as e:
        app.logger.error(e)
        return jsonify({'error': str(e)})
    return jsonify({'message': 'Pick up your phone!'})
@app.route('/outbound', methods=['POST'])
def outbound():
    """Build the TwiML for the outbound leg: greet, then dial a volunteer."""
    greeting = (
        "Thank you for calling here for reentry baltimore's ringdown application."
        " Please introduce yourself and remember that these volunteers are here to help."
        "If you are sent to voice mail, record a message then press star or pound in order to continue looking for help."
        " You will now be connected with a returning citizen voulenteer."
    )
    response = twiml.Response()
    response.say(greeting, voice='man')
    # Uncomment this code and replace the number with the number you want
    # your customers to call.
    with response.dial() as dial:
        dial.number("+14438191461")
    return str(response)
# Route for Landing Page after Heroku deploy.
@app.route('/landing.html')
def landing():
    """Render the post-deploy landing page."""
    return render_template('landing.html', configuration_error=None)
| 32.671429 | 130 | 0.629209 |
79585d6326e9646ea43472ed0e111023799aa21f | 1,401 | py | Python | tasks/attestation.py | faasm/experiment-sgx | e5633b7314676606df7f5e1f5244532eb2173c72 | [
"Apache-2.0"
] | null | null | null | tasks/attestation.py | faasm/experiment-sgx | e5633b7314676606df7f5e1f5244532eb2173c72 | [
"Apache-2.0"
] | 7 | 2021-09-24T08:45:51.000Z | 2022-03-10T07:51:18.000Z | tasks/attestation.py | faasm/experiment-sgx | e5633b7314676606df7f5e1f5244532eb2173c72 | [
"Apache-2.0"
] | 1 | 2021-09-29T05:30:01.000Z | 2021-09-29T05:30:01.000Z | from invoke import task
from subprocess import run
from tasks.util.env import (
AZURE_ATTESTATION_PROVIDER_NAME,
AZURE_RESOURCE_GROUP,
AZURE_SGX_LOCATION,
)
def run_az_cmd(cmd):
    """Echo and execute an Azure CLI subcommand, raising on failure."""
    full_cmd = "az {}".format(cmd)
    print(full_cmd)
    run(full_cmd, shell=True, check=True)
@task
def set_up(ctx):
    """
    Set up Azure Attestation Service extension (only needs to be done once)
    """
    # Install the extension, print its version, then register the provider.
    for subcommand in (
        "extension add --name attestation",
        "extension show --name attestation --query version",
        "provider register --name Microsoft.Attestation",
    ):
        run_az_cmd(subcommand)
def run_az_attestation_cmd(action, az_args=None):
    """Run ``az attestation <action>`` against the configured provider.

    :param action: the attestation subcommand (e.g. "create", "show").
    :param az_args: optional extra CLI argument strings to append.
    """
    parts = [
        "az",
        "attestation {}".format(action),
        "--resource-group {}".format(AZURE_RESOURCE_GROUP),
        "--name {}".format(AZURE_ATTESTATION_PROVIDER_NAME),
    ]
    parts.extend(az_args or [])
    command = " ".join(parts)
    print(command)
    run(command, shell=True, check=True)
@task
def provider_create(ctx):
    """
    Create the attestation provider in the configured SGX location
    """
    location_args = ["--location {}".format(AZURE_SGX_LOCATION)]
    run_az_attestation_cmd("create", location_args)
@task
def provider_show(ctx):
    """
    Print information about the attestation provider
    """
    run_az_attestation_cmd("show")
@task
def provider_delete(ctx):
    """
    Tear down the attestation provider
    """
    run_az_attestation_cmd("delete")
| 20.304348 | 75 | 0.640971 |
79585e019d0028e3854779db1311f3e80898c69e | 11,416 | py | Python | varnish/test_varnish.py | skwp/integrations-core | dc4d1bdb369916a574cc8aa43603ae23eecb8de1 | [
"BSD-3-Clause"
] | 1 | 2021-06-17T20:22:35.000Z | 2021-06-17T20:22:35.000Z | varnish/test_varnish.py | skwp/integrations-core | dc4d1bdb369916a574cc8aa43603ae23eecb8de1 | [
"BSD-3-Clause"
] | null | null | null | varnish/test_varnish.py | skwp/integrations-core | dc4d1bdb369916a574cc8aa43603ae23eecb8de1 | [
"BSD-3-Clause"
] | 1 | 2019-10-25T18:59:54.000Z | 2019-10-25T18:59:54.000Z | # stdlib
import os
import re
import subprocess
import mock
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
# 3p
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
# project
from tests.checks.common import AgentCheckTest, Fixtures
# This is a small extract of metrics from varnish. This is meant to test that
# the check gather metrics. This the check return everything from varnish
# without any selection/rename, their is no point in having a complete list.
COMMON_METRICS = [
'varnish.uptime', # metrics where the 'MAIN' prefix was removed
'varnish.sess_conn', # metrics where the 'MAIN' prefix was removed
'varnish.sess_drop', # metrics where the 'MAIN' prefix was removed
'varnish.sess_fail', # metrics where the 'MAIN' prefix was removed
'varnish.client_req_400', # metrics where the 'MAIN' prefix was removed
'varnish.client_req_417', # metrics where the 'MAIN' prefix was removed
'varnish.client_req', # metrics where the 'MAIN' prefix was removed
'varnish.cache_hit', # metrics where the 'MAIN' prefix was removed
'varnish.cache_hitpass', # metrics where the 'MAIN' prefix was removed
'varnish.cache_miss', # metrics where the 'MAIN' prefix was removed
'varnish.backend_conn', # metrics where the 'MAIN' prefix was removed
'varnish.backend_unhealthy', # metrics where the 'MAIN' prefix was removed
'varnish.backend_busy', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_eof', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_bad', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_none', # metrics where the 'MAIN' prefix was removed
'varnish.fetch_1xx', # metrics where the 'MAIN' prefix was removed
'varnish.pools', # metrics where the 'MAIN' prefix was removed
'varnish.busy_sleep', # metrics where the 'MAIN' prefix was removed
'varnish.busy_wakeup', # metrics where the 'MAIN' prefix was removed
'varnish.busy_killed', # metrics where the 'MAIN' prefix was removed
'varnish.sess_queued', # metrics where the 'MAIN' prefix was removed
'varnish.sess_dropped', # metrics where the 'MAIN' prefix was removed
'varnish.n_object', # metrics where the 'MAIN' prefix was removed
'varnish.n_vampireobject', # metrics where the 'MAIN' prefix was removed
'varnish.n_vcl', # metrics where the 'MAIN' prefix was removed
'varnish.n_vcl_avail', # metrics where the 'MAIN' prefix was removed
'varnish.n_vcl_discard', # metrics where the 'MAIN' prefix was removed
'varnish.bans', # metrics where the 'MAIN' prefix was removed
'varnish.bans_completed', # metrics where the 'MAIN' prefix was removed
'varnish.bans_obj', # metrics where the 'MAIN' prefix was removed
'varnish.bans_req', # metrics where the 'MAIN' prefix was removed
'varnish.MGT.child_start',
'varnish.MGT.child_exit',
'varnish.MGT.child_stop',
'varnish.MEMPOOL.busyobj.live',
'varnish.MEMPOOL.busyobj.pool',
'varnish.MEMPOOL.busyobj.allocs',
'varnish.MEMPOOL.busyobj.frees',
'varnish.SMA.s0.c_req',
'varnish.SMA.s0.c_fail',
'varnish.SMA.Transient.c_req',
'varnish.SMA.Transient.c_fail',
'varnish.VBE.boot.default.req',
'varnish.LCK.backend.creat',
'varnish.LCK.backend_tcp.creat',
'varnish.LCK.ban.creat',
'varnish.LCK.ban.locks',
'varnish.LCK.busyobj.creat',
'varnish.LCK.mempool.creat',
'varnish.LCK.vbe.creat',
'varnish.LCK.vbe.destroy',
'varnish.LCK.vcl.creat',
'varnish.LCK.vcl.destroy',
'varnish.LCK.vcl.locks',
]
# Varnish version assumed when FLAVOR_VERSION is not set in the environment.
VARNISH_DEFAULT_VERSION = "4.1.7"
# Command name used to detect varnishadm invocations in the subprocess mocks.
VARNISHADM_PATH = "varnishadm"
# Placeholder secret-file path passed through to the check's command lines.
SECRETFILE_PATH = "secretfile"
# Directory holding the test fixtures (captured varnish command outputs).
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')
# Varnish < 4.x varnishadm output
def debug_health_mock(*args, **kwargs):
    """Fake subprocess output for varnish < 4.x: serve the `debug.health`
    fixture for varnishadm invocations, the stats fixture otherwise."""
    command = args[0]
    fixture = 'stats_output'
    if command[0] == VARNISHADM_PATH or command[1] == VARNISHADM_PATH:
        fixture = 'debug_health_output'
    return (Fixtures.read_file(fixture, sdk_dir=FIXTURE_DIR), "", 0)
# Varnish >= 4.x varnishadm output
def backend_list_mock(*args, **kwargs):
    """Fake subprocess output for varnish >= 4.x: serve the `backend.list`
    fixture for varnishadm invocations, the stats fixture otherwise."""
    command = args[0]
    fixture = 'stats_output'
    if command[0] == VARNISHADM_PATH or command[1] == VARNISHADM_PATH:
        fixture = 'backend_list_output'
    return (Fixtures.read_file(fixture, sdk_dir=FIXTURE_DIR), "", 0)
# Varnish >= 4.x Varnishadm manually set backend to sick
def backend_manual_unhealthy_mock(*args, **kwargs):
    """Fake subprocess output for varnish >= 4.x with a backend manually
    marked sick: serve that fixture for varnishadm, stats otherwise."""
    command = args[0]
    fixture = 'stats_output'
    if command[0] == VARNISHADM_PATH or command[1] == VARNISHADM_PATH:
        fixture = 'backend_manually_unhealthy'
    return (Fixtures.read_file(fixture, sdk_dir=FIXTURE_DIR), "", 0)
@attr(requires='varnish')
class VarnishCheckTest(AgentCheckTest):
    """Agent-check tests for the varnish integration.

    Metric collection is exercised against canned `varnishstat` fixtures;
    backend health checks are exercised by mocking the subprocess layer
    (see the *_mock helpers above) for both pre- and post-4.x varnishadm
    output formats.
    """
    CHECK_NAME = 'varnish'
    def _get_varnish_stat_path(self):
        # Pick the fixture varnishstat wrapper matching the major version
        # under test (FLAVOR_VERSION env var, falling back to the default).
        varnish_version = os.environ.get('FLAVOR_VERSION', VARNISH_DEFAULT_VERSION).split('.', 1)[0]
        return "%s/ci/varnishstat%s" % (os.path.dirname(os.path.abspath(__file__)), varnish_version)
    def _get_config_by_version(self, name=None):
        # Minimal single-instance check config; `name` optionally sets the
        # varnish instance name.
        config = {
            'instances': [{
                'varnishstat': self._get_varnish_stat_path(),
                'tags': ['cluster:webs']
            }]
        }
        if name:
            config['instances'][0]['name'] = name
        return config
    def test_check(self):
        # All common metrics should be emitted exactly once per run.
        config = self._get_config_by_version()
        self.run_check_twice(config)
        for mname in COMMON_METRICS:
            self.assertMetric(mname, count=1, tags=['cluster:webs', 'varnish_name:default'])
    def test_inclusion_filter(self):
        # With an inclusion filter, only matching (SMA.*) metrics are kept.
        config = self._get_config_by_version()
        config['instances'][0]['metrics_filter'] = ['SMA.*']
        self.run_check_twice(config)
        for mname in COMMON_METRICS:
            if 'SMA.' in mname:
                self.assertMetric(mname, count=1, tags=['cluster:webs', 'varnish_name:default'])
            else:
                self.assertMetric(mname, count=0, tags=['cluster:webs', 'varnish_name:default'])
    def test_exclusion_filter(self):
        # FIXME: Bugfix not released yet for version 5 so skip this test for this version:
        # See https://github.com/varnishcache/varnish-cache/issues/2320
        config = self._get_config_by_version()
        config['instances'][0]['metrics_filter'] = ['^SMA.Transient.c_req']
        self.load_check(config)
        version, _ = self.check._get_version_info([self._get_varnish_stat_path()])
        if str(version) == '5.0.0':
            raise SkipTest('varnish bugfix for exclusion blob not released yet for version 5 so skip this test')
        self.run_check_twice(config)
        for mname in COMMON_METRICS:
            if 'SMA.Transient.c_req' in mname:
                self.assertMetric(mname, count=0, tags=['cluster:webs', 'varnish_name:default'])
            elif 'varnish.uptime' not in mname:
                self.assertMetric(mname, count=1, tags=['cluster:webs', 'varnish_name:default'])
    # Test the varnishadm output for version >= 4.x with manually set health
    @mock.patch('_varnish.geteuid')
    @mock.patch('_varnish.Varnish._get_version_info')
    @mock.patch('_varnish.get_subprocess_output', side_effect=backend_manual_unhealthy_mock)
    def test_command_line_manually_unhealthy(self, mock_subprocess, mock_version, mock_geteuid):
        # As root on 4.0.x the check uses `debug.health`; the manually-sick
        # backend must yield a CRITICAL service check.
        mock_version.return_value = LooseVersion('4.0.0'), True
        mock_geteuid.return_value = 0
        config = self._get_config_by_version()
        config['instances'][0]['varnishadm'] = VARNISHADM_PATH
        config['instances'][0]['secretfile'] = SECRETFILE_PATH
        self.run_check(config)
        args, _ = mock_subprocess.call_args
        self.assertEquals(args[0], [VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'debug.health'])
        self.assertServiceCheckCritical("varnish.backend_healthy", tags=['backend:default'], count=1)
        # As non-root on 4.1+, the check shells out via sudo to `backend.list -p`.
        mock_version.return_value = LooseVersion('4.1.0'), True
        mock_geteuid.return_value = 1
        self.run_check(config)
        args, _ = mock_subprocess.call_args
        self.assertEquals(args[0], ['sudo', VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'backend.list', '-p'])
    # Test the Varnishadm output for version >= 4.x
    @mock.patch('_varnish.geteuid')
    @mock.patch('_varnish.Varnish._get_version_info')
    @mock.patch('_varnish.get_subprocess_output', side_effect=backend_list_mock)
    def test_command_line_post_varnish4(self, mock_subprocess, mock_version, mock_geteuid):
        mock_version.return_value = LooseVersion('4.0.0'), True
        mock_geteuid.return_value = 0
        config = self._get_config_by_version()
        config['instances'][0]['varnishadm'] = VARNISHADM_PATH
        config['instances'][0]['secretfile'] = SECRETFILE_PATH
        self.run_check(config)
        args, _ = mock_subprocess.call_args
        self.assertEquals(args[0], [VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'debug.health'])
        self.assertServiceCheckOK("varnish.backend_healthy", tags=['backend:backend2'], count=1)
        mock_version.return_value = LooseVersion('4.1.0'), True
        mock_geteuid.return_value = 1
        self.run_check(config)
        args, _ = mock_subprocess.call_args
        self.assertEquals(args[0], ['sudo', VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'backend.list', '-p'])
    # Test the varnishadm output for Varnish < 4.x
    @mock.patch('_varnish.geteuid')
    @mock.patch('_varnish.Varnish._get_version_info')
    @mock.patch('_varnish.get_subprocess_output', side_effect=debug_health_mock)
    def test_command_line(self, mock_subprocess, mock_version, mock_geteuid):
        mock_version.return_value = LooseVersion('4.0.0'), True
        mock_geteuid.return_value = 0
        config = self._get_config_by_version()
        config['instances'][0]['varnishadm'] = VARNISHADM_PATH
        config['instances'][0]['secretfile'] = SECRETFILE_PATH
        self.run_check(config)
        args, _ = mock_subprocess.call_args
        self.assertEquals(args[0], [VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'debug.health'])
        self.assertServiceCheckOK("varnish.backend_healthy", tags=['backend:default'], count=1)
        mock_version.return_value = LooseVersion('4.1.0'), True
        mock_geteuid.return_value = 1
        self.run_check(config)
        args, _ = mock_subprocess.call_args
        self.assertEquals(args[0], ['sudo', VARNISHADM_PATH, '-S', SECRETFILE_PATH, 'backend.list', '-p'])
    # This the docker image is in a different repository, we check that the
    # verison requested in the FLAVOR_VERSION is the on running inside the
    # container.
    def test_version(self):
        varnishstat = self._get_varnish_stat_path()
        output = subprocess.check_output([varnishstat, "-V"])
        res = re.search(r"varnish-(\d+\.\d\.\d)", output)
        if res is None:
            raise Exception("Could not retrieve varnish version from docker")
        version = res.groups()[0]
        self.assertEquals(version, os.environ.get('FLAVOR_VERSION', VARNISH_DEFAULT_VERSION))
| 46.595918 | 112 | 0.670725 |
79585ec67849a88e85c198280a94b42449d532e5 | 3,101 | py | Python | applications/classification/predict_multiclass_svm.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,753 | 2015-01-02T11:34:13.000Z | 2022-03-25T07:04:27.000Z | applications/classification/predict_multiclass_svm.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,404 | 2015-01-02T19:31:41.000Z | 2022-03-09T10:58:22.000Z | applications/classification/predict_multiclass_svm.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 1,156 | 2015-01-03T01:57:21.000Z | 2022-03-26T01:06:28.000Z | #!/usr/bin/env python
# Copyright (c) The Shogun Machine Learning Toolbox
# Written (w) 2014 Daniel Pyrathon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the Shogun Development Team.
import argparse
import logging
from contextlib import closing
from shogun import (LibSVMFile, SparseRealFeatures, MulticlassLabels,
MulticlassLibSVM, SerializableHdf5File,
MulticlassAccuracy)
from utils import get_features_and_labels
# Module-level logger, keyed on the file path (not __name__).
LOGGER = logging.getLogger(__file__)
def parse_arguments():
    """Parse command-line arguments for the evaluation script.

    Returns:
        argparse.Namespace with ``classifier``, ``testset`` and ``output``
        attributes (all required string paths).
    """
    # The original description used a backslash continuation, which embedded
    # source indentation inside the user-visible help text, and misspelled
    # "against"; both are fixed here.
    parser = argparse.ArgumentParser(
        description="Test a serialized SVM classifier against a "
                    "SVMLight test file")
    parser.add_argument('--classifier', required=True, type=str,
                        help='Path to training dataset in LibSVM format.')
    parser.add_argument('--testset', required=True, type=str,
                        help='Path to the SVMLight test file')
    parser.add_argument('--output', required=True, type=str,
                        help='File path to write predicted labels')
    return parser.parse_args()
def main(classifier, testset, output):
    """Load a serialized multiclass SVM, predict labels for a SVMLight
    test set, and write one integer label per line to ``output``.

    Args:
        classifier: Path to a shogun SerializableHdf5File with the model.
        testset: Path to the test data in LibSVM/SVMLight format.
        output: Path where predicted labels are written, one per line.
    """
    LOGGER.info("SVM Multiclass evaluation")
    svm = MulticlassLibSVM()
    serialized_classifier = SerializableHdf5File(classifier, 'r')
    # closing() guarantees the HDF5 handle is released after deserialization.
    with closing(serialized_classifier):
        svm.load_serializable(serialized_classifier)
    # NOTE(review): test_labels is unpacked but never used — predictions are
    # only written out, not scored against the ground truth here.
    test_feats, test_labels = get_features_and_labels(LibSVMFile(testset))
    predicted_labels = svm.apply(test_feats)
    with open(output, 'w') as f:
        for cls in predicted_labels.get_labels():
            f.write("%s\n" % int(cls))
    LOGGER.info("Predicted labels saved in: '%s'" % output)
# Script entry point: parse CLI args and run the evaluation.
if __name__ == '__main__':
    args = parse_arguments()
    main(args.classifier, args.testset, args.output)
| 40.802632 | 82 | 0.767494 |
795860b3d353cf25604abb28fb8f186854acde73 | 5,530 | py | Python | tests/providers/google/cloud/sensors/test_pubsub.py | elgalu/apache-airflow | f74da5025d9f635bc49e631883fc1537cd16b620 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 7 | 2018-11-19T12:05:13.000Z | 2020-01-17T08:30:38.000Z | tests/providers/google/cloud/sensors/test_pubsub.py | elgalu/apache-airflow | f74da5025d9f635bc49e631883fc1537cd16b620 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | tests/providers/google/cloud/sensors/test_pubsub.py | elgalu/apache-airflow | f74da5025d9f635bc49e631883fc1537cd16b620 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from typing import Any, Dict, List
from unittest import mock
import pytest
from google.cloud.pubsub_v1.types import ReceivedMessage
from airflow.exceptions import AirflowSensorTimeout
from airflow.providers.google.cloud.sensors.pubsub import PubSubPullSensor
# Shared fixtures for constructing PubSubPullSensor instances in the tests.
TASK_ID = 'test-task-id'
TEST_PROJECT = 'test-project'
TEST_SUBSCRIPTION = 'test-subscription'
class TestPubSubPullSensor(unittest.TestCase):
    """Unit tests for PubSubPullSensor with the PubSubHook mocked out.

    Covers poke() with/without messages, execute() (happy path, timeout,
    and the messages_callback hook), and the acknowledgement path.
    """
    def _generate_messages(self, count):
        # Build `count` fake ReceivedMessage objects with 1-based ack ids.
        return [
            ReceivedMessage(
                ack_id="%s" % i,
                message={
                    "data": f'Message {i}'.encode('utf8'),
                    "attributes": {"type": "generated message"},
                },
            )
            for i in range(1, count + 1)
        ]
    def _generate_dicts(self, count):
        # Dict form of the generated messages (what execute() returns by default).
        return [ReceivedMessage.to_dict(m) for m in self._generate_messages(count)]
    @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
    def test_poke_no_messages(self, mock_hook):
        # An empty pull means the sensor has not been satisfied yet.
        operator = PubSubPullSensor(
            task_id=TASK_ID,
            project_id=TEST_PROJECT,
            subscription=TEST_SUBSCRIPTION,
        )
        mock_hook.return_value.pull.return_value = []
        assert operator.poke({}) is False
    @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
    def test_poke_with_ack_messages(self, mock_hook):
        # With ack_messages=True, a successful poke acknowledges everything pulled.
        operator = PubSubPullSensor(
            task_id=TASK_ID,
            project_id=TEST_PROJECT,
            subscription=TEST_SUBSCRIPTION,
            ack_messages=True,
        )
        generated_messages = self._generate_messages(5)
        mock_hook.return_value.pull.return_value = generated_messages
        assert operator.poke({}) is True
        mock_hook.return_value.acknowledge.assert_called_once_with(
            project_id=TEST_PROJECT,
            subscription=TEST_SUBSCRIPTION,
            messages=generated_messages,
        )
    @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
    def test_execute(self, mock_hook):
        # Default execute() converts pulled messages to dicts.
        operator = PubSubPullSensor(
            task_id=TASK_ID,
            project_id=TEST_PROJECT,
            subscription=TEST_SUBSCRIPTION,
            poke_interval=0,
        )
        generated_messages = self._generate_messages(5)
        generated_dicts = self._generate_dicts(5)
        mock_hook.return_value.pull.return_value = generated_messages
        response = operator.execute({})
        mock_hook.return_value.pull.assert_called_once_with(
            project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=5, return_immediately=True
        )
        assert generated_dicts == response
    @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
    def test_execute_timeout(self, mock_hook):
        # With no messages ever arriving, a 1s timeout must raise.
        operator = PubSubPullSensor(
            task_id=TASK_ID,
            project_id=TEST_PROJECT,
            subscription=TEST_SUBSCRIPTION,
            poke_interval=0,
            timeout=1,
        )
        mock_hook.return_value.pull.return_value = []
        with pytest.raises(AirflowSensorTimeout):
            operator.execute({})
            # NOTE(review): this assertion is unreachable — execute() raises
            # on the previous line, so pytest.raises exits the block first.
            mock_hook.return_value.pull.assert_called_once_with(
                project_id=TEST_PROJECT,
                subscription=TEST_SUBSCRIPTION,
                max_messages=5,
                return_immediately=False,
            )
    @mock.patch('airflow.providers.google.cloud.sensors.pubsub.PubSubHook')
    def test_execute_with_messages_callback(self, mock_hook):
        # A user-supplied messages_callback replaces the default dict
        # conversion; its return value becomes execute()'s return value.
        generated_messages = self._generate_messages(5)
        messages_callback_return_value = 'asdfg'
        def messages_callback(
            pulled_messages: List[ReceivedMessage],
            context: Dict[str, Any],
        ):
            assert pulled_messages == generated_messages
            assert isinstance(context, dict)
            for key in context.keys():
                assert isinstance(key, str)
            return messages_callback_return_value
        # Wrap in a Mock so call counts can be asserted below.
        messages_callback = mock.Mock(side_effect=messages_callback)
        operator = PubSubPullSensor(
            task_id=TASK_ID,
            project_id=TEST_PROJECT,
            subscription=TEST_SUBSCRIPTION,
            poke_interval=0,
            messages_callback=messages_callback,
        )
        mock_hook.return_value.pull.return_value = generated_messages
        response = operator.execute({})
        mock_hook.return_value.pull.assert_called_once_with(
            project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=5, return_immediately=True
        )
        messages_callback.assert_called_once()
        assert response == messages_callback_return_value
| 35 | 108 | 0.670524 |
79586125eaf4f4c389d5ba8ab9c6c610fdf08e88 | 1,620 | py | Python | chainer0/datasets/_mnist_helper.py | koki0702/chainer0 | 02a2a63fb1dc68d9f4bdc7b84c3fca66e4ba5df1 | [
"MIT"
] | null | null | null | chainer0/datasets/_mnist_helper.py | koki0702/chainer0 | 02a2a63fb1dc68d9f4bdc7b84c3fca66e4ba5df1 | [
"MIT"
] | null | null | null | chainer0/datasets/_mnist_helper.py | koki0702/chainer0 | 02a2a63fb1dc68d9f4bdc7b84c3fca66e4ba5df1 | [
"MIT"
] | null | null | null | import gzip
import struct
import numpy
import six
from chainer0.datasets import download
from chainer0.datasets import tuple_dataset
def make_npz(path, urls):
    """Download a gzipped MNIST image/label pair (IDX format) and cache it
    as a compressed ``.npz`` archive.

    Args:
        path: Destination path for the ``.npz`` archive.
        urls: ``(images_url, labels_url)`` tuple.

    Returns:
        dict with ``'x'``: uint8 array of shape (N, 784) and
        ``'y'``: uint8 array of shape (N,).

    Raises:
        RuntimeError: If the image and label files disagree on N.
    """
    x_url, y_url = urls
    x_path = download.cached_download(x_url)
    y_path = download.cached_download(y_url)
    with gzip.open(x_path, 'rb') as fx, gzip.open(y_path, 'rb') as fy:
        fx.read(4)  # skip IDX magic number
        fy.read(4)
        N, = struct.unpack('>i', fx.read(4))
        if N != struct.unpack('>i', fy.read(4))[0]:
            raise RuntimeError('wrong pair of MNIST images and labels')
        fx.read(8)  # skip the row/column dimension fields
        # Bulk-read and decode with numpy instead of the original
        # byte-at-a-time ord() loop (same values, C-speed parsing).
        # .copy() yields writable arrays, matching the original behavior.
        x = numpy.frombuffer(
            fx.read(N * 784), dtype=numpy.uint8).reshape(N, 784).copy()
        y = numpy.frombuffer(fy.read(N), dtype=numpy.uint8).copy()
    numpy.savez_compressed(path, x=x, y=y)
    return {'x': x, 'y': y}
def preprocess_mnist(raw, withlabel, ndim, scale, image_dtype, label_dtype,
                     rgb_format):
    """Reshape, cast and rescale raw MNIST pixels.

    ndim selects the image layout: 1 -> (N, 784), 2 -> (N, 28, 28),
    3 -> (N, 1, 28, 28) (or (N, 3, 28, 28) when rgb_format is true).
    Pixels are cast to image_dtype and scaled into [0, scale]. Returns a
    TupleDataset of (images, labels) when withlabel, else just the images.
    """
    images = raw['x']
    if ndim == 1:
        pass  # keep the flattened (N, 784) layout
    elif ndim == 2:
        images = images.reshape(-1, 28, 28)
    elif ndim == 3:
        images = images.reshape(-1, 1, 28, 28)
        if rgb_format:
            target_shape = (len(images), 3) + images.shape[2:]
            images = numpy.broadcast_to(images, target_shape)
    else:
        raise ValueError('invalid ndim for MNIST dataset')
    # astype() copies, so the in-place scaling below is safe even after
    # the read-only broadcast_to view above.
    images = images.astype(image_dtype)
    images *= scale / 255.
    if not withlabel:
        return images
    labels = raw['y'].astype(label_dtype)
    return tuple_dataset.TupleDataset(images, labels)
795861342af4fdd8e2baf13e844d445c311e361f | 1,831 | py | Python | p3orm/utils.py | rafalstapinski/porm | 5dfd797d846b156dbadec769c68f8295bd5ba8de | [
"MIT"
] | 4 | 2022-02-21T05:28:41.000Z | 2022-02-23T01:02:48.000Z | p3orm/utils.py | rafalstapinski/p3orm | 5dfd797d846b156dbadec769c68f8295bd5ba8de | [
"MIT"
] | null | null | null | p3orm/utils.py | rafalstapinski/p3orm | 5dfd797d846b156dbadec769c68f8295bd5ba8de | [
"MIT"
] | null | null | null | from typing import Any, Dict, List, Optional, Tuple, Type, Union, get_args, get_origin
import asyncpg
from pypika.queries import QueryBuilder
from pypika.terms import BasicCriterion, ContainsCriterion, Criterion, Equality, NullValue, Parameter, RangeCriterion
def record_to_kwargs(record: asyncpg.Record) -> Dict[str, Any]:
    """Materialize a record's column/value pairs as a plain dict."""
    return dict(record.items())
def with_returning(query: QueryBuilder, returning: Optional[str] = "*") -> str:
    """Render *query* to SQL and append a RETURNING clause (default: all columns)."""
    rendered = query.get_sql()
    return "{} RETURNING {}".format(rendered, returning)
# NOTE(review): the name is misspelled ("parameterize") — kept for API
# compatibility with existing callers.
def paramaterize(criterion: Criterion, query_args: List[Any] = None) -> Tuple[Criterion, List[Any]]:
    """Rewrite a pypika criterion to use positional ($N) placeholders.

    The literal values stripped from the criterion are appended to
    ``query_args`` (created if not supplied), in placeholder order.

    Returns:
        (rewritten_criterion, query_args). Unrecognized criterion types are
        returned unchanged.
    """
    if query_args is None:
        query_args = []
    # Next free placeholder index: one past the args already collected.
    # (The original used max(len(query_args), 1), which reused $1 and
    # collided with an existing placeholder whenever query_args was
    # non-empty; len + 1 is equivalent for the empty case and correct
    # for chained calls.)
    param_start_index = len(query_args) + 1
    if isinstance(criterion, BasicCriterion):
        # e.g. `col = 'x'` -> `col = $N`
        param = Parameter(f"${param_start_index}")
        query_args.append(criterion.right.value)
        return BasicCriterion(criterion.comparator, criterion.left, param, criterion.alias), query_args
    elif isinstance(criterion, ContainsCriterion):
        # e.g. `col IN (...)` -> `col = ANY ($N)` with a list argument;
        # NULLs are dropped from the list (ANY cannot match NULL anyway).
        param = Parameter(f"ANY (${param_start_index})")
        query_args.append([i.value for i in criterion.container.values if not isinstance(i, NullValue)])
        return BasicCriterion(Equality.eq, criterion.term, param, criterion.alias), query_args
    elif isinstance(criterion, RangeCriterion):
        # e.g. BETWEEN -> two consecutive placeholders.
        start_param = Parameter(f"${param_start_index}")
        end_param = Parameter(f"${param_start_index + 1}")
        query_args += [criterion.start.value, criterion.end.value]
        # There are several RangeCriterion, create a new one with the same subclass
        return criterion.__class__(criterion.term, start_param, end_param, criterion.alias), query_args
    return criterion, query_args
def is_optional(_type: Type) -> bool:
    """Return True when *_type* is Optional[X] (a Union including NoneType)."""
    if get_origin(_type) is not Union:
        return False
    return type(None) in get_args(_type)
| 39.804348 | 117 | 0.725287 |
795861959f3def76331479c7021bd3052d77cacb | 8,123 | py | Python | keylime/ca_impl_cfssl.py | mrcdb/keylime | b7c944584ae69e32f440bac376dffe0a0a335858 | [
"BSD-2-Clause"
] | null | null | null | keylime/ca_impl_cfssl.py | mrcdb/keylime | b7c944584ae69e32f440bac376dffe0a0a335858 | [
"BSD-2-Clause"
] | null | null | null | keylime/ca_impl_cfssl.py | mrcdb/keylime | b7c944584ae69e32f440bac376dffe0a0a335858 | [
"BSD-2-Clause"
] | null | null | null | '''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2016 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import common
import keylime_logging
import json
import ConfigParser
import os
import subprocess
import tornado_requests
from M2Crypto import EVP, X509
import secure_mount
import base64
import time
import socket
# Module-wide logger and keylime configuration.
logger = keylime_logging.init_logging('ca_impl_cfssl')
config = ConfigParser.SafeConfigParser()
config.read(common.CONFIG_FILE)
# Handle to the locally spawned `cfssl serve` process; None when not running.
cfsslproc = None
def post_cfssl(url,data):
    """POST *data* to the local cfssl API, retrying while the connection
    is refused (the server may still be starting up).

    Retries up to 10 times with a 0.05s pause; connection-refused failures
    past that limit, and any other exception, are re-raised.
    """
    numtries = 0
    maxr = 10
    retry=0.05
    while True:
        try:
            response = tornado_requests.request("POST",url,params=None,data=data,context=None)
            break
        except Exception as e:
            # Only connection-refused errors are retried; anything else
            # propagates immediately.
            if tornado_requests.is_refused(e):
                numtries+=1
                if numtries >= maxr:
                    logger.error("Quiting after max number of retries to connect to cfssl server")
                    raise e
                logger.info("Connection to cfssl refused %d/%d times, trying again in %f seconds..."%(numtries,maxr,retry))
                time.sleep(retry)
                continue
            else:
                raise e
    return response
def start_cfssl(cmdline=""):
    """Launch a local `cfssl serve` process and block until it reports
    that it is listening.

    Args:
        cmdline: Extra arguments appended to the cfssl command line.

    Raises:
        Exception: If the process exits immediately after launch.
    """
    global cfsslproc
    cmd = "cfssl serve -loglevel=1 %s "%cmdline
    env = os.environ.copy()
    env['PATH']=env['PATH']+":/usr/local/bin"
    # make sure cfssl isn't running
    os.system('pkill -f cfssl')
    cfsslproc = subprocess.Popen(cmd,env=env,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,universal_newlines=True)
    if cfsslproc.returncode is not None:
        # Fixed format string: the original "%:" was an invalid conversion
        # specifier and raised ValueError instead of this message.
        raise Exception("Unable to launch %s: failed with code %s"%(cmd,cfsslproc.returncode))
    logger.debug("Waiting for cfssl to start...")
    # Read stdout until cfssl announces its listening address.
    while True:
        line = cfsslproc.stdout.readline()
        if "Now listening on" in line:
            break
    time.sleep(0.2)# give cfssl a little more time to get started
    logger.debug("cfssl started successfully")
def stop_cfssl():
    """Terminate the local cfssl server, if one was started."""
    global cfsslproc
    if cfsslproc is None:
        return
    cfsslproc.kill()
    # Belt-and-braces: also kill any stray cfssl processes.
    os.system("pkill -f cfssl")
    cfsslproc = None
def mk_cacert():
    """Create a new CA via a temporary local cfssl server.

    Builds the CSR from the [ca] section of the keylime config, posts it
    to cfssl's init_ca endpoint, and returns
    (certificate, private_key, public_key) as M2Crypto objects.

    Raises:
        Exception: On a non-200 response or an unsuccessful cfssl reply.
    """
    csr = {"CN": config.get('ca','cert_ca_name'),
           "key": {
               "algo": "rsa",
               "size": config.getint('ca','cert_bits')
           },
           "names": [
               {
                   "C": config.get('ca','cert_country'),
                   "L": config.get('ca','cert_locality'),
                   "O": config.get('ca','cert_organization'),
                   "OU": config.get('ca','cert_org_unit'),
                   "ST": config.get('ca','cert_state')
               }
           ]
           }
    data = json.dumps(csr)
    try:
        start_cfssl()
        response = post_cfssl("http://127.0.0.1:8888/api/v1/cfssl/init_ca",data=data)
    finally:
        # Always tear the server down, even if the request failed.
        stop_cfssl()
    if response.status_code!=200:
        raise Exception("Unable to create CA Error: %s"%(response.body))
    body = json.loads(response.body)
    if body['success']:
        pk = EVP.load_key_string(str(body['result']['private_key']))
        cert = X509.load_cert_string(str(body['result']['certificate']))
        pkey = cert.get_pubkey()
        return cert, pk, pkey
    else:
        raise Exception("Unable to create CA")
def mk_signed_cert(cacert,ca_pk,name,serialnum):
    """Issue a certificate for *name*, signed by the CA, via local cfssl.

    The CA private key is written (unencrypted) to the secure tmpfs mount
    only for the lifetime of the cfssl server, then removed.

    Returns:
        (certificate, private_key) as M2Crypto objects.

    Raises:
        Exception: On a non-200 response or an unsuccessful cfssl reply.

    NOTE(review): the ``cacert`` and ``serialnum`` parameters are never
    referenced in this body (the CA cert is read from the 'cacert.crt'
    file instead) — confirm whether they are kept for API compatibility.
    """
    csr = {"request": {
        "CN": name,
        "hosts": [
            name,
        ],
        "key": {
            "algo": "rsa",
            "size": config.getint('ca','cert_bits')
        },
        "names": [
            {
                "C": config.get('ca','cert_country'),
                "L": config.get('ca','cert_locality'),
                "O": config.get('ca','cert_organization'),
                "OU": config.get('ca','cert_org_unit'),
                "ST": config.get('ca','cert_state')
            }
        ]
    }
    }
    # check CRL distribution point
    disturl = config.get('ca','cert_crl_dist')
    if disturl == 'default':
        disturl = "http://%s:%s/crl.der"%(socket.getfqdn(),common.CRL_PORT)
    # set up config for cfssl server
    cfsslconfig  = {
        "signing": {
            "default": {
                "usages": ["client auth","server auth","key agreement","key encipherment","signing","digital signature","data encipherment"],
                "expiry": "8760h",
                "crl_url": disturl,
            }
        }
    }
    data = json.dumps(csr)
    secdir = secure_mount.mount()
    try:
        # need to temporarily write out the private key with no password
        # to tmpfs
        ca_pk.save_key('%s/ca-key.pem'%secdir, None)
        with open('%s/cfsslconfig.json'%secdir,'w') as f:
            json.dump(cfsslconfig, f)
        cmdline = "-config=%s/cfsslconfig.json"%secdir
        priv_key = os.path.abspath("%s/ca-key.pem"%secdir)
        cmdline += " -ca-key %s -ca cacert.crt"%(priv_key)
        start_cfssl(cmdline)
        response = post_cfssl("http://127.0.0.1:8888/api/v1/cfssl/newcert",data=data)
    finally:
        # Tear down the server and scrub the key material from tmpfs.
        stop_cfssl()
        os.remove('%s/ca-key.pem'%secdir)
        os.remove('%s/cfsslconfig.json'%secdir)
    if response.status_code!=200:
        raise Exception("Unable to create cert for %s. Error: %s"%(name,response.body))
    body = json.loads(response.body)
    if body['success']:
        pk = EVP.load_key_string(str(body['result']['private_key']))
        cert = X509.load_cert_string(str(body['result']['certificate']))
        return cert, pk
    else:
        raise Exception("Unable to create cert for %s"%name)
def gencrl(serials,cert,ca_pk):
    """Generate a DER-encoded CRL revoking the given serial numbers.

    Args:
        serials: Serial numbers to revoke.
        cert: CA certificate (PEM string) passed to cfssl.
        ca_pk: CA private key bytes, temporarily written to tmpfs.

    Returns:
        The CRL as raw DER bytes (cfssl returns it base64-encoded).

    Raises:
        Exception: On a non-200 response or an unsuccessful cfssl reply.
    """
    request = {"certificate": cert,
               "serialNumber": serials,
               "issuingKey": ca_pk,
               "expireTime": ""
               }
    data = json.dumps(request)
    secdir = secure_mount.mount()
    try:
        # need to temporarily write out the private key with no password
        # to tmpfs
        priv_key = os.path.abspath("%s/ca-key.pem"%secdir)
        with open(priv_key,'wb') as f:
            f.write(ca_pk)
        cmdline = " -ca-key %s -ca cacert.crt"%(priv_key)
        start_cfssl(cmdline)
        response = post_cfssl("http://127.0.0.1:8888/api/v1/cfssl/gencrl",data=data)
    finally:
        # Tear down the server and scrub the key from tmpfs.
        stop_cfssl()
        os.remove('%s/ca-key.pem'%secdir)
    if response.status_code!=200:
        raise Exception("Unable to create crl for cert serials %s. Error: %s"%(serials,response.body))
    body = json.loads(response.body)
    if body['success']:
        #pk = EVP.load_key_string(str(body['result']['private_key']))
        #cert = X509.load_cert_string(str(body['result']['certificate']))
        #return cert, pk
        retval = base64.b64decode(body['result'])
    else:
        raise Exception("Unable to create crl for cert serials %s. Error: %s"%(serials,body['errors']))
    return retval
# mk_cacert()
# mk_signed_cert("", "", "hello", None) | 34.862661 | 141 | 0.586483 |
795861f9149e7a17e6f3b9fe31d0bfdde0e5566f | 4,131 | py | Python | openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0017.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0017.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0017.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : python驱动pyog
Case Name : openGauss模式连接数据库,insert..return
Description :
1.配置pg_hba入口
2.连接数据库
3.insert..return
4.断开连接
Expect :
1.执行成功
2.连接成功,db.state返回'idle'
3.执行成功
4.执行成功,db.state返回'closed'
History :
"""
import os
import unittest
import py_opengauss
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
class ConnPython17(unittest.TestCase):
    """End-to-end check: connect via py_opengauss and exercise INSERT ... RETURNING."""

    def setUp(self):
        # Handles to the primary-node helpers used throughout the case.
        self.pri_user = Node('PrimaryDbUser')
        self.pri_sh = CommonSH('PrimaryDbUser')
        self.constant = Constant()
        self.LOG = Logger()
        text = '----Opengauss_Function_Connect_Python_Case0017 start----'
        self.LOG.info(text)

    def test_conn(self):
        # Step 1: add a pg_hba entry so this host may authenticate via sha256.
        text = '----step1: 配置pg_hba入口 expect: 成功----'
        self.LOG.info(text)
        # Pick this machine's broadcast-capable address from `ifconfig` output.
        # NOTE(review): assumes a Linux host with ifconfig available — confirm.
        host_cmd = "ifconfig -a|grep inet6 -a2|" \
                   "grep broadcast|awk '{print $2}'"
        self.host = os.popen(host_cmd).readlines()[0].strip()
        # Reload the server config with an extra host-based auth rule.
        guc_cmd = f'source {macro.DB_ENV_PATH}; ' \
                  f'gs_guc reload -D {macro.DB_INSTANCE_PATH} ' \
                  f'-h "host {self.pri_user.db_name} {self.pri_user.db_user} ' \
                  f'{self.host}/32 sha256"'
        self.LOG.info(guc_cmd)
        guc_res = self.pri_user.sh(guc_cmd).result()
        self.LOG.info(guc_res)
        self.assertIn(self.constant.GSGUC_SUCCESS_MSG, guc_res, text)
        # Step 2: open a connection; 'idle' state means the login succeeded.
        text = '----step2: 连接数据库 expect: 成功----'
        self.LOG.info(text)
        conn_info = f'opengauss://{self.pri_user.db_user}:' \
                    f'{self.pri_user.db_password}@{self.pri_user.db_host}:' \
                    f'{self.pri_user.db_port}/{self.pri_user.db_name}'
        db = py_opengauss.open(conn_info)
        self.assertEqual('idle', db.state, text)
        # Step 3: verify INSERT ... RETURNING for *, one column, and a column list.
        text = '----step3: insert..return expect: 成功----'
        self.LOG.info(text)
        cmd = 'drop table if exists t_py_1'
        sql = db.prepare(cmd)
        self.assertEqual(sql(), (self.constant.TABLE_DROP_SUCCESS,
                                 None), text)
        cmd = 'create table t_py_1(id int, name varchar(20))'
        sql = db.prepare(cmd)
        self.assertEqual(sql(), (self.constant.CREATE_TABLE_SUCCESS,
                                 None), text)
        # RETURNING * echoes every inserted row in full.
        cmd = "insert into t_py_1 values(1, 'apple'), " \
              "(3, 'orange'), (4, 'grape') returning *"
        sql = db.prepare(cmd)
        expect = [(1, 'apple'), (3, 'orange'), (4, 'grape')]
        self.assertEqual(sql(), expect, text)
        # RETURNING id yields one-element tuples.
        cmd = "insert into t_py_1 values(1, 'apple'), " \
              "(3, 'orange'), (4, 'grape') returning id"
        sql = db.prepare(cmd)
        expect = [(1,), (3,), (4,)]
        self.assertEqual(sql(), expect, text)
        # RETURNING id, name yields the named columns only.
        cmd = "insert into t_py_1 values(1, 'apple'), " \
              "(3, 'orange'), (4, 'grape') returning id, name"
        sql = db.prepare(cmd)
        expect = [(1, 'apple'), (3, 'orange'), (4, 'grape')]
        self.assertEqual(sql(), expect, text)
        # Clean up the scratch table.
        cmd = 'drop table if exists t_py_1'
        sql = db.prepare(cmd)
        self.assertEqual(sql(), (self.constant.TABLE_DROP_SUCCESS,
                                 None), text)
        # Step 4: close the connection and confirm its state flips to 'closed'.
        text = '----step4: 断开连接 expect: 成功----'
        self.LOG.info(text)
        db.close()
        self.assertEqual('closed', db.state, text)

    def tearDown(self):
        text = '----run teardown----'
        self.LOG.info(text)
        text = '----Opengauss_Function_Connect_Python_Case0017 end----'
        self.LOG.info(text)
| 34.140496 | 84 | 0.593561 |
795862a173b6d3e10e597b2b8d0d40c6a370b915 | 7,143 | py | Python | ic_shop/api/v1/urls.py | hellohufan/beautyServer | 8a5109881b7065fd742f0a330b142248a7cdcef6 | [
"BSD-2-Clause"
] | null | null | null | ic_shop/api/v1/urls.py | hellohufan/beautyServer | 8a5109881b7065fd742f0a330b142248a7cdcef6 | [
"BSD-2-Clause"
] | 5 | 2020-06-06T01:11:25.000Z | 2021-09-08T02:02:34.000Z | ic_shop/api/v1/urls.py | hellohufan/beautyServer | 8a5109881b7065fd742f0a330b142248a7cdcef6 | [
"BSD-2-Clause"
] | null | null | null | from django.conf.urls import url, include
from rest_framework.documentation import include_docs_urls
from ic_shop.api.v1 import apiViews
from ic_shop.api.v1 import authentication as jwt_auth
from rest_framework_nested import routers
# Root DRF router; every top-level resource is registered on it below.
router = routers.DefaultRouter()

# NOTE(review): duplicate of the `url, include` import at the top of the file —
# harmless but redundant.
from django.conf.urls import url, include
# from rest_framework import routers
from .apiViews import *

# NOTE(review): `base_name` is the spelling used by older DRF releases; newer
# DRF renamed it to `basename` — confirm the pinned rest_framework version.

"""
brand router
"""
router.register(r'brand', apiViews.BrandViewSet)

"""
shopItem router
"""
router.register(r'shopItem', apiViews.ShopItemViewSet)

# /shopItem/1/tag/
shopItem_tag_router = routers.NestedSimpleRouter(router, r'shopItem', lookup='shopItem')
shopItem_tag_router.register(r'tag', apiViews.TagViewSet, base_name='tag')

# /shopItem/1/comment/
shopItem_comment_router = routers.NestedSimpleRouter(router, r'shopItem', lookup='shopItem')
shopItem_comment_router.register(r'comment', apiViews.CommentViewSet, base_name='comment')

# /shopItem/1/itemOrder/
shopItem_itemOrder_router = routers.NestedSimpleRouter(router, r'shopItem', lookup='shopItem')
shopItem_itemOrder_router.register(r'itemOrder', apiViews.ItemOrderViewSet, base_name='itemOrder')

"""
item order router
"""
router.register(r'itemOrder', apiViews.ItemOrderViewSet)

"""
tag router
"""
router.register(r'tag', apiViews.TagViewSet)

# /tag/1/shopItem/
tag_item_router = routers.NestedSimpleRouter(router, r'tag', lookup='tag')
tag_item_router.register(r'shopItem', apiViews.ShopItemViewSet, base_name='shopItem')

"""
user router
"""
router.register(r'userType', apiViews.ProfileTypeViewSet)
router.register(r'user', apiViews.ProfileViewSet)

"""
user device router
"""
user_device_router = routers.NestedSimpleRouter(router, r'user', lookup='user')
user_device_router.register(r'device', apiViews.DeviceViewSet, base_name='device')

"""
deviceType -> device router
"""
router.register(r'deviceType', apiViews.DeviceTypeViewSet)
deviceType_device_router = routers.NestedSimpleRouter(router, r'deviceType', lookup='deviceType')
deviceType_device_router.register(r'device', apiViews.DeviceViewSet, base_name='device')

"""
deviceSlot -> shopItem -> many to many storage router
"""
# router.register(r'shopItemStorage', apiViews.ShopItemStorageViewSet)

"""
device slot router
"""
# router.register(r'deviceSlot', apiViews.DeviceSlotViewSet)
# deviceSlot_storage_router = routers.NestedSimpleRouter(router, r'deviceSlot', lookup='deviceSlot')
# deviceSlot_storage_router.register(r'shopItem', apiViews.ShopItemViewSet, base_name='shopItem')

"""
device -> shop item router
"""
router.register(r'device', apiViews.DeviceViewSet)

"""
device -> device slot router
"""
# device_deviceSlot_router = routers.NestedSimpleRouter(router, r'device', lookup='device')
# device_deviceSlot_router.register(r'deviceSlot', apiViews.DeviceSlotViewSet, base_name='deviceSlot')

"""
device -> device ads router
"""
device_deviceAds_router = routers.NestedSimpleRouter(router, r'device', lookup='device')
device_deviceAds_router.register(r'deviceAds', apiViews.DeviceAdsViewSet, base_name='deviceAds')

# """
# device -> device location router
# """
# device_deviceSlot_router = routers.NestedSimpleRouter(router, r'device', lookup='device')
# device_deviceSlot_router.register(r'deviceSlot', apiViews.DeviceSlotViewSet, base_name='deviceSlot')
#

router.register(r'uploadAvatar', apiViews.ProfileAvatarViewSet)

"""
device ads router
"""
router.register(r'deviceAds', apiViews.DeviceAdsViewSet)

"""
comment router
"""
router.register(r'comment', apiViews.CommentViewSet)

"""
company router company/user/ company/device/
"""
router.register(r'company', apiViews.CompanyViewSet)
company_user_router = routers.NestedSimpleRouter(router, r'company', lookup='company')
company_user_router.register(r'user', apiViews.ProfileViewSet, base_name='user')
company_device_router = routers.NestedSimpleRouter(router, r'company', lookup='company')
company_device_router.register(r'device', apiViews.DeviceViewSet, base_name='device')

"""
location router
"""
router.register(r'location', apiViews.DeviceLocationViewSet)

"""
areaInfo router
"""
router.register(r'areaInfo', apiViews.AreaInfoRegionViewSet)

"""
device user router
"""
device_user_router = routers.NestedSimpleRouter(router, r'device', lookup='device')
device_user_router.register(r'user', apiViews.ProfileViewSet, base_name='user')

"""
device status router
"""
router.register(r'deviceMacStatus', apiViews.DeviceMacStatusViewSet)
device_macStatus_router = routers.NestedSimpleRouter(router, r'device', lookup='device')
# NOTE(review): this registers ProfileViewSet under the 'deviceMacStatus' route —
# looks like it should be DeviceMacStatusViewSet; confirm before relying on it.
device_macStatus_router.register(r'deviceMacStatus', apiViews.ProfileViewSet, base_name='deviceMacStatus')

"""
storage history
"""
# router.register(r'storageOperationHistory', apiViews.ShopItemStorageHistoryViewSet)

"""
operation code
"""
router.register(r'deviceOperationCode', apiViews.DeviceOperationCodeViewSet)

"""
item order status
"""
router.register(r'itemOrderStatus', apiViews.ItemOrderStatusViewSet)

"""
company device update package
"""
router.register(r'updatePackage', apiViews.CompanyDeviceUpdatePackageViewSet)
company_deviceUpdate_router = routers.NestedSimpleRouter(router, r'company', lookup='company') #company/4/updatePackage/
company_deviceUpdate_router.register(r'updatePackage', apiViews.CompanyDeviceUpdatePackageViewSet, base_name='updatePackage')
# URL table: the root router plus each nested router is mounted under /api/,
# followed by the browsable docs and a handful of hand-written endpoints.
# Fix: the shopItem_itemOrder_router include appeared twice (copy-paste),
# producing a duplicate route; the second occurrence has been removed.
urlpatterns = [
    url(r'^api/', include(router.urls)),
    url(r'^api/', include(tag_item_router.urls)),  # tag/1/shopItem/
    url(r'^api/', include(deviceType_device_router.urls)),  # deviceType/1/device/
    url(r'^api/', include(shopItem_comment_router.urls)),  # shopItem/1/comment/
    url(r'^api/', include(shopItem_tag_router.urls)),  # shopItem/1/tag/
    url(r'^api/', include(shopItem_itemOrder_router.urls)),  # shopItem/1/itemOrder/
    # url(r'^api/', include(device_deviceSlot_router.urls)), # device/1/deviceSlot/
    # url(r'^api/', include(deviceSlot_storage_router.urls)), # deviceSlot/1/shopItemStorage/
    # url(r'^api/', include(device_deviceAds_router.urls)), # device/1/deviceAds/
    url(r'^api/', include(company_user_router.urls)),  # company/1/user/
    url(r'^api/', include(company_device_router.urls)),  # company/1/device/
    url(r'^api/', include(user_device_router.urls)),  # user/1/device/
    url(r'^api/', include(device_user_router.urls)),  # device/1/user/
    url(r'^api/', include(company_deviceUpdate_router.urls)),  # company/1/updatePackage/
    url(r'^doc/', include_docs_urls(title='mBusi API', permission_classes=[])),
    # customize api (plain APIViews / function views, not router-driven)
    url(r'^api/deviceStatus/', apiViews.DeviceStatusReport.as_view()),
    url(r'^api/itemOrderRefund/', apiViews.itemOrderRefund),
    url(r'^api/systemTime/', apiViews.systemTime),
    url(r'^api/weather/', apiViews.weather),
    url(r'^api/login/', LoginViewSet.as_view()),
    # url(r'^api/login/', jwt_auth.obtain_jwt_token),
    url(r'^api/refresh/', jwt_auth.refresh_jwt_token),
    url(r'^api/verify/', jwt_auth.verify_jwt_token),
]
| 31.606195 | 125 | 0.754865 |
795862fdef3d8b70c30af0da6f626d2054393c3b | 2,199 | py | Python | gnosis/xml/pickle/test/test_rawp_mx.py | LehmRob/od-conv | 1ffed758f27e906a1e36f5d137180a1808a82c0a | [
"MIT"
] | 3 | 2018-09-29T14:14:28.000Z | 2022-01-05T03:45:50.000Z | library/CanFestival/objdictgen/gnosis/xml/pickle/test/test_rawp_mx.py | Lembed/STM32duino-gcc-Projects | 67829e9cd1388601daf9815b0561da557e0b9f82 | [
"MIT"
] | 1 | 2017-06-17T08:15:28.000Z | 2017-06-17T08:15:28.000Z | library/CanFestival/objdictgen/gnosis/xml/pickle/test/test_rawp_mx.py | Lembed/STM32duino-gcc-Projects | 67829e9cd1388601daf9815b0561da557e0b9f82 | [
"MIT"
] | 1 | 2019-12-08T15:11:55.000Z | 2019-12-08T15:11:55.000Z | "Test rawpickles by turning off mxDateTime extension --fpm"
import mx.DateTime as date
import UserList, UserDict
from types import *
import gnosis.xml.pickle as xml_pickle
import gnosis.xml.pickle.ext as xml_pickle_ext
from gnosis.xml.pickle.util import setParanoia
from gnosis.xml.pickle.ext._mutate import __disable_extensions
from gnosis.xml.pickle.ext import mutate
import funcs
# Select the XML parser implementation before any pickling happens.
funcs.set_parser()
class foo_class:
    # Empty container class: the round-trip test below assigns the attributes
    # t, d, ud, l, ul and tup to instances after construction.
    def __init__(self):
        pass
def checkfoo(o1, o2):
    """Verify o1 and o2 are foo_class instances with equal pickled attributes.

    Raises AssertionError on any mismatch.  The original code used string
    exceptions (``raise "ERROR(0)"``), which are illegal since Python 2.6 and
    would themselves raise TypeError instead of reporting the real failure.
    """
    # Both objects must be (exactly) foo_class instances.
    if o1.__class__ != foo_class or \
       o2.__class__ != foo_class:
        raise AssertionError("ERROR(0)")
    # Every attribute set by the test must round-trip unchanged.
    for attr in ['t', 'd', 'ud', 'l', 'ul', 'tup']:
        if getattr(o1, attr) != getattr(o2, attr):
            raise AssertionError("ERROR(1)")
def printfoo(obj):
    # Debug helper (Python 2 print statements): dump the type and value of
    # every attribute the round-trip test sets on a foo_class instance.
    print type(obj.t), obj.t
    print type(obj.d), obj.d['One'], obj.d['Two']
    print type(obj.ud), obj.ud['One'], obj.ud['Two']
    print type(obj.l), obj.l[0], obj.l[1]
    print type(obj.ul), obj.ul[0], obj.ul[1]
    print type(obj.tup), obj.tup[0], obj.tup[1]
# Build a fixture object carrying mxDateTime values in every container shape
# the pickler handles, then round-trip it twice with extensions disabled.
foo = foo_class()

# allow imported classes to be restored
setParanoia(0)

# test both code paths
# path 1 - non-nested ('attr' nodes)
foo.t = date.DateTime(2000,1,2,3,4,5.6)

# path 2 - nested ('item/key/val' nodes)
foo.d = { 'One': date.DateTime(2001,2,3,4,5,6.7),
          'Two': date.DateTime(2002,3,4,5,6,7.8) }

foo.ud = UserDict.UserDict()
foo.ud['One'] = date.DateTime(2003,4,5,6,7,8.9)
foo.ud['Two'] = date.DateTime(2004,5,6,7,8,9.10)

foo.l = []
foo.l.append( date.DateTime(2005,6,7,8,9,10.11) )
foo.l.append( date.DateTime(2006,7,8,9,10,11.12) )

foo.ul = UserList.UserList()
foo.ul.append( date.DateTime(2007,8,9,10,11,12.13) )
foo.ul.append( date.DateTime(2008,9,10,11,12,13.14) )

foo.tup = (date.DateTime(2009,10,11,12,13,14.15),
           date.DateTime(2010,11,12,13,14,15.16))

#print "---PRE-PICKLE---"
#printfoo(foo)

# Force the raw-pickle path: without the mxDateTime extension the dates must
# survive via the generic object pickling machinery.
__disable_extensions()

x1 = xml_pickle.dumps(foo)
#print x1

#print "---POST-PICKLE---"
bar = xml_pickle.loads(x1)
#printfoo(bar)
#bar.hi()

# First round trip must reproduce every attribute exactly.
checkfoo(foo,bar)

# same thing on copy
x2 = xml_pickle.dumps(bar)
baz = xml_pickle.loads(x2)
checkfoo(bar,baz)

#print "---XML from original---"
#print x1
#print "---XML from copy---"
#print x2

print "** OK **"
| 23.147368 | 62 | 0.654843 |
7958640595c1e26be4f639631f379b02aa372079 | 16,103 | py | Python | tests/test_ciftify_clean_image.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | 84 | 2016-09-19T16:34:37.000Z | 2022-03-31T05:47:05.000Z | tests/test_ciftify_clean_image.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | 108 | 2016-09-11T15:18:31.000Z | 2022-03-06T07:03:12.000Z | tests/test_ciftify_clean_image.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | 296 | 2016-09-15T17:18:26.000Z | 2022-01-17T18:16:11.000Z | #!/usr/bin/env python3
import os
import unittest
import copy
import logging
import shutil
import random
import importlib
import pandas as pd
import json
import pytest
from unittest.mock import patch
from nibabel import Nifti1Image
import numpy as np
import nilearn.image
import ciftify.bin.ciftify_clean_img as ciftify_clean_img
# Silence all log output from the module under test while the suite runs.
logging.disable(logging.CRITICAL)
def _check_input_readble_side_effect(path):
'''just returns the path'''
return(path)
def _pandas_read_side_effect(path):
'''return and empty data frame'''
return(pd.DataFrame())
class TestUserSettings(unittest.TestCase):
    """Unit tests for ciftify_clean_img.UserSettings argument parsing.

    File-system checks are patched out (check_input_readable /
    check_output_writable) so the parser can be exercised with fake paths;
    the patch decorators inject mocks bottom-up into the test arguments.
    """

    # Baseline docopt dictionary; each test deep-copies and tweaks it.
    docopt_args = {
        '<func_input>': '/path/to/func/file.nii.gz',
        '--output-file': None,
        '--clean-config': None,
        '--drop-dummy-TRs': None,
        '--no-cleaning': False,
        '--detrend': False,
        '--standardize': False,
        '--confounds-tsv': None,
        '--cf-cols': None,
        '--cf-sq-cols': None,
        '--cf-td-cols': None,
        '--cf-sqtd-cols': None,
        '--low-pass': None,
        '--high-pass': None,
        '--tr': '2.0',
        '--smooth-fwhm': None,
        '--left-surface': None,
        '--right-surface': None }

    # A JSON clean-config document fed in place of a config file on disk.
    json_config = '''
    {
    "--detrend": true,
    "--standardize": true,
    "--low-pass": 0.1,
    "--high-pass": 0.01
    }
    '''

    @patch('ciftify.bin.ciftify_clean_img.load_json_file', side_effect = json.loads)
    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_that_updated_arg_is_present(self, mock_readable, mock_writable, mock_json):
        # Values from the clean-config JSON should override the docopt defaults.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--clean-config'] = self.json_config
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.high_pass == 0.01, "high_pass not set to config val"
        assert settings.detrend == True, "detrend not set to config val"

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exist_gracefully_if_json_not_readable(self, mock_readable, mock_writable):
        # An unreadable clean-config path must abort with sys.exit.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/func.nii.gz'
        missing_json = '/wrong/path/missing.json'
        arguments['--clean-config'] = missing_json
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exists_gracefully_if_input_is_gifti(self, mock_readable, mock_writable):
        # GIFTI inputs are unsupported and must abort.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/func.L.func.gii'
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_dtseries_input_returned(self, mock_readable, mock_writable):
        # A .dtseries.nii input should be classified as cifti.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.dtseries.nii'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.func.type == "cifti"
        assert settings.func.path == '/path/to/input/myfunc.dtseries.nii'

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_nifti_input_returned_correctly(self, mock_readable, mock_writable):
        # A .nii.gz input should be classified as nifti.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.nii.gz'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.func.type == "nifti"
        assert settings.func.path == '/path/to/input/myfunc.nii.gz'

    def test_exists_gracefully_if_output_not_writable(self):
        # No patches here: the real writability check fails on this path.
        wrong_func = '/wrong/path/to/input/myfunc.nii.gz'
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = wrong_func
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_proper_output_returned_for_nifti(self, mock_readable, mock_writable):
        # Default output name embeds the smoothing kernel: _clean_s<fwhm>.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.nii.gz'
        arguments['--smooth-fwhm'] = 8
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.output_func == '/path/to/input/myfunc_clean_s8.nii.gz'

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_proper_output_returned_for_cifti(self, mock_readable, mock_writable):
        # No smoothing requested -> s0 suffix on a cifti output.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.dtseries.nii'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.output_func == '/path/to/input/myfunc_clean_s0.dtseries.nii'

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exits_when_confounds_tsv_not_given(self, mock_readable, mock_writable):
        # Confound columns without a confounds TSV is a fatal config error.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--cf-cols'] = 'one,two,three'
        arguments['--confounds-tsv'] = None
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('pandas.read_csv', return_value = pd.DataFrame(columns = ['one', 'two', 'three']))
    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_list_arg_returns_list_for_multi(self, mock_readable, mock_writable, mock_pdread):
        # Comma-separated column spec becomes a list of names.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--cf-cols'] = 'one,two,three'
        arguments['--confounds-tsv'] = '/path/to/confounds.tsv'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.cf_cols == ['one','two','three']

    @patch('pandas.read_csv', return_value = pd.DataFrame(columns = ['one', 'two', 'three']))
    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_list_arg_returns_list_for_one_item(self, mock_readable, mock_writable, mock_pdread):
        # A single column name still yields a one-element list.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--cf-cols'] = 'one'
        arguments['--confounds-tsv'] = '/path/to/confounds.tsv'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.cf_cols == ['one']

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_list_arg_returns_empty_list_for_none(self, mock_readable, mock_writable):
        # Absent spec -> empty list, not None.
        arguments = copy.deepcopy(self.docopt_args)
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.cf_cols == []

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_bandpass_filter_returns_none_if_none(self, mock_readable, mock_writable):
        arguments = copy.deepcopy(self.docopt_args)
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.high_pass == None

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_bandpass_filter_returns_float_if_float(self, mock_readable, mock_writable):
        # String CLI value should be coerced to float.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--high-pass'] = '3.14'
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.high_pass == 3.14

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exists_gracefully_if_filter_not_float(self, mock_readable, mock_writable):
        # Non-numeric filter value must abort rather than crash later.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['--high-pass'] = 'three'
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_exists_gracefully_if_surfaces_not_present(self, mock_readable, mock_writable):
        # Smoothing a cifti input requires surface files; missing ones abort.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.dtseries.nii'
        arguments['--smooth-fwhm'] = 8
        arguments['--left-surface'] = None
        with pytest.raises(SystemExit):
            settings = ciftify_clean_img.UserSettings(arguments)

    @patch('ciftify.utils.check_input_readable', side_effect = _check_input_readble_side_effect)
    @patch('ciftify.utils.check_output_writable', return_value = True)
    def test_fwhm_is_0_if_not_smoothing(self, mock_readable, mock_writable):
        # No --smooth-fwhm means a zero kernel, not None.
        arguments = copy.deepcopy(self.docopt_args)
        arguments['<func_input>'] = '/path/to/input/myfunc.nii.gz'
        arguments['--smooth-fwhm'] = None
        settings = ciftify_clean_img.UserSettings(arguments)
        assert settings.smooth.fwhm == 0
class TestMangleConfounds(unittest.TestCase):
    """Tests for ciftify_clean_img.mangle_confounds.

    mangle_confounds is expected to drop the first start_from_tr rows, keep
    only the requested columns, and derive squared (_sq), lagged (_lag) and
    squared-lagged (_sqlag) versions of the named columns.
    """

    # Five-TR confound table; 'z' is never selected so it must be dropped.
    input_signals = pd.DataFrame(data = {'x': [1,2,3,4,5],
                                         'y': [0,0,1,2,4],
                                         'z': [8,8,8,8,8]})

    class SettingsStub(object):
        # Minimal stand-in for UserSettings carrying only the fields
        # mangle_confounds reads.
        def __init__(self, start_from, confounds,
                     cf_cols, cf_sq_cols, cf_td_cols, cf_sqtd_cols):
            self.start_from_tr = start_from
            self.confounds = confounds
            self.cf_cols = cf_cols
            self.cf_sq_cols = cf_sq_cols
            self.cf_td_cols = cf_td_cols
            self.cf_sqtd_cols = cf_sqtd_cols

    def test_starts_from_correct_row(self):
        # start_from=2 drops the first two TRs of every selected column.
        settings = self.SettingsStub(start_from = 2,
                                     confounds = self.input_signals,
                                     cf_cols = ['x', 'y'],
                                     cf_sq_cols = ['y'],
                                     cf_td_cols = [],
                                     cf_sqtd_cols = [])
        confound_signals = ciftify_clean_img.mangle_confounds(settings)
        test_output = confound_signals['y'].values
        expected_output = np.array([1,2,4])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)

    def test_that_omitted_cols_not_output(self):
        # 'z' is not requested anywhere, so it must not appear in the output.
        settings = self.SettingsStub(start_from = 2,
                                     confounds = self.input_signals,
                                     cf_cols = ['x', 'y'],
                                     cf_sq_cols = ['y'],
                                     cf_td_cols = [],
                                     cf_sqtd_cols = [])
        confound_signals = ciftify_clean_img.mangle_confounds(settings)
        assert 'z' not in list(confound_signals.columns.values)

    def test_td_col_is_returned(self):
        # y_lag is y shifted by one TR (lag computed before row-dropping).
        settings = self.SettingsStub(start_from = 2,
                                     confounds = self.input_signals,
                                     cf_cols = ['x', 'y'],
                                     cf_sq_cols = ['y'],
                                     cf_td_cols = ['y'],
                                     cf_sqtd_cols = [])
        confound_signals = ciftify_clean_img.mangle_confounds(settings)
        test_output = confound_signals['y_lag'].values
        expected_output = np.array([0,1,2])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)

    def test_sq_is_returned(self):
        # y_sq is the element-wise square of the trimmed y column.
        settings = self.SettingsStub(start_from = 2,
                                     confounds = self.input_signals,
                                     cf_cols = ['x', 'y'],
                                     cf_sq_cols = ['y'],
                                     cf_td_cols = ['y'],
                                     cf_sqtd_cols = [])
        confound_signals = ciftify_clean_img.mangle_confounds(settings)
        test_output = confound_signals['y_sq'].values
        expected_output = np.array([1,4,16])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)

    def test_sqtd_col_is_returned(self):
        # y_sqlag is the square of the lagged column.
        settings = self.SettingsStub(start_from = 2,
                                     confounds = self.input_signals,
                                     cf_cols = ['x', 'y'],
                                     cf_sq_cols = ['y'],
                                     cf_td_cols = ['y'],
                                     cf_sqtd_cols = ['y'])
        confound_signals = ciftify_clean_img.mangle_confounds(settings)
        test_output = confound_signals['y_sqlag'].values
        expected_output = np.array([0,1,4])
        assert np.allclose(test_output, expected_output, equal_nan = True), \
            "{} not equal to {}".format(test_output, expected_output)

    def test_all_cols_named_as_expected(self):
        # Requesting every derivative of y yields all four derived columns.
        settings = self.SettingsStub(start_from = 2,
                                     confounds = self.input_signals,
                                     cf_cols = ['x', 'y'],
                                     cf_sq_cols = ['y'],
                                     cf_td_cols = ['y'],
                                     cf_sqtd_cols = ['y'])
        confound_signals = ciftify_clean_img.mangle_confounds(settings)
        for coln in ['x', 'y', 'y_sq', 'y_lag', 'y_sqlag']:
            assert coln in list(confound_signals.columns.values)
@patch('nilearn.image.clean_img')
class TestCleanImage(unittest.TestCase):
    # nilearn.image.clean_img is patched at class level so each test can
    # assert whether it was invoked; the mock arrives as the last argument.
    def test_nilearn_not_called_not_indicated(self, nilearn_clean):
        # With no detrend/standardize/filter options set, the wrapper must
        # skip nilearn entirely and leave the image untouched.
        class SettingsStub(object):
            def __init__(self):
                self.detrend = False
                self.standardize = False
                self.high_pass = None
                self.low_pass = None
        input_img = 'fake_img.nii.gz'
        confound_signals = None
        settings = SettingsStub()
        output_img = ciftify_clean_img.clean_image_with_nilearn(
            input_img, confound_signals, settings)
        nilearn_clean.assert_not_called()
def test_drop_image():
    """image_drop_dummy_trs(img, 2) keeps only frames 3..5 of a 5-frame series."""
    # Five 2x2x2 single-frame volumes with constant values 1..5.
    frames = [
        Nifti1Image(np.ones((2, 2, 2, 1)) + offset, affine=np.eye(4))
        for offset in range(5)
    ]
    full_series = nilearn.image.concat_imgs(frames)
    trimmed = ciftify_clean_img.image_drop_dummy_trs(full_series, 2)
    # The first two dummy frames are gone; values 3, 4, 5 remain in order.
    assert np.allclose(trimmed.get_fdata()[1, 1, 1, :], np.array([3, 4, 5]))
    assert trimmed.header.get_data_shape() == (2, 2, 2, 3)
| 41.825974 | 97 | 0.643048 |
795864a2e7b5e845dd42716f60ed9e83e5706ef0 | 1,722 | py | Python | 18-placeBlockFromArgs.py | hashbangstudio/Python-Minecraft-Examples | 5e4632022a99a7ccc130972d4e8da9d09572492d | [
"BSD-3-Clause"
] | 4 | 2016-06-07T15:30:52.000Z | 2020-04-13T15:16:28.000Z | 18-placeBlockFromArgs.py | hashbangstudio/Python-Minecraft-Examples | 5e4632022a99a7ccc130972d4e8da9d09572492d | [
"BSD-3-Clause"
] | null | null | null | 18-placeBlockFromArgs.py | hashbangstudio/Python-Minecraft-Examples | 5e4632022a99a7ccc130972d4e8da9d09572492d | [
"BSD-3-Clause"
] | 3 | 2016-11-27T22:27:16.000Z | 2021-12-12T14:53:11.000Z | #!/usr/bin/env python
#import the needed modules
from mcpi.minecraft import *
from mcpi.block import *
import sys
if __name__ == "__main__":
    # Place a block in the Minecraft world, either just beside the player or
    # at explicit coordinates given on the command line.
    # Fix: the two final Python-2 `print str(...)` statements were converted
    # to function-call form, matching the rest of the script and keeping it
    # valid under Python 3; also fixed the "aguments" typo in the usage text.

    # Create a connection to the Minecraft game
    mc = Minecraft.create()

    # Get the player position
    playerPosition = mc.player.getTilePos()

    # Set coordinates (position) for the block that is slightly away from the player
    # Note y is the vertical coordinate, x and z the horizontal
    blockXposn = playerPosition.x + 1
    blockZposn = playerPosition.z + 1
    # set the y coordinate to be at the height of the world at the (x,z) horizontal coordinate
    blockYposn = mc.getHeight(blockXposn, blockZposn)

    # Default block type; replaced below once the arguments parse.
    blockToPlace = AIR

    numOfArgs = len(sys.argv)
    if numOfArgs == 2 or numOfArgs == 3:
        # First argument: "blockId,blockData", optionally wrapped in parentheses.
        blockArgs = sys.argv[1].replace('(', '').replace(')', '').split(',')
        blockId = int(blockArgs[0])
        blockData = int(blockArgs[1])
        blockToPlace = Block(blockId, blockData)
        if numOfArgs == 3:
            # Second argument: explicit "x,y,z" overrides the player-relative position.
            coords = sys.argv[2].replace('(', '').replace(')', '').split(',')
            print("using coords = " + str(coords))
            blockXposn = int(coords[0])
            blockYposn = int(coords[1])
            blockZposn = int(coords[2])
    else:
        print("To place block next to player:")
        print("Usage : python script.py blockId,blockData")
        print("To place block at a specific coordinate")
        print("Usage : python script.py blockId,blockData x,y,z")
        print("Expected 1 or 2 arguments but received " + str(numOfArgs - 1))
        sys.exit()

    print(str(blockToPlace))
    print(str((blockXposn, blockYposn, blockZposn)))

    mc.setBlock(blockXposn, blockYposn, blockZposn, blockToPlace)
| 35.142857 | 94 | 0.639373 |
7958655bc653551045f19c8d435d785d50ec5f58 | 2,127 | py | Python | test_frame/test_delay_task/test_delay_push.py | DJMIN/funboost | 7570ca2909bb0b44a1080f5f98aa96c86d3da9d4 | [
"Apache-2.0"
] | 120 | 2021-12-26T03:27:12.000Z | 2022-03-31T16:20:44.000Z | test_frame/test_delay_task/test_delay_push.py | mooti-barry/funboost | 2cd9530e2c4e5a52fc921070d243d402adbc3a0e | [
"Apache-2.0"
] | 18 | 2021-12-31T06:26:37.000Z | 2022-03-31T16:16:33.000Z | test_frame/test_delay_task/test_delay_push.py | mooti-barry/funboost | 2cd9530e2c4e5a52fc921070d243d402adbc3a0e | [
"Apache-2.0"
] | 27 | 2021-12-26T16:12:31.000Z | 2022-03-26T17:43:08.000Z | # 需要用publish,而不是push,这个前面已经说明了,如果要传函数入参本身以外的参数到中间件,需要用publish。
# 不然框架分不清哪些是函数入参,哪些是控制参数。如果无法理解就,就好好想想琢磨下celery的 apply_async 和 delay的关系。
from test_frame.test_delay_task.test_delay_consume import f
import datetime
import time
from funboost import PriorityConsumingControlConfig
"""
测试发布延时任务,不是发布后马上就执行函数。
countdown 和 eta 只能设置一个。
countdown 指的是 离发布多少秒后执行,
eta是指定的精确时间运行一次。
misfire_grace_time 是指定消息轮到被消费时候,如果已经超过了应该运行的时间多少秒之内,仍然执行。
misfire_grace_time 如果设置为None,则消息一定会被运行,不会由于大连消息积压导致消费时候已近太晚了而取消运行。
misfire_grace_time 如果不为None,必须是大于等于1的整数,此值表示消息轮到消费时候超过本应该运行的时间的多少秒内仍然执行。
此值的数字设置越小,如果由于消费慢的原因,就有越大概率导致消息被丢弃不运行。如果此值设置为1亿,则几乎不会导致放弃运行(1亿的作用接近于None了)
如果还是不懂这个值的作用,可以百度 apscheduler 包的 misfire_grace_time 概念
"""
for i in range(1, 20):
time.sleep(1)
# 消息发布10秒后再执行。如果消费慢导致任务积压,misfire_grace_time为None,即使轮到消息消费时候离发布超过10秒了仍然执行。
f.publish({'x': i}, priority_control_config=PriorityConsumingControlConfig(countdown=10))
# 规定消息在17点56分30秒运行,如果消费慢导致任务积压,misfire_grace_time为None,即使轮到消息消费时候已经过了17点56分30秒仍然执行。
f.publish({'x': i * 10}, priority_control_config=PriorityConsumingControlConfig(
eta=datetime.datetime(2021, 5, 19, 17, 56, 30) + datetime.timedelta(seconds=i)))
# 消息发布10秒后再执行。如果消费慢导致任务积压,misfire_grace_time为30,如果轮到消息消费时候离发布超过40 (10+30) 秒了则放弃执行,
# 如果轮到消息消费时候离发布时间是20秒,由于 20 < (10 + 30),则仍然执行
f.publish({'x': i * 100}, priority_control_config=PriorityConsumingControlConfig(
countdown=10, misfire_grace_time=30))
# 规定消息在17点56分30秒运行,如果消费慢导致任务积压,如果轮到消息消费时候已经过了17点57分00秒,
# misfire_grace_time为30,如果轮到消息消费时候超过了17点57分0秒 则放弃执行,
# 如果如果轮到消息消费时候是17点56分50秒则执行。
f.publish({'x': i * 1000}, priority_control_config=PriorityConsumingControlConfig(
eta=datetime.datetime(2021, 5, 19, 17, 56, 30) + datetime.timedelta(seconds=i),
misfire_grace_time=30)) # 按指定的时间运行一次。
# 这个设置了消息由于消息堆积导致运行的时候比本应该运行的时间如果小于1亿秒,就仍然会被执行,所以几乎肯定不会被放弃运行
f.publish({'x': i * 10000}, priority_control_config=PriorityConsumingControlConfig(
eta=datetime.datetime(2021, 5, 19, 17, 56, 30) + datetime.timedelta(seconds=i),
misfire_grace_time=100000000)) # 按指定的时间运行一次。
| 44.3125 | 93 | 0.788905 |
79586582cf1f25e4b7ca098699616f2214c037e6 | 2,713 | py | Python | src/models/imageModel.py | RecicladoraSanMiguel/recsm_odoo_image_manager | 1a4459377c3c274353cb6b5dd18f8bff11542e71 | [
"MIT"
] | null | null | null | src/models/imageModel.py | RecicladoraSanMiguel/recsm_odoo_image_manager | 1a4459377c3c274353cb6b5dd18f8bff11542e71 | [
"MIT"
] | 2 | 2022-01-13T01:45:44.000Z | 2022-03-12T00:03:11.000Z | src/models/imageModel.py | RecicladoraSanMiguel/recsm-python-NetIMmanager | 1a4459377c3c274353cb6b5dd18f8bff11542e71 | [
"MIT"
] | null | null | null | from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
from datetime import datetime
from pytz import timezone
from ..config import constants
import base64
import os
class ImageModel:
    """Utility for thumbnailing, merging, timestamping and encoding images."""

    def __init__(self):
        # FIX: the original bound a stray, unused local ``font`` in the same
        # statement (``self._font = font = ...``); only the attribute is needed.
        self._font = ImageFont.truetype(os.path.join(os.path.dirname(__file__), '../assets/fonts/')+"Ubuntu-R.ttf", 18)
        self._topLeftHeightDivider = 10 # increase to make the textbox shorter in height
        self._topLeftWidthDivider = 5 # increase to make the textbox shorter in width
        self._textPadding = 2
        # Defines images size
        self._image_width = 400
        self._image_height = 300

    def _convert_image_to_thumbnail(self, img):
        """Shrink *img* in place so it fits within the configured size."""
        # NOTE(review): Image.ANTIALIAS is a deprecated alias of LANCZOS and was
        # removed in Pillow 10 — confirm the pinned Pillow version before upgrading.
        img.thumbnail((self._image_width, self._image_height), Image.ANTIALIAS)

    def _add_image_timestamp(self, img, is_merged=False):
        """Return a copy of *img* with a dark bottom banner and the current timestamp."""
        overlay = Image.new('RGBA', img.size, (255, 255, 255, 0))
        o = ImageDraw.Draw(overlay)
        # Banner starts at y=270, which assumes the 300 px image height set above.
        o.rectangle(
            [0, 270, img.size[0], img.size[1]],
            fill=(0, 0, 0, 220)
        )
        o.text(
            [300 if is_merged else 120 + self._textPadding, 272 + self._textPadding],
            str(datetime.now().strftime('%d/%m/%Y %H:%M:%S')),
            fill="white",
            font=self._font
        )
        return Image.alpha_composite(img, overlay)

    def merge_images(self, img1, img2):
        """Place *img1* and *img2* side by side on a new double-width RGBA canvas."""
        merged_image = Image.new("RGBA", (self._image_width * 2, self._image_height))
        merged_image.paste(img1)
        merged_image.paste(img2, (self._image_width, 0))
        return merged_image

    def process_image(self, img1, img2=False):
        """Thumbnail (and optionally merge) the image(s), stamp a timestamp and
        return the result as base64-encoded PNG bytes."""
        self._convert_image_to_thumbnail(img1)
        image = ImageModel.get_rbga_image(img1)
        if img2:
            self._convert_image_to_thumbnail(img2)
            image = self.merge_images(img1, ImageModel.get_rbga_image(img2))
        # bool(img2) replaces the redundant ``True if img2 else False``.
        image = self._add_image_timestamp(image, bool(img2))
        output = BytesIO()
        if constants.DEBUG:
            image.show()
        image.save(output, format='PNG')
        return base64.b64encode(output.getvalue())

    @staticmethod
    def get_local_time():
        """Return the current time in the configured timezone as dd/mm/YYYY HH:MM:SS."""
        tz = timezone(constants.TIMEZONE)
        local_time = datetime.now(tz)
        return local_time.strftime('%d/%m/%Y %H:%M:%S')

    @staticmethod
    def get_rbga_image(img):
        # Name keeps the original "rbga" typo because external callers use it.
        return img.convert("RGBA")

    @staticmethod
    def convert_bytes_to_image(image_request_bytes):
        """Decode raw bytes into a PIL Image; return False when decoding fails."""
        try:
            return Image.open(BytesIO(image_request_bytes))
        except IOError as e:
            print("== Unable to convert Bytes to Image ==")
            print(e)
            print("== End of Exception ==")
            return False
| 30.144444 | 126 | 0.626613 |
795865d303ce71a95975b7962a3fd957ad5b984c | 2,699 | py | Python | gambolputty/modifier/Permutate.py | lukebeer/GambolPutty | d642433bb2a6a54be6b4cfaa12a507994af8445a | [
"Apache-2.0"
] | null | null | null | gambolputty/modifier/Permutate.py | lukebeer/GambolPutty | d642433bb2a6a54be6b4cfaa12a507994af8445a | [
"Apache-2.0"
] | null | null | null | gambolputty/modifier/Permutate.py | lukebeer/GambolPutty | d642433bb2a6a54be6b4cfaa12a507994af8445a | [
"Apache-2.0"
] | 1 | 2019-12-03T11:36:32.000Z | 2019-12-03T11:36:32.000Z | # -*- coding: utf-8 -*-
import Utils
import itertools
import BaseThreadedModule
import Decorators
import Utils
import sys
@Decorators.ModuleDocstringParser
class Permutate(BaseThreadedModule.BaseThreadedModule):
    """
    Creates successive len('target_fields') length permutations of elements in 'source_field'.

    To add some context data to each emitted event 'context_data_field' can specify a field
    containing a dictionary with the values of 'source_field' as keys.

    Configuration template:

    - Permutate:
        source_field:                    # <type: string; is: required>
        target_fields:                   # <type: list; is: required>
        context_data_field:              # <default: ""; type:string; is: optional>
        context_target_mapping:          # <default: {}; type: dict; is: optional if context_data_field == "" else required>
        receivers:
          - NextModule
    """

    module_type = "modifier"
    """Set module type"""

    def handleEvent(self, event):
        """
        Process the event.

        @param event: dictionary
        @return data: dictionary
        """
        try:
            context_data = event[self.getConfigurationValue('context_data_field')]
        except KeyError:
            context_data = False
        try:
            permutation_data = event[self.getConfigurationValue('source_field')]
        except KeyError:
            # Source field missing: pass the event through unchanged.
            yield event
            return
        if type(permutation_data) is not list:
            # Only lists can be permutated; pass anything else through.
            yield event
            return
        target_field_names = self.getConfigurationValue('target_fields')
        context_target_mapping = self.getConfigurationValue('context_target_mapping')
        for permutation in itertools.permutations(permutation_data, r=len(target_field_names)):
            event_copy = event.copy()
            if context_data:
                try:
                    # Rewrite the context data keys to new keys in context_target_mapping
                    ctx_data = {}
                    for idx, dct in enumerate([context_data[key] for key in permutation if key in context_data]):
                        for mapping_key, newkeys in context_target_mapping.items():
                            if mapping_key in dct:
                                ctx_data[newkeys[idx]] = dct[mapping_key]
                    event_copy.update(ctx_data)
                except Exception:
                    # FIX: narrowed from a bare ``except:`` so SystemExit and
                    # KeyboardInterrupt are no longer swallowed; the failure is
                    # still logged and the permutation is emitted without context.
                    etype, evalue, etb = sys.exc_info()
                    self.logger.warning("Could not add context data. Exception: %s, Error: %s." % (etype, evalue))
            perm = dict(zip(target_field_names, permutation))
            event_copy.update(perm)
            yield event_copy
7958661434bca383fa6db1b9a561b74ab0600ee2 | 4,344 | py | Python | symbols.py | beefoo/alt-mta | 7301ee479f5a118be36ff081329b743ee6c7fa47 | [
"MIT"
] | null | null | null | symbols.py | beefoo/alt-mta | 7301ee479f5a118be36ff081329b743ee6c7fa47 | [
"MIT"
] | null | null | null | symbols.py | beefoo/alt-mta | 7301ee479f5a118be36ff081329b743ee6c7fa47 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# python symbols.py -symbol data/symbol_dot_express.png
# Image source: http://web.mta.info/maps/images/subway_map_Jul18_2700x3314.jpg
# Template matching: https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.html
import argparse
import cv2
import json
import math
import numpy as np
import os
from pprint import pprint
import sys
# input
# CLI arguments: source map image, directory of symbol template images,
# the default match threshold, and the two output paths.
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_IMAGE", default="data/subway_map_Jul18_2700x3314.jpg", help="Path to input image")
parser.add_argument('-dir', dest="INPUT_SYMBOL_DIR", default="data/symbols/", help="Path to input symbol directory")
parser.add_argument('-threshold', dest="THRESHOLD", default=0.75, type=float, help="Matching threshold")
parser.add_argument('-mout', dest="OUTPUT_IMAGE", default="output/symbols.png", help="JSON output file")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/symbols.json", help="JSON output file")
args = parser.parse_args()

# Symbol templates to match; a per-symbol "threshold" overrides -threshold.
SYMBOLS = [
    {"image": "symbol_dot_local.png"},
    {"image": "symbol_dot_express.png", "express": True},
    {"image": "symbol_dot_express_hub.png", "hub": True, "express": True},
    {"image": "symbol_dot_express_hub2.png", "hub": True, "express": True},
    {"image": "symbol_pill_express_hub2.png", "hub": True, "express": True, "threshold": 0.8},
    {"image": "symbol_pill_express_hub1.png", "hub": True, "express": True, "threshold": 0.8},
    {"image": "symbol_dot_local_sir.png", "threshold": 0.798},
    {"image": "symbol_dot_local_closed.png", "threshold": 0.95},
    {"image": "symbol_dot_local_custom1.png", "threshold": 0.95},
    {"image": "symbol_pill_express_hub_custom1.png", "hub": True, "express": True, "threshold": 0.95},
    {"image": "symbol_pill_express_hub_custom2.png", "hub": True, "express": True, "threshold": 0.95},
    {"image": "symbol_pill_express_hub_custom3.png", "hub": True, "express": True, "threshold": 0.95},
    {"image": "symbol_pill_express_hub_custom4.png", "hub": True, "express": True, "threshold": 0.95},
    {"image": "symbol_pill_express_hub_custom5.png", "hub": True, "express": True, "threshold": 0.95},
    {"image": "symbol_pill_express_hub_custom6.png", "hub": True, "express": True, "threshold": 0.95},
    {"image": "symbol_pill_local_hub_custom1.png", "hub": True, "threshold": 0.95},
    {"image": "symbol_pill_local_hub_custom2.png", "hub": True, "threshold": 0.95}
]

# Parse symbols
symbols = SYMBOLS[:]
for i, symbol in enumerate(symbols):
    symbols[i]["path"] = args.INPUT_SYMBOL_DIR + symbol["image"]
    symbols[i]["meta"] = {
        "express": (1 if "express" in symbol else 0),
        "hub": (1 if "hub" in symbol else 0)
    }

# Read source image
img_rgb = cv2.imread(args.INPUT_IMAGE)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

# Find each symbol
symbolsData = []
for i, symbol in enumerate(symbols):
    template = cv2.imread(symbol["path"], 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = args.THRESHOLD
    if "threshold" in symbol:
        threshold = symbol["threshold"]
    loc = np.where(res >= threshold)
    # NOTE: this is Python 2 code (print statements below); zip() returns a
    # list here — under Python 3 it would need list() before len().
    matches = zip(*loc[::-1])
    for m in matches:
        # Center of this candidate match.
        cx = m[0] + w * 0.5
        cy = m[1] + h * 0.5
        exists = False
        # Check if center exists in a previous match
        for sd in symbolsData:
            p = sd["point"]
            s = sd["size"]
            if p[0] < cx < p[0]+s[0] and p[1] < cy < p[1]+s[1]:
                exists = True
                break
        if not exists:
            d = symbol["meta"].copy()
            d.update({
                "point": [int(m[0]), int(m[1])],
                "size": [int(w), int(h)]
            })
            symbolsData.append(d)
    print "Found %s symbols for %s" % (len(matches), symbol["path"])

# write image for debugging
for symbol in symbolsData:
    pt = symbol["point"]
    sz = symbol["size"]
    cv2.rectangle(img_rgb, tuple(pt), (pt[0] + sz[0], pt[1] + sz[1]), (0,0,255), 1)
cv2.imwrite(args.OUTPUT_IMAGE, img_rgb)
print "Wrote matches to %s" % args.OUTPUT_IMAGE

jsonOut = symbolsData[:]

# Write to file
with open(args.OUTPUT_FILE, 'w') as f:
    json.dump(jsonOut, f)
print "Wrote %s items to %s" % (len(symbolsData), args.OUTPUT_FILE)
| 41.371429 | 128 | 0.646179 |
7958671f5704866f591222b110be5ec60282ee7e | 8,602 | py | Python | venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_size.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 15 | 2020-06-29T08:33:39.000Z | 2022-02-12T00:28:51.000Z | venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_size.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 30 | 2020-04-15T19:37:40.000Z | 2020-04-22T21:19:35.000Z | venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_size.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 11 | 2020-06-29T08:40:24.000Z | 2022-02-24T17:39:16.000Z | """
Provides classes of simple units that will be used with AxesDivider
class (or others) to determine the size of each axes. The unit
classes define `get_size` method that returns a tuple of two floats,
meaning relative and absolute sizes, respectively.
Note that this class is nothing more than a simple tuple of two
floats. Take a look at the Divider class to see how these two
values are used.
"""
from numbers import Number
from matplotlib import cbook
from matplotlib.axes import Axes
class _Base:
    """Common base of all size units; supports ``+`` and scalar ``*``."""

    def __rmul__(self, factor):
        # float() raises TypeError early when *factor* is not numeric.
        float(factor)
        return Fraction(factor, self)

    def __add__(self, other):
        if not isinstance(other, _Base):
            float(other)  # validate that a plain number was given
            other = Fixed(other)
        return Add(self, other)
class Add(_Base):
    """Sum of two size units."""

    def __init__(self, a, b):
        self._a = a
        self._b = b

    def get_size(self, renderer):
        rel_a, abs_a = self._a.get_size(renderer)
        rel_b, abs_b = self._b.get_size(renderer)
        return rel_a + rel_b, abs_a + abs_b
class AddList(_Base):
    """Sum of an arbitrary list of size units."""

    def __init__(self, add_list):
        self._list = add_list

    def get_size(self, renderer):
        sizes = [entry.get_size(renderer) for entry in self._list]
        total_rel = sum(rel for rel, _ in sizes)
        total_abs = sum(abs_part for _, abs_part in sizes)
        return total_rel, total_abs
class Fixed(_Base):
    """
    Simple fixed size with absolute part = *fixed_size* and relative part = 0.
    """

    def __init__(self, fixed_size):
        self.fixed_size = fixed_size

    def get_size(self, renderer):
        # Purely absolute: never scales with the axes.
        return 0., self.fixed_size
class Scaled(_Base):
    """
    Simple scaled(?) size with absolute part = 0 and
    relative part = *scalable_size*.
    """

    def __init__(self, scalable_size):
        self._scalable_size = scalable_size

    def get_size(self, renderer):
        # Purely relative: no fixed (inch) component.
        return self._scalable_size, 0.


Scalable = Scaled
def _get_axes_aspect(ax):
aspect = ax.get_aspect()
# when aspec is "auto", consider it as 1.
if aspect in ('normal', 'auto'):
aspect = 1.
elif aspect == "equal":
aspect = 1
else:
aspect = float(aspect)
return aspect
class AxesX(_Base):
    """
    Scaled size whose relative part corresponds to the data width
    of the *axes* multiplied by the *aspect*.
    """

    def __init__(self, axes, aspect=1., ref_ax=None):
        self._axes = axes
        self._aspect = aspect
        if aspect == "axes" and ref_ax is None:
            raise ValueError("ref_ax must be set when aspect='axes'")
        self._ref_ax = ref_ax

    def get_size(self, renderer):
        l1, l2 = self._axes.get_xlim()
        if self._aspect == "axes":
            # Relative to the reference axes: ratio of the two aspects.
            ref_aspect = _get_axes_aspect(self._ref_ax)
            aspect = ref_aspect / _get_axes_aspect(self._axes)
        else:
            aspect = self._aspect
        # Relative part is the x data range scaled by the aspect; no absolute part.
        rel_size = abs(l2-l1)*aspect
        abs_size = 0.
        return rel_size, abs_size
class AxesY(_Base):
    """
    Scaled size whose relative part corresponds to the data height
    of the *axes* multiplied by the *aspect*.
    """

    def __init__(self, axes, aspect=1., ref_ax=None):
        self._axes = axes
        self._aspect = aspect
        if aspect == "axes" and ref_ax is None:
            raise ValueError("ref_ax must be set when aspect='axes'")
        self._ref_ax = ref_ax

    def get_size(self, renderer):
        l1, l2 = self._axes.get_ylim()
        if self._aspect == "axes":
            # NOTE(review): ref_aspect is computed but never used here, unlike
            # AxesX which divides by the axes' own aspect — looks suspicious;
            # verify against upstream before "fixing".
            ref_aspect = _get_axes_aspect(self._ref_ax)
            aspect = _get_axes_aspect(self._axes)
        else:
            aspect = self._aspect
        rel_size = abs(l2-l1)*aspect
        abs_size = 0.
        return rel_size, abs_size
class MaxExtent(_Base):
    """
    Size whose absolute part is the largest width (or height) of
    the given *artist_list*.
    """

    def __init__(self, artist_list, w_or_h):
        self._artist_list = artist_list
        # Validates that w_or_h is exactly "width" or "height".
        cbook._check_in_list(["width", "height"], w_or_h=w_or_h)
        self._w_or_h = w_or_h

    def add_artist(self, a):
        self._artist_list.append(a)

    def get_size(self, renderer):
        rel_size = 0.
        w_list, h_list = [], []
        for a in self._artist_list:
            bb = a.get_window_extent(renderer)
            w_list.append(bb.width)
            h_list.append(bb.height)
        # dpi is taken from the last artist iterated — assumes all artists
        # share a figure/dpi; raises NameError on an empty artist list.
        dpi = a.get_figure().get_dpi()
        if self._w_or_h == "width":
            abs_size = max(w_list)/dpi
        elif self._w_or_h == "height":
            abs_size = max(h_list)/dpi
        return rel_size, abs_size
class MaxWidth(_Base):
    """
    Size whose absolute part is the largest width of
    the given *artist_list*.
    """

    def __init__(self, artist_list):
        self._artist_list = artist_list

    def add_artist(self, a):
        self._artist_list.append(a)

    def get_size(self, renderer):
        widths = []
        for artist in self._artist_list:
            widths.append(artist.get_window_extent(renderer).width)
        # dpi of the figure of the last artist iterated.
        dpi = artist.get_figure().get_dpi()
        return 0., max(widths) / dpi
class MaxHeight(_Base):
    """
    Size whose absolute part is the largest height of
    the given *artist_list*.
    """

    def __init__(self, artist_list):
        self._artist_list = artist_list

    def add_artist(self, a):
        self._artist_list.append(a)

    def get_size(self, renderer):
        heights = []
        for artist in self._artist_list:
            heights.append(artist.get_window_extent(renderer).height)
        # dpi of the figure of the last artist iterated.
        dpi = artist.get_figure().get_dpi()
        return 0., max(heights) / dpi
class Fraction(_Base):
    """
    An instance whose size is a *fraction* of the *ref_size*.

    >>> s = Fraction(0.3, AxesX(ax))
    """

    def __init__(self, fraction, ref_size):
        self._fraction_ref = ref_size
        self._fraction = fraction

    def get_size(self, renderer):
        if self._fraction_ref is None:
            # No reference unit: the fraction itself is the relative size.
            return self._fraction, 0.
        rel, abs_part = self._fraction_ref.get_size(renderer)
        return rel * self._fraction, abs_part * self._fraction
class Padded(_Base):
    """
    Return a instance where the absolute part of *size* is
    increase by the amount of *pad*.
    """

    def __init__(self, size, pad):
        self._size = size
        self._pad = pad

    def get_size(self, renderer):
        rel, abs_part = self._size.get_size(renderer)
        # Padding only affects the absolute (inch) component.
        return rel, abs_part + self._pad
def from_any(size, fraction_ref=None):
    """
    Creates Fixed unit when the first argument is a float, or a
    Fraction unit if that is a string that ends with %. The second
    argument is only meaningful when Fraction unit is created.::

    >>> a = Size.from_any(1.2) # => Size.Fixed(1.2)
    >>> Size.from_any("50%", a) # => Size.Fraction(0.5, a)
    """
    if isinstance(size, Number):
        return Fixed(size)
    if isinstance(size, str):
        if size[-1] == "%":
            # "50%" -> Fraction(0.5, fraction_ref)
            return Fraction(float(size[:-1]) / 100, fraction_ref)
    raise ValueError("Unknown format")
class SizeFromFunc(_Base):
    """Size whose absolute part is produced by a callback given the renderer."""

    def __init__(self, func):
        self._func = func

    def get_size(self, renderer):
        pixels = self._func(renderer)
        # Convert from pixels to inches via the renderer's dpi.
        dpi = renderer.points_to_pixels(72.)
        return 0., pixels / dpi
class GetExtentHelper:
    """Callable returning, in pixels, the largest margin between each axes'
    tight bbox and its regular bbox on the given *direction* side."""

    _get_func_map = {
        "left": lambda self, axes_bbox: axes_bbox.xmin - self.xmin,
        "right": lambda self, axes_bbox: self.xmax - axes_bbox.xmax,
        "bottom": lambda self, axes_bbox: axes_bbox.ymin - self.ymin,
        "top": lambda self, axes_bbox: self.ymax - axes_bbox.ymax,
    }

    def __init__(self, ax, direction):
        # direction must be one of the _get_func_map keys.
        cbook._check_in_list(self._get_func_map, direction=direction)
        # Accept a single Axes or a list of Axes.
        self._ax_list = [ax] if isinstance(ax, Axes) else ax
        self._direction = direction

    def __call__(self, renderer):
        get_func = self._get_func_map[self._direction]
        vl = [get_func(ax.get_tightbbox(renderer, call_axes_locator=False),
                       ax.bbox)
              for ax in self._ax_list]
        return max(vl)
| 27.394904 | 78 | 0.612532 |
7958675e6794dab5ef9addf67dd6e9c684f263dc | 922 | py | Python | SoftLayer/CLI/sshkey/list.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/CLI/sshkey/list.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/CLI/sshkey/list.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | """List SSH keys."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.option('--sortby',
              help='Column to sort by',
              type=click.Choice(['id',
                                 'label',
                                 'fingerprint',
                                 'notes']))
@environment.pass_env
def cli(env, sortby):
    """List SSH keys."""
    manager = SoftLayer.SshKeyManager(env.client)
    table = formatting.Table(['id', 'label', 'fingerprint', 'notes'])
    table.sortby = sortby
    for record in manager.list_keys():
        row = [record['id'],
               record.get('label'),
               record.get('fingerprint'),
               record.get('notes', '-')]
        table.add_row(row)
    env.fout(table)
79586820519d43d9d1ea9a482be2bcfb51559331 | 2,118 | py | Python | tests/test_diffeq/test_perturbed/test_step/test_perturbedstepsolution.py | treid5/probnum | fabb51243d0952fbd35e542aeb5c2dc9a449ec81 | [
"MIT"
] | 1 | 2021-04-16T14:45:26.000Z | 2021-04-16T14:45:26.000Z | tests/test_diffeq/test_perturbed/test_step/test_perturbedstepsolution.py | pitmonticone/probnum | 1fed705b2443a14d08419e16f98f6ef815ae9ffa | [
"MIT"
] | 42 | 2021-03-08T07:20:40.000Z | 2022-03-28T05:04:48.000Z | tests/test_diffeq/test_perturbed/test_step/test_perturbedstepsolution.py | pitmonticone/probnum | 1fed705b2443a14d08419e16f98f6ef815ae9ffa | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from scipy.integrate._ivp import rk
import probnum.problems.zoo.diffeq as diffeq_zoo
from probnum import diffeq, randvars
@pytest.fixture
def steprule():
    # Adaptive step-size rule; loose tolerances keep the tests fast.
    return diffeq.stepsize.AdaptiveSteps(0.1, atol=1e-4, rtol=1e-4)
@pytest.fixture
def perturbed_solution(steprule):
    """Solve a Lotka-Volterra IVP with a noise-perturbed, wrapped SciPy RK45 solver."""
    y0 = np.array([0.1, 0.1])
    ode = diffeq_zoo.lotkavolterra(t0=0.0, tmax=1.0, y0=y0)
    rng = np.random.default_rng(seed=1)  # fixed seed keeps the tests deterministic
    testsolver = diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta(
        rk.RK45, steprule=steprule
    )
    sol = diffeq.perturbed.step.PerturbedStepSolver(
        rng=rng,
        solver=testsolver,
        noise_scale=0.1,
        perturb_function=diffeq.perturbed.step.perturb_uniform,
    )
    return sol.solve(ode)
def test_states(perturbed_solution):
    # The solution states are exposed as a random-variable list.
    assert isinstance(perturbed_solution.states, randvars._RandomVariableList)
def test_call(perturbed_solution):
    """Test for continuity of the dense output.

    Small changes of the locations should come with small changes of the states.
    """
    # Evaluating exactly at the grid points must reproduce the stored states.
    np.testing.assert_allclose(
        perturbed_solution(perturbed_solution.locations[0:]).mean,
        perturbed_solution.states[0:].mean,
        atol=1e-14,
        rtol=1e-14,
    )
    # Just right of each grid point...
    np.testing.assert_allclose(
        perturbed_solution(perturbed_solution.locations[0:-1] + 1e-14).mean,
        perturbed_solution(perturbed_solution.locations[0:-1]).mean,
        atol=1e-12,
        rtol=1e-12,
    )
    # ...and just left of each grid point.
    np.testing.assert_allclose(
        perturbed_solution(perturbed_solution.locations[1:] - 1e-14).mean,
        perturbed_solution(perturbed_solution.locations[1:]).mean,
        atol=1e-12,
        rtol=1e-12,
    )
def test_len(perturbed_solution):
    # One state per time location.
    np.testing.assert_allclose(
        len(perturbed_solution),
        len(perturbed_solution.locations),
        atol=1e-14,
        rtol=1e-14,
    )
def test_getitem(perturbed_solution):
    # Indexing the solution agrees with evaluating the matching interpolant.
    np.testing.assert_allclose(
        perturbed_solution.interpolants[1](perturbed_solution.locations[1]),
        perturbed_solution[1].mean,
        atol=1e-14,
        rtol=1e-14,
    )
| 27.868421 | 80 | 0.688857 |
795868e2eef7e91ca094f39631a2a8ec1f6d28ee | 3,623 | py | Python | stac_fastapi/sqlalchemy/tests/conftest.py | AsgerPetersen/stac-fastapi | 27e134589107654920c2f1dba54773c8c85d4e1a | [
"MIT"
] | 1 | 2021-07-08T23:20:42.000Z | 2021-07-08T23:20:42.000Z | stac_fastapi/sqlalchemy/tests/conftest.py | nmandery/stac-fastapi | 8cce4fb449d6bdb36a788e959bce6012cd6f83e6 | [
"MIT"
] | null | null | null | stac_fastapi/sqlalchemy/tests/conftest.py | nmandery/stac-fastapi | 8cce4fb449d6bdb36a788e959bce6012cd6f83e6 | [
"MIT"
] | null | null | null | import json
import os
from typing import Callable, Dict
import pytest
from starlette.testclient import TestClient
from stac_fastapi.api.app import StacApi
from stac_fastapi.extensions.core import (
ContextExtension,
FieldsExtension,
QueryExtension,
SortExtension,
TransactionExtension,
)
from stac_fastapi.sqlalchemy.config import SqlalchemySettings
from stac_fastapi.sqlalchemy.core import CoreCrudClient
from stac_fastapi.sqlalchemy.models import database
from stac_fastapi.sqlalchemy.session import Session
from stac_fastapi.sqlalchemy.transactions import (
BulkTransactionsClient,
TransactionsClient,
)
from stac_fastapi.sqlalchemy.types.search import SQLAlchemySTACSearch
from stac_fastapi.types.config import Settings
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
class TestSettings(SqlalchemySettings):
    """Settings variant that loads its values from `.env.test`."""

    class Config:
        env_file = ".env.test"


settings = TestSettings()
# Register the test settings globally so the app under test picks them up.
Settings.set(settings)
@pytest.fixture(autouse=True)
def cleanup(postgres_core: CoreCrudClient, postgres_transactions: TransactionsClient):
    """After each test, delete every collection whose id starts with 'test'."""
    yield
    collections = postgres_core.all_collections(request=MockStarletteRequest)
    for coll in collections["collections"]:
        if coll["id"].split("-")[0] == "test":
            # Delete the items
            # NOTE(review): only the first 100 items are fetched — a collection
            # with more items would leave orphans; confirm this is acceptable.
            items = postgres_core.item_collection(
                coll["id"], limit=100, request=MockStarletteRequest
            )
            for feat in items["features"]:
                postgres_transactions.delete_item(
                    feat["id"], feat["collection"], request=MockStarletteRequest
                )

            # Delete the collection
            postgres_transactions.delete_collection(
                coll["id"], request=MockStarletteRequest
            )
@pytest.fixture
def load_test_data() -> Callable[[str], Dict]:
    """Return a loader that reads a JSON fixture from the test data directory."""

    def load_file(filename: str) -> Dict:
        path = os.path.join(DATA_DIR, filename)
        with open(path) as fp:
            return json.load(fp)

    return load_file
class MockStarletteRequest:
    """Minimal stand-in for a Starlette request; only `base_url` is read."""
    base_url = "http://test-server"
@pytest.fixture
def db_session() -> Session:
    # Reader and writer connection strings may point at different endpoints.
    return Session(
        reader_conn_string=settings.reader_connection_string,
        writer_conn_string=settings.writer_connection_string,
    )
@pytest.fixture
def postgres_core(db_session):
    # Read-side CRUD client bound to the ORM model tables.
    return CoreCrudClient(
        session=db_session,
        item_table=database.Item,
        collection_table=database.Collection,
        token_table=database.PaginationToken,
    )
@pytest.fixture
def postgres_transactions(db_session):
    # Write-side (transactions) client bound to the ORM model tables.
    return TransactionsClient(
        session=db_session,
        item_table=database.Item,
        collection_table=database.Collection,
    )
@pytest.fixture
def postgres_bulk_transactions(db_session):
    # Client for bulk insert operations.
    return BulkTransactionsClient(session=db_session)
@pytest.fixture
def api_client(db_session):
    """Build the STAC API application with the standard extension set."""
    settings = SqlalchemySettings()
    return StacApi(
        settings=settings,
        client=CoreCrudClient(session=db_session),
        extensions=[
            TransactionExtension(
                client=TransactionsClient(session=db_session), settings=settings
            ),
            ContextExtension(),
            SortExtension(),
            FieldsExtension(),
            QueryExtension(),
        ],
        search_request_model=SQLAlchemySTACSearch,
    )
@pytest.fixture
def app_client(api_client, load_test_data, postgres_transactions):
    """HTTP test client with the test collection pre-created."""
    coll = load_test_data("test_collection.json")
    postgres_transactions.create_collection(coll, request=MockStarletteRequest)

    with TestClient(api_client.app) as test_app:
        yield test_app
| 27.656489 | 86 | 0.706321 |
79586c3621875081cf9785fd15d9a320f78287a6 | 580 | py | Python | File-Segregator.py | sandeep3119/pythonEveryday | 7014bce290d079cc2f300ee6114fe9e84c294f94 | [
"MIT"
] | 1 | 2021-09-20T14:57:43.000Z | 2021-09-20T14:57:43.000Z | File-Segregator.py | sandeep3119/pythonEveryday | 7014bce290d079cc2f300ee6114fe9e84c294f94 | [
"MIT"
] | null | null | null | File-Segregator.py | sandeep3119/pythonEveryday | 7014bce290d079cc2f300ee6114fe9e84c294f94 | [
"MIT"
] | null | null | null | import os
import shutil
import argparse
def main(TARGET_DIR):
    """Move every file in TARGET_DIR into a subdirectory named after its extension."""
    os.chdir(TARGET_DIR)
    for entry in os.listdir("."):
        ext_dir = entry.split(".")[-1]
        # A folder is created for every listed entry, even ones not moved below.
        os.makedirs(ext_dir, exist_ok=True)
        if os.path.isfile(entry) and entry != 'script.py':
            shutil.move(entry, os.path.join(ext_dir, entry))
if __name__ == '__main__':
    # BUG FIX: the original called parse_args('--target', default=...), which is
    # invalid — options must be declared with add_argument() before parsing.
    parser = argparse.ArgumentParser()
    parser.add_argument('--target', default=os.getcwd())
    parsed_args = parser.parse_args()
    main(parsed_args.target)
| 26.363636 | 63 | 0.658621 |
79586ca424243daed1f3af46e426e62988f83102 | 925 | py | Python | benchmarks/Evolution/both/evo_tests/test_cases/test_processing.py | nuprl/retic_performance | 621211c2f40251ce5364c33e72e4067e34a32013 | [
"MIT"
] | 3 | 2018-08-03T02:41:29.000Z | 2021-03-19T03:18:47.000Z | benchmarks/Evolution/both/evo_tests/test_cases/test_processing.py | nuprl/retic_performance | 621211c2f40251ce5364c33e72e4067e34a32013 | [
"MIT"
] | 3 | 2018-02-04T17:53:56.000Z | 2018-11-10T17:06:57.000Z | benchmarks/Evolution/both/evo_tests/test_cases/test_processing.py | nuprl/retic_performance | 621211c2f40251ce5364c33e72e4067e34a32013 | [
"MIT"
] | 1 | 2018-08-04T00:14:12.000Z | 2018-08-04T00:14:12.000Z | import unittest
from evo_tests.examples import ExampleLOC, ExampleLOTC, ExampleConfigurations, ExampleDealers
from evo_json.convert_py_json.convert_trait import convert_from_pj_loc
from evo_json.process_json.process_configuration import convert_config_to_dealer, convert_dealer_to_config
class TestConvert(unittest.TestCase):
    """Round-trip tests between PyJSON structures and evolution model objects."""

    def setUp(self):
        self.ex_loc = ExampleLOC()
        self.ex_lotc = ExampleLOTC()
        self.ex_config = ExampleConfigurations()
        self.ex_dealer = ExampleDealers()

    def test_convert_loc(self):
        # JSON list of cards -> list of trait cards.
        self.assertEqual(convert_from_pj_loc(self.ex_loc.loc1), self.ex_lotc.lotc1)

    def test_convert_to_config(self):
        # Dealer object -> configuration structure.
        self.assertEqual(convert_dealer_to_config(self.ex_dealer.dealer_all_veg), self.ex_config.config)

    def test_convert_from_config(self):
        # Configuration structure -> dealer object (inverse of the above).
        self.assertEqual(convert_config_to_dealer(self.ex_config.config), self.ex_dealer.dealer_all_veg)
| 33.035714 | 106 | 0.783784 |
79586ddf1e6230334836d814ac7e9e14f7b89540 | 1,911 | py | Python | project/01__get_TraderJoe_data.py | bsegot/housing-search-Chicago | d1a3029a4351d0db45c7f73f432d89ccdb469269 | [
"MIT"
] | 1 | 2021-12-29T17:43:34.000Z | 2021-12-29T17:43:34.000Z | project/01__get_TraderJoe_data.py | bsegot/housing-search-Chicago | d1a3029a4351d0db45c7f73f432d89ccdb469269 | [
"MIT"
] | 2 | 2020-01-26T16:02:33.000Z | 2020-02-08T22:06:09.000Z | project/01__get_TraderJoe_data.py | bsegot/housing-search-Chicago | d1a3029a4351d0db45c7f73f432d89ccdb469269 | [
"MIT"
] | 1 | 2020-02-08T16:24:24.000Z | 2020-02-08T16:24:24.000Z | import pandas as pd
from arcgis.gis import GIS
from arcgis.geocoding import geocode
# Read data from CSV files
csv1_path = "data_sets/illinois_ZIP.csv"

# Read in zipCode data
prop_df = pd.read_csv(csv1_path)
prop_df = pd.DataFrame(prop_df)

# Only view zip codes in Cook County
prop_df = prop_df[prop_df['County'] == "Cook"]
print("Number of rows is " + str(prop_df.shape[0]))

# Find unique zip codes (219)
uniqueZip = prop_df['Zip Code'].unique()
n = len(uniqueZip)

# Print
print("Total number of rows: " + str(n) + "\n")

# Initialize List
listedList = []

# Initiate GIS service
gis = GIS()

# Loop through all zip codes in Cook County and save unique items with geocode
# NOTE: each iteration performs network calls to the ArcGIS geocoding service.
# ``id`` shadows the builtin — left as-is to keep behavior identical.
for id in range(n):
    yourZip = geocode(str(uniqueZip[id]))[0]
    searchedItem = geocode("Trader Joe's", yourZip['extent'], max_locations=1000)
    print("ID - " + str(id), end=" : ")
    print("ZIPCODE - " + str(uniqueZip[id]), end=" : ")
    print("NUM - " + str(len(searchedItem)))
    for item2 in range(len(searchedItem)):
        listedList.append({"ADDRESS":searchedItem[item2]['attributes']['Place_addr'],
                           "PHONE": searchedItem[item2]['attributes']['Phone'],
                           "POSTAL": searchedItem[item2]['attributes']['Postal'],
                           "LONGITUDE":searchedItem[item2]['location']['x'],
                           "LATITUDE":searchedItem[item2]['location']['y']})

# Collect the results into a DataFrame (the name is reused for the frame).
listedList = pd.DataFrame(listedList)
print(listedList)
print(len(listedList))
print(listedList.head())
print("\n")
print(listedList.shape)

# Find if there are duplicates (by ADDRESS)
dup_index = listedList.duplicated(["ADDRESS"])
prop_dup = listedList[dup_index]
print(prop_dup.shape)
listedList.drop_duplicates(subset=['ADDRESS'],inplace=True)
print(listedList.shape)

# Write the new cleaned dataset to directory
csv2_path = "data_sets/traderJoes.csv"
listedList.to_csv(csv2_path,index=False)
| 30.822581 | 85 | 0.675563 |
79586eb5a1ab6a73e30b7c7537a5ae8b1602c944 | 5,516 | py | Python | src/azure-cli/azure/cli/command_modules/acr/run.py | shinilm/azure-cli | 7c5f44151010b4b64d822f8cbe3e725f3525a448 | [
"MIT"
] | 1 | 2019-06-21T05:07:38.000Z | 2019-06-21T05:07:38.000Z | src/azure-cli/azure/cli/command_modules/acr/run.py | shinilm/azure-cli | 7c5f44151010b4b64d822f8cbe3e725f3525a448 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acr/run.py | shinilm/azure-cli | 7c5f44151010b4b64d822f8cbe3e725f3525a448 | [
"MIT"
] | 1 | 2019-06-21T05:08:09.000Z | 2019-06-21T05:08:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import uuid
import tempfile
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.commands import LongRunningOperation
from ._constants import ACR_TASK_YAML_DEFAULT_NAME
from ._stream_utils import stream_logs
from ._utils import (
validate_managed_registry,
get_validate_platform,
get_custom_registry_credentials,
get_yaml_and_values
)
from ._client_factory import cf_acr_registries_tasks
from ._archive_utils import upload_source_code, check_remote_source_code
RUN_NOT_SUPPORTED = 'Run is only available for managed registries.'
NULL_SOURCE_LOCATION = "/dev/null"
logger = get_logger(__name__)
def acr_run(cmd,  # pylint: disable=too-many-locals
            client,
            registry_name,
            source_location,
            file=None,
            values=None,
            set_value=None,
            set_secret=None,
            cmd_value=None,
            no_format=False,
            no_logs=False,
            no_wait=False,
            timeout=None,
            resource_group_name=None,
            platform=None,
            auth_mode=None):
    """Queue a task run on an Azure Container Registry and optionally stream its logs.

    Either a source location with a task file (FileTaskRunRequest) or an inline
    command via --cmd (EncodedTaskRunRequest) is used, never both.
    """
    # Run is only supported on managed registries; also resolves the resource group.
    _, resource_group_name = validate_managed_registry(
        cmd, registry_name, resource_group_name, RUN_NOT_SUPPORTED)

    if cmd_value and file:
        raise CLIError(
            "Azure Container Registry can run with either "
            "--cmd myCommand /dev/null or "
            "-f myFile mySourceLocation, but not both.")

    client_registries = cf_acr_registries_tasks(cmd.cli_ctx)
    # Uploads local sources / validates remote URLs; None when /dev/null was given.
    source_location = prepare_source_location(
        source_location, client_registries, registry_name, resource_group_name)

    platform_os, platform_arch, platform_variant = get_validate_platform(cmd, platform)

    EncodedTaskRunRequest, FileTaskRunRequest, PlatformProperties = cmd.get_models(
        'EncodedTaskRunRequest', 'FileTaskRunRequest', 'PlatformProperties')

    if source_location:
        # Task definition comes from a file inside the source context.
        request = FileTaskRunRequest(
            task_file_path=file if file else ACR_TASK_YAML_DEFAULT_NAME,
            values_file_path=values,
            values=(set_value if set_value else []) + (set_secret if set_secret else []),
            source_location=source_location,
            timeout=timeout,
            platform=PlatformProperties(
                os=platform_os,
                architecture=platform_arch,
                variant=platform_variant
            ),
            credentials=get_custom_registry_credentials(
                cmd=cmd,
                auth_mode=auth_mode
            )
        )
    else:
        # No source context: build a YAML template from --cmd and send it inline.
        yaml_template, values_content = get_yaml_and_values(cmd_value, timeout, file)
        import base64
        request = EncodedTaskRunRequest(
            encoded_task_content=base64.b64encode(yaml_template.encode()).decode(),
            encoded_values_content=base64.b64encode(values_content.encode()).decode(),
            values=(set_value if set_value else []) + (set_secret if set_secret else []),
            source_location=source_location,
            timeout=timeout,
            platform=PlatformProperties(
                os=platform_os,
                architecture=platform_arch,
                variant=platform_variant
            ),
            credentials=get_custom_registry_credentials(
                cmd=cmd,
                auth_mode=auth_mode
            )
        )

    queued = LongRunningOperation(cmd.cli_ctx)(client_registries.schedule_run(
        resource_group_name=resource_group_name,
        registry_name=registry_name,
        run_request=request))

    run_id = queued.run_id
    logger.warning("Queued a run with ID: %s", run_id)

    if no_wait:
        return queued

    logger.warning("Waiting for an agent...")

    if no_logs:
        # Poll for completion without streaming the run's log output.
        from ._run_polling import get_run_with_polling
        return get_run_with_polling(cmd, client, run_id, registry_name, resource_group_name)

    return stream_logs(client, run_id, registry_name, resource_group_name, no_format, True)
def prepare_source_location(source_location, client_registries, registry_name, resource_group_name):
    """Normalize the user-supplied source location for a registry run.

    - "/dev/null" (case-insensitive) means "no source"; returns None.
    - An existing local directory is archived and uploaded to the registry;
      returns the resulting remote source location. The temporary archive is
      always removed afterwards.
    - Anything else is validated as a remote source URL.

    :raises CLIError: if the path exists but is not a directory, or if the
        upload fails.
    """
    if source_location.lower() == NULL_SOURCE_LOCATION:
        source_location = None
    elif os.path.exists(source_location):
        if not os.path.isdir(source_location):
            raise CLIError(
                "Source location should be a local directory path or remote URL.")

        # Unique archive name avoids collisions between concurrent runs.
        tar_file_path = os.path.join(
            tempfile.gettempdir(), 'run_archive_{}.tar.gz'.format(uuid.uuid4().hex))

        try:
            source_location = upload_source_code(
                client_registries, registry_name, resource_group_name,
                source_location, tar_file_path, "", "")
        except Exception as err:
            # Chain the original exception so the root cause stays visible in
            # debug output (previously the traceback context was discarded).
            raise CLIError(err) from err
        finally:
            # Best-effort cleanup of the local archive.
            try:
                logger.debug(
                    "Deleting the archived source code from '%s'...", tar_file_path)
                os.remove(tar_file_path)
            except OSError:
                pass
    else:
        source_location = check_remote_source_code(source_location)
        logger.warning("Sending context to registry: %s...", registry_name)

    return source_location
| 36.529801 | 100 | 0.6376 |
79586ee103a7f951122d0c5b505b6c89a464c45e | 1,162 | py | Python | orbit_transfer/configs/model/classification.py | sinzlab/orbit_transfer | 812d89af5c7ab26d9ea26766a4250ae023bb20b8 | [
"MIT"
] | null | null | null | orbit_transfer/configs/model/classification.py | sinzlab/orbit_transfer | 812d89af5c7ab26d9ea26766a4250ae023bb20b8 | [
"MIT"
] | null | null | null | orbit_transfer/configs/model/classification.py | sinzlab/orbit_transfer | 812d89af5c7ab26d9ea26766a4250ae023bb20b8 | [
"MIT"
] | null | null | null | from typing import Dict, Tuple
from nntransfer.configs.model.base import ModelConfig
class ClassificationModel(ModelConfig):
    """Hyper-parameter configuration for a classification model builder."""

    # Dotted path to the builder function resolved by the nntransfer framework.
    fn = "orbit_transfer.models.classification_model_builder"

    def __init__(self, **kwargs):
        """Set default hyper-parameters, then hand kwargs to ModelConfig.

        NOTE(review): `load_kwargs(**kwargs)` runs *before* the default
        assignments below, so overlapping keys appear to be overwritten here
        and are presumably re-applied by `super().__init__(**kwargs)` at the
        end — confirm against the ModelConfig base class.
        """
        self.load_kwargs(**kwargs)
        self.type: str = "resnet50"
        self.core_type: str = "conv"
        # Stem convolution geometry.
        self.conv_stem_kernel_size: int = 3
        self.conv_stem_padding: int = 1
        self.conv_stem_stride: int = 1
        self.core_stride: int = 1
        self.max_pool_after_stem: bool = False
        self.advanced_init: bool = False
        self.zero_init_residual: bool = False
        self.adaptive_pooling: bool = False
        self.avg_pool: bool = False
        # resnet specific
        self.noise_adv_classification: bool = False
        self.noise_adv_regression: bool = False
        self.num_noise_readout_layers: int = 1
        # Mirrors the classification flag by default (sigmoid for class output).
        self.noise_sigmoid_output: bool = self.noise_adv_classification
        # vgg specific
        self.pretrained: bool = False
        self.pretrained_path: str = ""
        self.readout_type: str = "dense"
        self.add_buffer: Tuple = ()
        super().__init__(**kwargs)
| 34.176471 | 71 | 0.657487 |
79586ef1b4c1fb6a5f5cc2f23bbf0e40a62d1f71 | 2,211 | py | Python | mailchimp3/entities/reportgoogleanalytics.py | nizamarusada/python-mailchimp | e67ba19848d1975d2b142fab960633fc3df8dff4 | [
"MIT"
] | 311 | 2015-11-27T07:15:42.000Z | 2019-03-06T15:55:40.000Z | mailchimp3/entities/reportgoogleanalytics.py | nizamarusada/python-mailchimp | e67ba19848d1975d2b142fab960633fc3df8dff4 | [
"MIT"
] | 144 | 2016-01-25T21:29:33.000Z | 2019-02-24T13:36:16.000Z | mailchimp3/entities/reportgoogleanalytics.py | nizamarusada/python-mailchimp | e67ba19848d1975d2b142fab960633fc3df8dff4 | [
"MIT"
] | 105 | 2015-10-21T14:34:38.000Z | 2019-03-01T21:34:44.000Z | # coding=utf-8
"""
The Google Analytics API endpoint
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/reports/google-analytics/
Schema: https://api.mailchimp.com/schema/3.0/Reports/GoogleAnalytics/Collection.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class ReportGoogleAnalytics(BaseApi):
    """
    Access Google Analytics reports for a specific campaign.
    """
    def __init__(self, *args, **kwargs):
        """
        Initialize the endpoint
        """
        super(ReportGoogleAnalytics, self).__init__(*args, **kwargs)
        self.endpoint = 'reports'
        self.campaign_id = None
        self.profile_id = None

    def all(self, campaign_id, get_all=False, **queryparams):
        """
        Get a summary of Google Analytics reports for a specific campaign.

        :param campaign_id: The unique id for the campaign.
        :type campaign_id: :py:class:`str`
        :param get_all: Should the query get all results
        :type get_all: :py:class:`bool`
        :param queryparams: The query string parameters
            queryparams['fields'] = []
            queryparams['exclude_fields'] = []
        """
        self.campaign_id = campaign_id
        url = self._build_path(campaign_id, 'google-analytics')
        if get_all:
            return self._iterate(url=url, **queryparams)
        return self._mc_client._get(url=url, **queryparams)

    def get(self, campaign_id, profile_id, **queryparams):
        """
        Get information about a specific Google Analytics report for a campaign.

        :param campaign_id: The unique id for the campaign
        :type campaign_id: :py:class:`str`
        :param profile_id: The Google Analytics View ID
        :type profile_id: :py:class:`str`
        :param queryparams: The query string parameters
            queryparams['fields'] = []
            queryparams['exclude_fields'] = []
        """
        self.campaign_id = campaign_id
        self.profile_id = profile_id
        url = self._build_path(campaign_id, 'google-analytics', profile_id)
        return self._mc_client._get(url=url, **queryparams)
| 35.095238 | 117 | 0.660787 |
79586fca6eed3fae558450921ec9b87a4a442562 | 6,193 | py | Python | src/sphinx_inline_tabs/_impl.py | christopherpickering/sphinx-inline-tabs | 8a36b887dfc4a14d781ab7acf50cb5e4cb737353 | [
"MIT"
] | 39 | 2020-10-14T13:55:49.000Z | 2022-01-30T10:28:33.000Z | src/sphinx_inline_tabs/_impl.py | christopherpickering/sphinx-inline-tabs | 8a36b887dfc4a14d781ab7acf50cb5e4cb737353 | [
"MIT"
] | 27 | 2020-10-14T21:41:31.000Z | 2022-03-12T22:12:47.000Z | src/sphinx_inline_tabs/_impl.py | christopherpickering/sphinx-inline-tabs | 8a36b887dfc4a14d781ab7acf50cb5e4cb737353 | [
"MIT"
] | 9 | 2020-11-15T01:17:53.000Z | 2021-12-16T14:34:13.000Z | """The actual implementation."""
import itertools
from typing import List, Literal
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.transforms.post_transforms import SphinxPostTransform
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import NodeMatcher
class TabContainer(nodes.container):
    """Initial doctree node that holds the content of one tab."""
class _GeneralHTMLTagElement(nodes.Element, nodes.General):
    """Base node rendered as a bare HTML tag named by ``tagname``."""

    @staticmethod
    def visit(translator, node):
        attributes = dict(node.attributes)
        # Strip docutils bookkeeping attributes that have no place in the tag.
        for unwanted in ("ids", "classes", "names", "dupnames", "backrefs"):
            attributes.pop(unwanted)

        tag = translator.starttag(node, node.tagname, **attributes)
        translator.body.append(tag.strip())

    @staticmethod
    def depart(translator, node):
        # Void elements (e.g. <input>) set endtag=False and emit no closer.
        if node.endtag:
            translator.body.append(f"</{node.tagname}>")
class _TabInput(_GeneralHTMLTagElement):
    # Rendered as a void <input> element (no closing tag).
    tagname = "input"
    endtag = False
class _TabLabel(_GeneralHTMLTagElement):
    # Rendered as <label>...</label>; the closing tag is emitted on departure.
    tagname = "label"
    endtag = True
class TabDirective(SphinxDirective):
    """Tabbed content in Sphinx documentation.

    Takes a single (whitespace-allowed) argument used as the tab label and a
    body that becomes the tab content. The ``new-set`` flag forces this tab
    to start a fresh tab set instead of joining the preceding one.
    """

    required_arguments = 1  # directive takes a single argument.
    final_argument_whitespace = True  # this allows that argument to contain spaces.
    has_content = True
    option_spec = {
        "new-set": directives.flag,
    }

    def run(self):
        """Parse a tabs directive.

        Returns one ``TabContainer`` whose children are a ``label`` node
        (inline markup allowed) followed by a div-container with the parsed
        body; ``TabHtmlTransform`` later rewrites it into HTML form elements.
        """
        self.assert_has_content()

        container = TabContainer("", type="tab", new_set="new-set" in self.options)
        self.set_source_info(container)

        # Handle the label (non-plain-text variants allowed)
        textnodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
        # The signature of this object is:
        #     __init__(self, rawsource='', text='', *children, **attributes)
        #
        # We want to directly populate the children here.
        label = nodes.label("", "", *textnodes)

        # Handle the content
        content = nodes.container("", is_div=True, classes=["tab-content"])
        self.state.nested_parse(self.content, self.content_offset, content)

        container += label
        container += content
        return [container]
class TabHtmlTransform(SphinxPostTransform):
    """Transform output of TabDirective into usable chunks.

    Walks the document, groups adjacent/nested ``TabContainer`` nodes into
    tab sets, and replaces each set with radio-input/label/content HTML
    structure. ``self.stack`` holds the currently-open (possibly nested)
    tab sets; ``self.counter`` provides document-unique set ids.
    """

    default_priority = 200
    formats = ["html"]

    def run(self):
        """Locate and replace `TabContainer`s."""
        self.stack = []  # type: List[List[TabContainer]]
        self.counter = itertools.count(start=0, step=1)

        matcher = NodeMatcher(TabContainer)
        for node in self.document.traverse(matcher):  # type: TabContainer
            self._process_one_node(node)

        # Flush any tab sets still open at the end of the document.
        while self.stack:
            tab_set = self.stack.pop()
            self.finalize_set(tab_set, next(self.counter))

    def _process_one_node(self, node: TabContainer):
        """Decide whether *node* extends an open tab set or starts a new one."""
        # There is no existing tab set. Let's start a new one.
        if not self.stack:
            self.stack.append([node])
            return
        # There should never be an empty "current" tab set.
        assert self.stack[-1]

        # Search open sets from innermost to outermost for one this node
        # belongs to (either nested inside its last tab, or right after it).
        close_till = None
        append = False
        for tab_set in reversed(self.stack[:]):
            last_node = tab_set[-1]
            # Is this node a direct child of the last node in this tab-set?
            # (children[1] is the tab's content container.)
            is_child = node in last_node.children[1]
            if is_child:
                close_till = tab_set
                append = False
                break
            # Is this node a sibling of the last node in this tab-set?
            is_sibling = (
                node.parent == last_node.parent  # same parent
                # immediately after the previous node
                and node.parent.index(last_node) + 1 == node.parent.index(node)
            )
            if is_sibling:
                close_till = tab_set
                append = True
                break

        # Close all tab sets as required: everything nested deeper than the
        # set this node relates to (or every set, if it relates to none).
        if close_till is not None:
            while self.stack[-1] != close_till:
                self.finalize_set(self.stack.pop(), next(self.counter))
        else:
            while self.stack:
                self.finalize_set(self.stack.pop(), next(self.counter))

        # Start a new tab set, as required or if requested via :new-set:.
        if append and not node["new_set"]:
            assert self.stack
            self.stack[-1].append(node)
        else:
            self.stack.append([node])

    def finalize_set(self, tab_set: List[TabContainer], set_counter: int):
        """Add these TabContainers as a single-set-of-tabs."""
        assert tab_set

        parent = tab_set[0].parent

        container = nodes.container("", is_div=True, classes=["tab-set"])
        container.parent = parent

        # One shared radio-group name per set keeps the tabs mutually exclusive.
        tab_set_name = f"tab-set--{set_counter}"
        node_counter = 0
        for node in tab_set:
            node_counter += 1
            tab_id = tab_set_name + f"-input--{node_counter}"
            title, content = node.children

            # <input>, for storing state in radio boxes.
            input_node = _TabInput(
                type="radio", ids=[tab_id], name=tab_set_name, classes=["tab-input"]
            )

            # <label>
            label_node = _TabLabel(
                "", *title.children, **{"for": tab_id}, classes=["tab-label"]
            )

            # For error messages
            input_node.source = node.source
            input_node.line = node.line
            label_node.source = node.source
            label_node.line = node.line

            # Populate with the content.
            container += input_node
            container += label_node
            container += content

        # Pre-select the first tab of the set.
        container.children[0]["checked"] = True

        # Replace all nodes in tab_set, with the container.
        start_at = parent.index(tab_set[0])
        end_at = parent.index(tab_set[-1])

        parent.children = (
            parent.children[:start_at] + [container] + parent[end_at + 1 :]
        )
79586ff0799dc07d4160257366b5adfdde156420 | 3,526 | py | Python | pettingzoo/utils/wrappers/base.py | cclauss/PettingZoo | 4051996567fdec24f63203dc4445cc79405c01db | [
"Apache-2.0"
] | 1 | 2021-09-26T06:44:53.000Z | 2021-09-26T06:44:53.000Z | pettingzoo/utils/wrappers/base.py | cclauss/PettingZoo | 4051996567fdec24f63203dc4445cc79405c01db | [
"Apache-2.0"
] | null | null | null | pettingzoo/utils/wrappers/base.py | cclauss/PettingZoo | 4051996567fdec24f63203dc4445cc79405c01db | [
"Apache-2.0"
] | null | null | null | import warnings
from pettingzoo.utils.env import AECEnv
class BaseWrapper(AECEnv):
    '''
    Creates a wrapper around `env` parameter. Extend this class
    to create a useful wrapper.

    Method calls are delegated to the wrapped environment; the mutable
    per-step state (agent_selection, rewards, dones, infos, agents,
    _cumulative_rewards) is copied back onto the wrapper after every
    reset() and step().
    '''

    def __init__(self, env):
        super().__init__()
        self.env = env

        # try to access these parameters for backwards compatability
        try:
            self._observation_spaces = self.env.observation_spaces
            self._action_spaces = self.env.action_spaces
        except AttributeError:
            pass
        try:
            self.possible_agents = self.env.possible_agents
        except AttributeError:
            pass
        self.metadata = self.env.metadata

        # we don't want these defined as we don't want them used before they are gotten
        # self.agent_selection = self.env.agent_selection
        # self.rewards = self.env.rewards
        # self.dones = self.env.dones

        # we don't want to care one way or the other whether environments have an infos or not before reset
        try:
            self.infos = self.env.infos
        except AttributeError:
            pass

        # Not every environment has the .state_space attribute implemented
        try:
            self.state_space = self.env.state_space
        except AttributeError:
            pass

    @property
    def observation_spaces(self):
        # Deprecated dict-style access; prefer observation_space(agent).
        warnings.warn("The `observation_spaces` dictionary is deprecated. Use the `observation_space` function instead.")
        try:
            return self._observation_spaces
        except AttributeError:
            raise AttributeError("The base environment does not have an `observation_spaces` dict attribute. Use the environments `observation_space` method instead")

    @property
    def action_spaces(self):
        # Deprecated dict-style access; prefer action_space(agent).
        warnings.warn("The `action_spaces` dictionary is deprecated. Use the `action_space` function instead.")
        try:
            return self._action_spaces
        except AttributeError:
            raise AttributeError("The base environment does not have an action_spaces dict attribute. Use the environments `action_space` method instead")

    def observation_space(self, agent):
        # Per-agent observation space, delegated to the wrapped env.
        return self.env.observation_space(agent)

    def action_space(self, agent):
        # Per-agent action space, delegated to the wrapped env.
        return self.env.action_space(agent)

    @property
    def unwrapped(self):
        # Innermost environment beneath every layer of wrappers.
        return self.env.unwrapped

    def seed(self, seed=None):
        self.env.seed(seed)

    def close(self):
        self.env.close()

    def render(self, mode='human'):
        return self.env.render(mode)

    def reset(self):
        self.env.reset()
        # Mirror the wrapped env's fresh episode state onto this wrapper.
        self.agent_selection = self.env.agent_selection
        self.rewards = self.env.rewards
        self.dones = self.env.dones
        self.infos = self.env.infos
        self.agents = self.env.agents
        self._cumulative_rewards = self.env._cumulative_rewards

    def observe(self, agent):
        return self.env.observe(agent)

    def state(self):
        return self.env.state()

    def step(self, action):
        self.env.step(action)
        # Mirror the wrapped env's post-step state onto this wrapper.
        self.agent_selection = self.env.agent_selection
        self.rewards = self.env.rewards
        self.dones = self.env.dones
        self.infos = self.env.infos
        self.agents = self.env.agents
        self._cumulative_rewards = self.env._cumulative_rewards

    def __str__(self):
        '''
        returns a name which looks like: "max_observation<space_invaders_v1>"
        '''
        return f'{type(self).__name__}<{str(self.env)}>'
7958720c45b2c8a25190168e4117d39d898152ab | 3,499 | py | Python | WEEKS/CD_Sata-Structures/_MISC/misc-examples/island_count.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/island_count.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/island_count.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | """
Remember these steps to solve almost any graphs problem:
- Translate the problem into terminology you've learned this week
- Build your graph
- Traverse your graph
ISLANDS MATRIX CHALLENGE!
--------------------------
Write a function that takes a 2D binary array and returns the number of 1 islands.
An island consists of 1s that are connected to the north, south, east or west. For example:
islands = [[0, 1, 0, 1, 0],
[1, 1, 0, 1, 1],
[0, 0, 1, 0, 0],
[1, 0, 1, 0, 0],
[1, 1, 0, 0, 0]]
island_counter(islands) # returns 4
traversal (define a function) => dft(row, col, matrix, visited) => returns visited
get neighbors (define function) => get_nieghbors(col, row, matrix) => check north south east and west for connections / x, y / col / row
each island is a vertex
each connection of north, south, east or west (edge)
"""
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self._items = []

    def push(self, value):
        """Place *value* on top of the stack."""
        self._items.append(value)

    def pop(self):
        """Remove and return the top value, or None when the stack is empty."""
        if not self._items:
            return None
        return self._items.pop()

    def size(self):
        """Number of values currently stored."""
        return len(self._items)
def dft(row, col, matrix, visited):
    """Depth-first traversal marking every land cell connected to (row, col).

    Uses a plain list as the stack (idiomatic Python) instead of the custom
    Stack class; push/pop order — and therefore the traversal — is identical.
    Returns the updated ``visited`` matrix.
    """
    to_visit = [(row, col)]
    while to_visit:
        row, col = to_visit.pop()
        if not visited[row][col]:
            visited[row][col] = True
            # Neighbors are pushed in N/S/E/W order, matching the original.
            to_visit.extend(get_neighbors(col, row, matrix))
    return visited
def get_neighbors(col, row, matrix):
    """Return (row, col) tuples of land cells bordering (row, col).

    Checks the four cardinal directions in N/S/E/W order; only cells whose
    value is 1 are returned. Note the (col, row) parameter order.
    """
    last_row = len(matrix) - 1
    last_col = len(matrix[0]) - 1
    neighbors = []
    if row > 0 and matrix[row - 1][col] == 1:  # north
        neighbors.append((row - 1, col))
    if row < last_row and matrix[row + 1][col] == 1:  # south
        neighbors.append((row + 1, col))
    if col < last_col and matrix[row][col + 1] == 1:  # east
        neighbors.append((row, col + 1))
    if col > 0 and matrix[row][col - 1] == 1:  # west
        neighbors.append((row, col - 1))
    return neighbors
def island_counter(matrix):
    """Count connected groups of 1s (4-connectivity) in a binary matrix."""
    num_rows = len(matrix)
    num_cols = len(matrix[0])
    # Parallel matrix tracking which cells have been processed.
    visited = [[False] * num_cols for _ in range(num_rows)]
    islands = 0

    # Scan every cell; each unvisited land cell seeds a traversal that
    # consumes its whole island, so each island is counted exactly once.
    for col in range(num_cols):
        for row in range(num_rows):
            if visited[row][col]:
                continue
            if matrix[row][col] == 1:
                visited = dft(row, col, matrix, visited)
                islands += 1
            else:
                visited[row][col] = True
    return islands
if __name__ == "__main__":
islands = [
[0, 1, 0, 1, 0],
[1, 1, 0, 1, 1],
[0, 0, 1, 0, 0],
[1, 0, 1, 0, 0],
[1, 1, 0, 0, 0],
]
print(island_counter(islands)) # 4
islands = [
[1, 0, 0, 1, 1, 0, 1, 1, 0, 1],
[0, 0, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 1, 0, 0, 1, 0],
]
print(island_counter(islands)) # 13
| 27.124031 | 136 | 0.50986 |
795872d7ca9f70d6364d5f1c090c85806dd950fc | 884 | py | Python | app.py | evanscottgray/cst_server | cdbc6d2358a41071b173f501ee7991763218e2aa | [
"MIT"
] | null | null | null | app.py | evanscottgray/cst_server | cdbc6d2358a41071b173f501ee7991763218e2aa | [
"MIT"
] | null | null | null | app.py | evanscottgray/cst_server | cdbc6d2358a41071b173f501ee7991763218e2aa | [
"MIT"
] | null | null | null | from flask import Flask
from flask import abort, request, jsonify
from flask.ext.cors import CORS
import subprocess
import utils
import os.path
app = Flask(__name__)
# Enable CORS handling on all routes (flask-cors defaults).
cors = CORS(app)
def get_token():
    """Run the configured token command and return its stdout as text.

    The command path comes from ``app.config['token_command']``; a trailing
    newline is stripped from the output.
    """
    cmd = app.config.get('token_command')
    p = subprocess.Popen([cmd], stdout=subprocess.PIPE)
    out = p.communicate()[0]
    # Popen pipes yield bytes on Python 3; decode before stripping, otherwise
    # bytes.strip('\n') raises TypeError.
    if isinstance(out, bytes):
        out = out.decode()
    return out.strip('\n')
def get_user():
    """Return the username configured for this service."""
    return app.config.get('user')
@app.route('/token')
def token():
    """Return the current token and user as JSON.

    Requires the configured API key as the ``key`` query parameter;
    mismatches are rejected with HTTP 403.
    """
    supplied_key = request.args.get('key')
    if supplied_key != app.config.get('api_key'):
        abort(403)
    return jsonify({'token': get_token(), 'user': get_user()}), 200
if __name__ == '__main__':
    # Self-signed certificate pair shipped alongside this module.
    crt = os.path.join(os.path.dirname(__file__), 'ssl/server.crt')
    key = os.path.join(os.path.dirname(__file__), 'ssl/server.key')
    app.config.update(utils.parse_config())
    # Loopback-only HTTPS server.
    app.run(host='127.0.0.1', port=15000, ssl_context=(crt, key))
7958755627afc3e2b9162042e3ac700fff2f23bd | 2,637 | py | Python | tests/basics/ListContractions.py | hclivess/Nuitka | 9c7ec9696e69a3901b25d5bce720c921d45c931b | [
"Apache-2.0"
] | null | null | null | tests/basics/ListContractions.py | hclivess/Nuitka | 9c7ec9696e69a3901b25d5bce720c921d45c931b | [
"Apache-2.0"
] | 1 | 2019-03-01T11:33:40.000Z | 2019-03-01T11:33:40.000Z | tests/basics/ListContractions.py | hclivess/Nuitka | 9c7ec9696e69a3901b25d5bce720c921d45c931b | [
"Apache-2.0"
] | 1 | 2019-03-26T16:56:21.000Z | 2019-03-26T16:56:21.000Z | # Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
def displayDict(d):
    """Return a deterministic ``{key: value, ...}`` rendering of *d*.

    Keys are sorted so the output is stable regardless of dict ordering.
    The original built the string but never returned it, so every caller
    received None; items also ran together without separators.
    """
    result = "{"
    result += ", ".join("%s: %s" % (key, value) for key, value in sorted(d.items()))
    result += "}"
    return result
print("List contraction on the module level:")
x = [(u if u%2==0 else 0) for u in range(10)]
print(x)
print("List contraction on the function level:")
def someFunction():
x = [(u if u%2==0 else 0) for u in range(10)]
print(x)
someFunction()
print("List contractions with no, 1 one 2 conditions:")
def otherFunction():
print([ x for x in range(8) ])
print([ x for x in range(8) if x % 2 == 1 ])
print([ x for x in range(8) if x % 2 == 1 if x > 4 ])
otherFunction()
print("Complex list contractions with more than one for:")
def complexContractions():
print([ (x,y) for x in range(3) for y in range(5) ])
seq = range(3)
res = [(i, j, k) for i in iter(seq) for j in iter(seq) for k in iter(seq)]
print(res)
complexContractions()
print("Contraction for 2 for statements and one final if referring to first for:")
def trickyContraction():
class Range:
def __init__(self, value):
self.value = value
def __iter__(self):
print("Giving range iter to", self.value)
return iter(range(self.value))
def Cond(y):
print("Checking against", y)
return y == 1
r = [ (x,z,y) for x in Range(3) for z in Range(2) for y in Range(4) if Cond(y) ]
print("result is", r)
trickyContraction()
def lambdaWithcontraction(x):
    # The lambda's parameter shadows the outer "x"; printing locals() shows
    # which names end up in the enclosing function's scope.
    l = lambda x : [ z for z in range(x) ]
    r = l(x)
    print("Lambda contraction locals:", displayDict(locals()))
lambdaWithcontraction(3)
print("Contraction that gets a 'del' on the iterator variable:", end = ' ')
def allowedDelOnIteratorVariable(z):
x = 2
del x
return [ x*z for x in range(z) ]
print(allowedDelOnIteratorVariable(3))
| 28.053191 | 84 | 0.648085 |
795876ccfa5729c343d421be5d4c81a76e685f24 | 518 | py | Python | utils.py | maxcrous/magnify_motion | 70a3a61f67beacf249ba7023a4760c056be6c54a | [
"MIT"
] | 3 | 2021-12-01T13:55:14.000Z | 2022-01-19T14:37:40.000Z | utils.py | maxcrous/magnify_motion | 70a3a61f67beacf249ba7023a4760c056be6c54a | [
"MIT"
] | null | null | null | utils.py | maxcrous/magnify_motion | 70a3a61f67beacf249ba7023a4760c056be6c54a | [
"MIT"
] | 3 | 2021-01-05T03:07:10.000Z | 2021-12-01T13:10:40.000Z | import matplotlib.pyplot as plt
import skvideo.io
def plot_slice(vid, output_path):
    """Save a spatiotemporal slice of a video to disk."""
    # Unpacking also asserts the expected 4-D (frames, height, width, channels)
    # layout — TODO confirm against callers.
    num_frames, height, width, channels = vid.shape
    # Take the center column of every frame.
    center_column = vid[:, :, width // 2, :]
    # Swap axes so time runs along the horizontal axis of the image.
    plt.imshow(center_column.swapaxes(0, 1))
    plt.savefig(output_path)
def get_fps(vid_path):
    """Get the frames per second of a video."""
    metadata = skvideo.io.ffprobe(vid_path)
    # ffprobe reports the rate as a fraction string like "30000/1001";
    # only the numerator is used here, as in the original implementation.
    rate = metadata['video']['@r_frame_rate']
    numerator = rate.split('/')[0]
    return float(numerator)
| 25.9 | 59 | 0.656371 |
795876defcb9d6d4e3156e6e80c5a60e53503bf4 | 31,943 | py | Python | tests/wallet/rpc/test_wallet_rpc.py | Hydrangea-Network/hydrangea-blockchain | d15662329958dbdaa9cbd99733ba729f0e74ce54 | [
"Apache-2.0"
] | 1 | 2022-03-15T06:41:49.000Z | 2022-03-15T06:41:49.000Z | tests/wallet/rpc/test_wallet_rpc.py | Hydrangea-Network/hydrangea-blockchain | d15662329958dbdaa9cbd99733ba729f0e74ce54 | [
"Apache-2.0"
] | null | null | null | tests/wallet/rpc/test_wallet_rpc.py | Hydrangea-Network/hydrangea-blockchain | d15662329958dbdaa9cbd99733ba729f0e74ce54 | [
"Apache-2.0"
] | null | null | null | import asyncio
import logging
from operator import attrgetter
from typing import Dict, Optional
import pytest
from blspy import G2Element
from chia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.rpc.full_node_rpc_api import FullNodeRpcApi
from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.rpc.rpc_server import start_rpc_server
from chia.rpc.wallet_rpc_api import WalletRpcApi
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.simulator.simulator_protocol import FarmNewBlockProtocol
from chia.types.announcement import Announcement
from chia.types.blockchain_format.program import Program
from chia.types.coin_record import CoinRecord
from chia.types.coin_spend import CoinSpend
from chia.types.peer_info import PeerInfo
from chia.types.spend_bundle import SpendBundle
from chia.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from chia.util.config import lock_and_load_config, save_config
from chia.util.hash import std_hash
from chia.util.ints import uint16, uint32, uint64
from chia.wallet.cat_wallet.cat_constants import DEFAULT_CATS
from chia.wallet.cat_wallet.cat_wallet import CATWallet
from chia.wallet.derive_keys import master_sk_to_wallet_sk
from chia.wallet.trading.trade_status import TradeStatus
from chia.wallet.transaction_record import TransactionRecord
from chia.wallet.transaction_sorting import SortKey
from chia.wallet.util.compute_memos import compute_memos
from chia.wallet.util.wallet_types import WalletType
from tests.pools.test_pool_rpc import wallet_is_synced
from tests.time_out_assert import time_out_assert
from tests.util.socket import find_available_listen_port
log = logging.getLogger(__name__)
async def assert_wallet_types(client: WalletRpcClient, expected: Dict[WalletType, int]) -> None:
    """Assert the node reports exactly the expected wallet count for every type.

    Types absent from ``expected`` must have zero wallets. Previously such
    types were skipped entirely (the ``.get(wallet_type, 0)`` default was dead
    code behind the ``in`` check), so unexpected wallets went unnoticed.
    """
    for wallet_type in WalletType:
        wallets = await client.get_wallets(wallet_type)
        wallet_count = len(wallets)
        assert wallet_count == expected.get(wallet_type, 0)
        # Sanity-check that the filter returned only wallets of this type.
        for wallet in wallets:
            assert wallet["type"] == wallet_type.value
class TestWalletRpc:
@pytest.mark.parametrize(
"trusted",
[True, False],
)
@pytest.mark.asyncio
async def test_wallet_rpc(self, two_wallet_nodes, trusted, bt, self_hostname):
test_rpc_port = find_available_listen_port()
test_rpc_port_2 = find_available_listen_port()
test_rpc_port_node = find_available_listen_port()
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
ph_2 = await wallet_2.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
if trusted:
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
else:
wallet_node.config["trusted_peers"] = {}
wallet_node_2.config["trusted_peers"] = {}
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
initial_funds = sum(
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
)
initial_funds_eventually = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks + 1)
]
)
wallet_rpc_api = WalletRpcApi(wallet_node)
wallet_rpc_api_2 = WalletRpcApi(wallet_node_2)
config = bt.config
hostname = config["self_hostname"]
daemon_port = config["daemon_port"]
def stop_node_cb():
pass
full_node_rpc_api = FullNodeRpcApi(full_node_api.full_node)
rpc_cleanup_node = await start_rpc_server(
full_node_rpc_api,
hostname,
daemon_port,
test_rpc_port_node,
stop_node_cb,
bt.root_path,
config,
connect_to_daemon=False,
)
rpc_cleanup = await start_rpc_server(
wallet_rpc_api,
hostname,
daemon_port,
test_rpc_port,
stop_node_cb,
bt.root_path,
config,
connect_to_daemon=False,
)
rpc_cleanup_2 = await start_rpc_server(
wallet_rpc_api_2,
hostname,
daemon_port,
test_rpc_port_2,
stop_node_cb,
bt.root_path,
config,
connect_to_daemon=False,
)
await time_out_assert(5, wallet.get_confirmed_balance, initial_funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, initial_funds)
client = await WalletRpcClient.create(hostname, test_rpc_port, bt.root_path, config)
client_2 = await WalletRpcClient.create(hostname, test_rpc_port_2, bt.root_path, config)
client_node = await FullNodeRpcClient.create(hostname, test_rpc_port_node, bt.root_path, config)
try:
await assert_wallet_types(client, {WalletType.STANDARD_WALLET: 1})
await assert_wallet_types(client_2, {WalletType.STANDARD_WALLET: 1})
await time_out_assert(5, client.get_synced)
addr = encode_puzzle_hash(await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(), "txhg")
tx_amount = 15600000
try:
await client.send_transaction("1", 100000000000000001, addr)
raise Exception("Should not create high value tx")
except ValueError:
pass
# Tests sending a basic transaction
tx = await client.send_transaction("1", tx_amount, addr, memos=["this is a basic tx"])
transaction_id = tx.name
async def tx_in_mempool():
tx = await client.get_transaction("1", transaction_id)
return tx.is_in_mempool()
await time_out_assert(5, tx_in_mempool, True)
await time_out_assert(5, wallet.get_unconfirmed_balance, initial_funds - tx_amount)
assert (await client.get_wallet_balance("1"))["unconfirmed_wallet_balance"] == initial_funds - tx_amount
assert (await client.get_wallet_balance("1"))["confirmed_wallet_balance"] == initial_funds
for i in range(0, 5):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))
async def eventual_balance():
return (await client.get_wallet_balance("1"))["confirmed_wallet_balance"]
async def eventual_balance_det(c, wallet_id: str):
return (await c.get_wallet_balance(wallet_id))["confirmed_wallet_balance"]
await time_out_assert(5, wallet_is_synced, True, wallet_node, full_node_api)
# Checks that the memo can be retrieved
tx_confirmed = await client.get_transaction("1", transaction_id)
assert tx_confirmed.confirmed
assert len(tx_confirmed.get_memos()) == 1
assert [b"this is a basic tx"] in tx_confirmed.get_memos().values()
assert list(tx_confirmed.get_memos().keys())[0] in [a.name() for a in tx.spend_bundle.additions()]
await time_out_assert(5, eventual_balance, initial_funds_eventually - tx_amount)
# Tests offline signing
ph_3 = await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash()
ph_4 = await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash()
ph_5 = await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash()
# Test basic transaction to one output and coin announcement
signed_tx_amount = 888000
tx_coin_announcements = [
Announcement(
std_hash(b"coin_id_1"),
std_hash(b"message"),
b"\xca",
),
Announcement(
std_hash(b"coin_id_2"),
bytes(Program.to("a string")),
),
]
tx_res: TransactionRecord = await client.create_signed_transaction(
[{"amount": signed_tx_amount, "puzzle_hash": ph_3}], coin_announcements=tx_coin_announcements
)
assert tx_res.fee_amount == 0
assert tx_res.amount == signed_tx_amount
assert len(tx_res.additions) == 2 # The output and the change
assert any([addition.amount == signed_tx_amount for addition in tx_res.additions])
# check error for a ASSERT_ANNOUNCE_CONSUMED_FAILED and if the error is not there throw a value error
try:
push_res = await client_node.push_tx(tx_res.spend_bundle)
except ValueError as error:
error_string = error.args[0]["error"] # noqa: # pylint: disable=E1126
if error_string.find("ASSERT_ANNOUNCE_CONSUMED_FAILED") == -1:
raise ValueError from error
# # Test basic transaction to one output and puzzle announcement
signed_tx_amount = 888000
tx_puzzle_announcements = [
Announcement(
std_hash(b"puzzle_hash_1"),
b"message",
b"\xca",
),
Announcement(
std_hash(b"puzzle_hash_2"),
bytes(Program.to("a string")),
),
]
tx_res: TransactionRecord = await client.create_signed_transaction(
[{"amount": signed_tx_amount, "puzzle_hash": ph_3}], puzzle_announcements=tx_puzzle_announcements
)
assert tx_res.fee_amount == 0
assert tx_res.amount == signed_tx_amount
assert len(tx_res.additions) == 2 # The output and the change
assert any([addition.amount == signed_tx_amount for addition in tx_res.additions])
# check error for a ASSERT_ANNOUNCE_CONSUMED_FAILED and if the error is not there throw a value error
try:
push_res = await client_node.push_tx(tx_res.spend_bundle)
except ValueError as error:
error_string = error.args[0]["error"] # noqa: # pylint: disable=E1126
if error_string.find("ASSERT_ANNOUNCE_CONSUMED_FAILED") == -1:
raise ValueError from error
# Test basic transaction to one output
signed_tx_amount = 888000
tx_res: TransactionRecord = await client.create_signed_transaction(
[{"amount": signed_tx_amount, "puzzle_hash": ph_3, "memos": ["My memo"]}]
)
assert tx_res.fee_amount == 0
assert tx_res.amount == signed_tx_amount
assert len(tx_res.additions) == 2 # The output and the change
assert any([addition.amount == signed_tx_amount for addition in tx_res.additions])
push_res = await client.push_tx(tx_res.spend_bundle)
assert push_res["success"]
assert (await client.get_wallet_balance("1"))[
"confirmed_wallet_balance"
] == initial_funds_eventually - tx_amount
for i in range(0, 5):
await client.farm_block(encode_puzzle_hash(ph_2, "txhg"))
await asyncio.sleep(0.5)
await time_out_assert(5, eventual_balance, initial_funds_eventually - tx_amount - signed_tx_amount)
# Test transaction to two outputs, from a specified coin, with a fee
coin_to_spend = None
for addition in tx_res.additions:
if addition.amount != signed_tx_amount:
coin_to_spend = addition
assert coin_to_spend is not None
tx_res = await client.create_signed_transaction(
[{"amount": 444, "puzzle_hash": ph_4, "memos": ["hhh"]}, {"amount": 999, "puzzle_hash": ph_5}],
coins=[coin_to_spend],
fee=100,
)
assert tx_res.fee_amount == 100
assert tx_res.amount == 444 + 999
assert len(tx_res.additions) == 3 # The outputs and the change
assert any([addition.amount == 444 for addition in tx_res.additions])
assert any([addition.amount == 999 for addition in tx_res.additions])
assert sum([rem.amount for rem in tx_res.removals]) - sum([ad.amount for ad in tx_res.additions]) == 100
push_res = await client_node.push_tx(tx_res.spend_bundle)
assert push_res["success"]
for i in range(0, 5):
await client.farm_block(encode_puzzle_hash(ph_2, "txhg"))
await asyncio.sleep(0.5)
found: bool = False
for addition in tx_res.spend_bundle.additions():
if addition.amount == 444:
cr: Optional[CoinRecord] = await client_node.get_coin_record_by_name(addition.name())
assert cr is not None
spend: CoinSpend = await client_node.get_puzzle_and_solution(
addition.parent_coin_info, cr.confirmed_block_index
)
sb: SpendBundle = SpendBundle([spend], G2Element())
assert compute_memos(sb) == {addition.name(): [b"hhh"]}
found = True
assert found
new_balance = initial_funds_eventually - tx_amount - signed_tx_amount - 444 - 999 - 100
await time_out_assert(5, eventual_balance, new_balance)
send_tx_res: TransactionRecord = await client.send_transaction_multi(
"1",
[
{"amount": 555, "puzzle_hash": ph_4, "memos": ["FiMemo"]},
{"amount": 666, "puzzle_hash": ph_5, "memos": ["SeMemo"]},
],
fee=200,
)
assert send_tx_res is not None
assert send_tx_res.fee_amount == 200
assert send_tx_res.amount == 555 + 666
assert len(send_tx_res.additions) == 3 # The outputs and the change
assert any([addition.amount == 555 for addition in send_tx_res.additions])
assert any([addition.amount == 666 for addition in send_tx_res.additions])
assert (
sum([rem.amount for rem in send_tx_res.removals]) - sum([ad.amount for ad in send_tx_res.additions])
== 200
)
await asyncio.sleep(3)
for i in range(0, 5):
await client.farm_block(encode_puzzle_hash(ph_2, "txhg"))
await asyncio.sleep(0.5)
new_balance = new_balance - 555 - 666 - 200
await time_out_assert(5, eventual_balance, new_balance)
address = await client.get_next_address("1", True)
assert len(address) > 10
transactions = await client.get_transactions("1")
assert len(transactions) > 1
all_transactions = await client.get_transactions("1")
# Test transaction pagination
some_transactions = await client.get_transactions("1", 0, 5)
some_transactions_2 = await client.get_transactions("1", 5, 10)
assert some_transactions == all_transactions[0:5]
assert some_transactions_2 == all_transactions[5:10]
# Testing sorts
# Test the default sort (CONFIRMED_AT_HEIGHT)
assert all_transactions == sorted(all_transactions, key=attrgetter("confirmed_at_height"))
all_transactions = await client.get_transactions("1", reverse=True)
assert all_transactions == sorted(all_transactions, key=attrgetter("confirmed_at_height"), reverse=True)
# Test RELEVANCE
await client.send_transaction("1", 1, encode_puzzle_hash(ph_2, "txhg")) # Create a pending tx
all_transactions = await client.get_transactions("1", sort_key=SortKey.RELEVANCE)
sorted_transactions = sorted(all_transactions, key=attrgetter("created_at_time"), reverse=True)
sorted_transactions = sorted(sorted_transactions, key=attrgetter("confirmed_at_height"), reverse=True)
sorted_transactions = sorted(sorted_transactions, key=attrgetter("confirmed"))
assert all_transactions == sorted_transactions
all_transactions = await client.get_transactions("1", sort_key=SortKey.RELEVANCE, reverse=True)
sorted_transactions = sorted(all_transactions, key=attrgetter("created_at_time"))
sorted_transactions = sorted(sorted_transactions, key=attrgetter("confirmed_at_height"))
sorted_transactions = sorted(sorted_transactions, key=attrgetter("confirmed"), reverse=True)
assert all_transactions == sorted_transactions
# Checks that the memo can be retrieved
tx_confirmed = await client.get_transaction("1", send_tx_res.name)
assert tx_confirmed.confirmed
if isinstance(tx_confirmed, SpendBundle):
memos = compute_memos(tx_confirmed)
else:
memos = tx_confirmed.get_memos()
assert len(memos) == 2
print(memos)
assert [b"FiMemo"] in memos.values()
assert [b"SeMemo"] in memos.values()
assert list(memos.keys())[0] in [a.name() for a in send_tx_res.spend_bundle.additions()]
assert list(memos.keys())[1] in [a.name() for a in send_tx_res.spend_bundle.additions()]
# Test get_transactions to address
ph_by_addr = await wallet.get_new_puzzlehash()
await client.send_transaction("1", 1, encode_puzzle_hash(ph_by_addr, "txhg"))
await client.farm_block(encode_puzzle_hash(ph_by_addr, "txhg"))
await time_out_assert(10, wallet_is_synced, True, wallet_node, full_node_api)
tx_for_address = await wallet_rpc_api.get_transactions(
{"wallet_id": "1", "to_address": encode_puzzle_hash(ph_by_addr, "txhg")}
)
assert len(tx_for_address["transactions"]) == 1
assert decode_puzzle_hash(tx_for_address["transactions"][0]["to_address"]) == ph_by_addr
# Test coin selection
selected_coins = await client.select_coins(amount=1, wallet_id=1)
assert len(selected_coins) > 0
##############
# CATS #
##############
# Creates a CAT wallet with 100 mojos and a CAT with 20 mojos
await client.create_new_cat_and_wallet(100)
res = await client.create_new_cat_and_wallet(20)
assert res["success"]
cat_0_id = res["wallet_id"]
asset_id = bytes.fromhex(res["asset_id"])
assert len(asset_id) > 0
await assert_wallet_types(client, {WalletType.STANDARD_WALLET: 1, WalletType.CAT: 2})
await assert_wallet_types(client_2, {WalletType.STANDARD_WALLET: 1})
bal_0 = await client.get_wallet_balance(cat_0_id)
assert bal_0["confirmed_wallet_balance"] == 0
assert bal_0["pending_coin_removal_count"] == 1
col = await client.get_cat_asset_id(cat_0_id)
assert col == asset_id
assert (await client.get_cat_name(cat_0_id)) == CATWallet.default_wallet_name_for_unknown_cat(
asset_id.hex()
)
await client.set_cat_name(cat_0_id, "My cat")
assert (await client.get_cat_name(cat_0_id)) == "My cat"
wid, name = await client.cat_asset_id_to_name(col)
assert wid == cat_0_id
assert name == "My cat"
should_be_none = await client.cat_asset_id_to_name(bytes([0] * 32))
assert should_be_none is None
verified_asset_id = next(iter(DEFAULT_CATS.items()))[1]["asset_id"]
should_be_none, name = await client.cat_asset_id_to_name(bytes.fromhex(verified_asset_id))
assert should_be_none is None
assert name == next(iter(DEFAULT_CATS.items()))[1]["name"]
await asyncio.sleep(1)
for i in range(0, 5):
await client.farm_block(encode_puzzle_hash(ph_2, "txhg"))
await asyncio.sleep(0.5)
await time_out_assert(10, eventual_balance_det, 20, client, cat_0_id)
bal_0 = await client.get_wallet_balance(cat_0_id)
assert bal_0["pending_coin_removal_count"] == 0
assert bal_0["unspent_coin_count"] == 1
# Creates a second wallet with the same CAT
res = await client_2.create_wallet_for_existing_cat(asset_id)
assert res["success"]
cat_1_id = res["wallet_id"]
cat_1_asset_id = bytes.fromhex(res["asset_id"])
assert cat_1_asset_id == asset_id
await assert_wallet_types(client, {WalletType.STANDARD_WALLET: 1, WalletType.CAT: 2})
await assert_wallet_types(client_2, {WalletType.STANDARD_WALLET: 1, WalletType.CAT: 1})
await asyncio.sleep(1)
for i in range(0, 5):
await client.farm_block(encode_puzzle_hash(ph_2, "txhg"))
await asyncio.sleep(0.5)
bal_1 = await client_2.get_wallet_balance(cat_1_id)
assert bal_1["confirmed_wallet_balance"] == 0
addr_0 = await client.get_next_address(cat_0_id, False)
addr_1 = await client_2.get_next_address(cat_1_id, False)
assert addr_0 != addr_1
await client.cat_spend(cat_0_id, 4, addr_1, 0, ["the cat memo"])
await asyncio.sleep(1)
for i in range(0, 5):
await client.farm_block(encode_puzzle_hash(ph_2, "txhg"))
await asyncio.sleep(0.5)
# Test unacknowledged CAT
await wallet_node.wallet_state_manager.interested_store.add_unacknowledged_token(
asset_id, "Unknown", uint32(10000), bytes.fromhex("ABCD")
)
cats = await client.get_stray_cats()
assert len(cats) == 1
await time_out_assert(10, eventual_balance_det, 16, client, cat_0_id)
await time_out_assert(10, eventual_balance_det, 4, client_2, cat_1_id)
# Test CAT coin selection
selected_coins = await client.select_coins(amount=1, wallet_id=cat_0_id)
assert len(selected_coins) > 0
##########
# Offers #
##########
# Create an offer of 5 chia for one CAT
offer, trade_record = await client.create_offer_for_ids({uint32(1): -5, cat_0_id: 1}, validate_only=True)
all_offers = await client.get_all_offers()
assert len(all_offers) == 0
assert offer is None
offer, trade_record = await client.create_offer_for_ids({uint32(1): -5, cat_0_id: 1}, fee=uint64(1))
summary = await client.get_offer_summary(offer)
assert summary == {"offered": {"xhg": 5}, "requested": {col.hex(): 1}, "fees": 1}
assert await client.check_offer_validity(offer)
all_offers = await client.get_all_offers(file_contents=True)
assert len(all_offers) == 1
assert TradeStatus(all_offers[0].status) == TradeStatus.PENDING_ACCEPT
assert all_offers[0].offer == bytes(offer)
trade_record = await client_2.take_offer(offer, fee=uint64(1))
assert TradeStatus(trade_record.status) == TradeStatus.PENDING_CONFIRM
await client.cancel_offer(offer.name(), secure=False)
trade_record = await client.get_offer(offer.name(), file_contents=True)
assert trade_record.offer == bytes(offer)
assert TradeStatus(trade_record.status) == TradeStatus.CANCELLED
await client.cancel_offer(offer.name(), fee=uint64(1), secure=True)
trade_record = await client.get_offer(offer.name())
assert TradeStatus(trade_record.status) == TradeStatus.PENDING_CANCEL
new_offer, new_trade_record = await client.create_offer_for_ids({uint32(1): -5, cat_0_id: 1}, fee=uint64(1))
all_offers = await client.get_all_offers()
assert len(all_offers) == 2
await asyncio.sleep(1)
for i in range(0, 5):
await client.farm_block(encode_puzzle_hash(ph_2, "txhg"))
await asyncio.sleep(0.5)
async def is_trade_confirmed(client, trade) -> bool:
trade_record = await client.get_offer(trade.name())
return TradeStatus(trade_record.status) == TradeStatus.CONFIRMED
await time_out_assert(15, is_trade_confirmed, True, client, offer)
# Test trade sorting
def only_ids(trades):
return [t.trade_id for t in trades]
trade_record = await client.get_offer(offer.name())
all_offers = await client.get_all_offers(include_completed=True) # confirmed at index descending
assert len(all_offers) == 2
assert only_ids(all_offers) == only_ids([trade_record, new_trade_record])
all_offers = await client.get_all_offers(
include_completed=True, reverse=True
) # confirmed at index ascending
assert only_ids(all_offers) == only_ids([new_trade_record, trade_record])
all_offers = await client.get_all_offers(include_completed=True, sort_key="RELEVANCE") # most relevant
assert only_ids(all_offers) == only_ids([new_trade_record, trade_record])
all_offers = await client.get_all_offers(
include_completed=True, sort_key="RELEVANCE", reverse=True
) # least relevant
assert only_ids(all_offers) == only_ids([trade_record, new_trade_record])
# Test pagination
all_offers = await client.get_all_offers(include_completed=True, start=0, end=1)
assert len(all_offers) == 1
all_offers = await client.get_all_offers(include_completed=True, start=50)
assert len(all_offers) == 0
all_offers = await client.get_all_offers(include_completed=True, start=0, end=50)
assert len(all_offers) == 2
# Keys and addresses
address = await client.get_next_address("1", True)
assert len(address) > 10
all_transactions = await client.get_transactions("1")
some_transactions = await client.get_transactions("1", 0, 5)
some_transactions_2 = await client.get_transactions("1", 5, 10)
assert len(all_transactions) > 1
assert some_transactions == all_transactions[0:5]
assert some_transactions_2 == all_transactions[5:10]
transaction_count = await client.get_transaction_count("1")
assert transaction_count == len(all_transactions)
pks = await client.get_public_keys()
assert len(pks) == 1
assert (await client.get_height_info()) > 0
created_tx = await client.send_transaction("1", tx_amount, addr)
async def tx_in_mempool_2():
tx = await client.get_transaction("1", created_tx.name)
return tx.is_in_mempool()
await time_out_assert(5, tx_in_mempool_2, True)
assert len(await wallet.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(1)) == 1
await client.delete_unconfirmed_transactions("1")
assert len(await wallet.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(1)) == 0
sk_dict = await client.get_private_key(pks[0])
assert sk_dict["fingerprint"] == pks[0]
assert sk_dict["sk"] is not None
assert sk_dict["pk"] is not None
assert sk_dict["seed"] is not None
mnemonic = await client.generate_mnemonic()
assert len(mnemonic) == 24
await client.add_key(mnemonic)
pks = await client.get_public_keys()
assert len(pks) == 2
await client.log_in(pks[1])
sk_dict = await client.get_private_key(pks[1])
assert sk_dict["fingerprint"] == pks[1]
# Add in reward addresses into farmer and pool for testing delete key checks
# set farmer to first private key
sk = await wallet_node.get_key_for_fingerprint(pks[0])
test_ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(0)).get_g1())
with lock_and_load_config(wallet_node.root_path, "config.yaml") as test_config:
test_config["farmer"]["xhg_target_address"] = encode_puzzle_hash(test_ph, "txhg")
# set pool to second private key
sk = await wallet_node.get_key_for_fingerprint(pks[1])
test_ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(0)).get_g1())
test_config["pool"]["xhg_target_address"] = encode_puzzle_hash(test_ph, "txhg")
save_config(wallet_node.root_path, "config.yaml", test_config)
# Check first key
sk_dict = await client.check_delete_key(pks[0])
assert sk_dict["fingerprint"] == pks[0]
assert sk_dict["used_for_farmer_rewards"] is True
assert sk_dict["used_for_pool_rewards"] is False
# Check second key
sk_dict = await client.check_delete_key(pks[1])
assert sk_dict["fingerprint"] == pks[1]
assert sk_dict["used_for_farmer_rewards"] is False
assert sk_dict["used_for_pool_rewards"] is True
# Check unknown key
sk_dict = await client.check_delete_key(123456)
assert sk_dict["fingerprint"] == 123456
assert sk_dict["used_for_farmer_rewards"] is False
assert sk_dict["used_for_pool_rewards"] is False
await client.delete_key(pks[0])
await client.log_in(pks[1])
assert len(await client.get_public_keys()) == 1
assert not (await client.get_sync_status())
wallets = await client.get_wallets()
assert len(wallets) == 1
balance = await client.get_wallet_balance(wallets[0]["id"])
assert balance["unconfirmed_wallet_balance"] == 0
try:
await client.send_transaction(wallets[0]["id"], 100, addr)
raise Exception("Should not create tx if no balance")
except ValueError:
pass
# Delete all keys
await client.delete_all_keys()
assert len(await client.get_public_keys()) == 0
finally:
# Checks that the RPC manages to stop the node
client.close()
client_2.close()
client_node.close()
await client.await_closed()
await client_2.await_closed()
await client_node.await_closed()
await rpc_cleanup()
await rpc_cleanup_2()
await rpc_cleanup_node()
| 46.906021 | 120 | 0.630091 |
79587701a1d9486c63469d8d89087654c3744497 | 3,120 | py | Python | docs/examples/data_access/sqlalchemy/sqlalchemy_example/test_authorization.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | [
"Apache-2.0"
] | 2,167 | 2020-07-28T15:49:48.000Z | 2022-03-31T06:11:28.000Z | docs/examples/data_access/sqlalchemy/sqlalchemy_example/test_authorization.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | [
"Apache-2.0"
] | 1,060 | 2020-07-25T18:37:07.000Z | 2022-03-30T05:49:44.000Z | docs/examples/data_access/sqlalchemy/sqlalchemy_example/test_authorization.py | connec/oso | a12d94206807b69beb6fe7a9070b9afcacdfc845 | [
"Apache-2.0"
] | 118 | 2020-08-05T19:27:14.000Z | 2022-03-31T16:37:39.000Z | from pathlib import Path
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from oso import Oso
from sqlalchemy_oso.session import scoped_session
from sqlalchemy_oso.auth import register_models
from .models import Post, User, Model
# Absolute path to the polar policy file that lives next to this test module.
POLICY_FILE = Path(__file__).absolute().parent / 'policy.polar'
@pytest.fixture
def engine():
    """Provide an in-memory SQLite engine with the test schema created."""
    db_engine = create_engine('sqlite:///:memory:')
    Model.metadata.create_all(db_engine)
    return db_engine
@pytest.fixture
def authorization_data():
    """Mutable holder for the current user/action consumed by `session`."""
    data = {'user': None, 'action': 'read'}
    return data
@pytest.fixture
def session(engine, oso, authorization_data):
    """Build an oso-authorized scoped session bound to the test engine.

    The getters are evaluated lazily on each use, so tests may mutate
    ``authorization_data`` after the session has been created.
    """
    def current_user():
        return authorization_data['user']

    def current_action():
        return authorization_data['action']

    return scoped_session(
        bind=engine,
        get_oso=lambda: oso,
        get_user=current_user,
        get_action=current_action,
    )
@pytest.fixture
def oso():
    """Fresh Oso policy engine for each test."""
    policy_engine = Oso()
    return policy_engine
@pytest.fixture
def policy(oso):
    """Register all mapped models with oso, then load the polar policy.

    Registration must happen before the policy is loaded so the polar
    rules can reference the model classes.
    """
    register_models(oso, Model)
    oso.load_file(POLICY_FILE)
@pytest.fixture
def test_data(engine):
    """Populate the database with two users and four posts.

    Returns a dict mapping local variable names to the created Model
    instances (plus ``_session``), so tests can reference fixtures by name.
    """
    # expire_on_commit=False keeps attribute access working after commit,
    # since the returned instances outlive this fixture's session scope.
    session = Session(bind=engine, expire_on_commit=False)
    user = User(username='user')
    manager = User(username='manager', manages=[user])
    public_user_post = Post(contents='public_user_post',
                            access_level='public',
                            created_by=user)
    private_user_post = Post(contents='private_user_post',
                             access_level='private',
                             created_by=user)
    private_manager_post = Post(contents='private manager post',
                                access_level='private',
                                created_by=manager)
    public_manager_post = Post(contents='public manager post',
                               access_level='public',
                               created_by=manager)
    # Collect every Model instance bound to a local name above; the
    # locals() filter deliberately excludes the session object.
    models = {name: value for name, value in locals().items() if isinstance(value, Model)}
    for instance in models.values():
        session.add(instance)
    session.commit()
    # Prevent session from being destroyed.
    models['_session'] = session
    return models
def test_basic(oso, policy, session, test_data, authorization_data):
    """A plain user sees all public posts plus their own private post."""
    authorization_data['user'] = test_data['user']
    query = session.query(Post)
    assert query.count() == 3
    visible_ids = [post.id for post in query.all()]
    for key in ('public_user_post', 'private_user_post', 'public_manager_post'):
        assert test_data[key].id in visible_ids
def test_manages(oso, policy, session, test_data, authorization_data):
    """A manager additionally sees private posts of the users they manage."""
    authorization_data['user'] = test_data['manager']
    query = session.query(Post)
    assert query.count() == 4
    visible_ids = [post.id for post in query.all()]
    expected = (
        'public_user_post',
        'private_user_post',
        'public_manager_post',
        'private_manager_post',
    )
    for key in expected:
        assert test_data[key].id in visible_ids
def test_user_access(oso, policy, session, test_data, authorization_data):
    """A plain user can read both user rows under the current policy."""
    authorization_data['user'] = test_data['user']
    user_query = session.query(User)
    assert user_query.count() == 2
7958775c4147d26b973e02ea2f723f9988e9dbbb | 322 | py | Python | src/encoders/__init__.py | ligerfotis/semantic-code-search_gpt2_tf | 5eff2ceda0789ee265ad08b742f0d728884e3df8 | [
"MIT"
] | 1 | 2020-09-12T04:03:12.000Z | 2020-09-12T04:03:12.000Z | src/encoders/__init__.py | ligerfotis/semantic-code-search_gpt2_tf | 5eff2ceda0789ee265ad08b742f0d728884e3df8 | [
"MIT"
] | 1 | 2022-03-03T17:41:25.000Z | 2022-03-16T01:11:02.000Z | src/encoders/__init__.py | ligerfotis/semantic-code-search_gpt2_tf | 5eff2ceda0789ee265ad08b742f0d728884e3df8 | [
"MIT"
] | 1 | 2021-12-07T06:55:41.000Z | 2021-12-07T06:55:41.000Z | from .encoder import Encoder, QueryType
from .nbow_seq_encoder import NBoWEncoder
from .rnn_seq_encoder import RNNEncoder
from .self_att_encoder import SelfAttentionEncoder
from .conv_seq_encoder import ConvolutionSeqEncoder
from .conv_self_att_encoder import ConvSelfAttentionEncoder
from .gpt2_encoder import GPT2Encoder | 46 | 59 | 0.888199 |
79587783ba909c3c7e4094600f3f046a64c6614b | 1,245 | py | Python | Browser/keywords/__init__.py | emanlove/robotframework-browser | 8d9dae4301fe263bc0f7682de58a6bf299211382 | [
"Apache-2.0"
] | 1 | 2021-09-02T02:09:01.000Z | 2021-09-02T02:09:01.000Z | Browser/keywords/__init__.py | emanlove/robotframework-browser | 8d9dae4301fe263bc0f7682de58a6bf299211382 | [
"Apache-2.0"
] | 228 | 2020-12-18T07:15:41.000Z | 2022-03-25T13:11:56.000Z | Browser/keywords/__init__.py | emanlove/robotframework-browser | 8d9dae4301fe263bc0f7682de58a6bf299211382 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .browser_control import Control
from .cookie import Cookie
from .device_descriptors import Devices
from .evaluation import Evaluation
from .getters import Getters
from .interaction import Interaction
from .network import Network
from .playwright_state import PlaywrightState
from .promises import Promises
from .runonfailure import RunOnFailureKeywords
from .waiter import Waiter
from .webapp_state import WebAppState
# Explicit public API of the keywords package: these are the keyword
# classes re-exported for consumption by the Browser library. Keep this
# list in sync with the imports above.
__all__ = [
    "Control",
    "Cookie",
    "Devices",
    "Getters",
    "Evaluation",
    "Interaction",
    "Network",
    "PlaywrightState",
    "Promises",
    "RunOnFailureKeywords",
    "Waiter",
    "WebAppState",
]
| 29.642857 | 74 | 0.753414 |
79587910d23a66bec071df9ca3c49218c0fa64cf | 8,194 | py | Python | neutron/agent/l3/ha.py | acdc-cloud/neutron | 2510836886555179f9e9e39b1fdbf94296befc51 | [
"Apache-2.0"
] | null | null | null | neutron/agent/l3/ha.py | acdc-cloud/neutron | 2510836886555179f9e9e39b1fdbf94296befc51 | [
"Apache-2.0"
] | null | null | null | neutron/agent/l3/ha.py | acdc-cloud/neutron | 2510836886555179f9e9e39b1fdbf94296befc51 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import eventlet
from oslo_log import log as logging
from oslo_utils import fileutils
import webob
from neutron.agent.linux import utils as agent_utils
from neutron.common import constants
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)

# Listen backlog for the unix-domain WSGI server that receives
# keepalived state-change notifications.
KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG = 4096

# Maps keepalived VRRP state strings (as delivered by the state-change
# helper) to the HA router states reported to the neutron server:
# 'fault' is reported as standby, while 'unknown' keeps its own
# dedicated unknown state.
TRANSLATION_MAP = {'master': constants.HA_ROUTER_STATE_ACTIVE,
                   'backup': constants.HA_ROUTER_STATE_STANDBY,
                   'fault': constants.HA_ROUTER_STATE_STANDBY,
                   'unknown': constants.HA_ROUTER_STATE_UNKNOWN}
class KeepalivedStateChangeHandler(object):
    """WSGI application that receives keepalived state transitions.

    Requests arrive over the unix-domain socket served by
    L3AgentKeepalivedStateChangeServer; the router id and its new state
    are carried in custom headers.
    """

    def __init__(self, agent):
        # The L3 agent that will process the queued state changes.
        self.agent = agent

    @webob.dec.wsgify(RequestClass=webob.Request)
    def __call__(self, req):
        # Headers are set by the neutron-keepalived-state-change helper.
        router_id = req.headers['X-Neutron-Router-Id']
        state = req.headers['X-Neutron-State']
        self.enqueue(router_id, state)

    def enqueue(self, router_id, state):
        """Hand a (router_id, state) notification off to the L3 agent."""
        LOG.debug('Handling notification for router '
                  '%(router_id)s, state %(state)s', {'router_id': router_id,
                                                     'state': state})
        self.agent.enqueue_state_change(router_id, state)
class L3AgentKeepalivedStateChangeServer(object):
    """Unix-domain WSGI server listening for keepalived state changes."""

    def __init__(self, agent, conf):
        self.agent = agent
        self.conf = conf
        # Make sure the socket's parent directory exists and that no
        # regular file occupies the socket path.
        agent_utils.ensure_directory_exists_without_file(
            self.get_keepalived_state_change_socket_path(self.conf))

    @classmethod
    def get_keepalived_state_change_socket_path(cls, conf):
        """Return the unix socket path under the agent's state directory."""
        return os.path.join(conf.state_path, 'keepalived-state-change')

    def run(self):
        """Start serving notifications; blocks in server.wait()."""
        server = agent_utils.UnixDomainWSGIServer(
            'neutron-keepalived-state-change',
            num_threads=self.conf.ha_keepalived_state_change_server_threads)
        # workers=0 runs the server in this process (no forked workers).
        server.start(KeepalivedStateChangeHandler(self.agent),
                     self.get_keepalived_state_change_socket_path(self.conf),
                     workers=0,
                     backlog=KEEPALIVED_STATE_CHANGE_SERVER_BACKLOG)
        server.wait()
class AgentMixin(object):
def __init__(self, host):
self._init_ha_conf_path()
super(AgentMixin, self).__init__(host)
# BatchNotifier queue is needed to ensure that the HA router
# state change sequence is under the proper order.
self.state_change_notifier = batch_notifier.BatchNotifier(
self._calculate_batch_duration(), self.notify_server)
eventlet.spawn(self._start_keepalived_notifications_server)
def _get_router_info(self, router_id):
try:
return self.router_info[router_id]
except KeyError:
LOG.info('Router %s is not managed by this agent. It was '
'possibly deleted concurrently.', router_id)
def check_ha_state_for_router(self, router_id, current_state):
ri = self._get_router_info(router_id)
if not ri:
return
ha_state = ri.ha_state
if current_state != TRANSLATION_MAP[ha_state]:
LOG.debug("Updating server with state %(state)s for router "
"%(router_id)s", {'router_id': router_id,
'state': ha_state})
self.state_change_notifier.queue_event((router_id, ha_state))
def _start_keepalived_notifications_server(self):
state_change_server = (
L3AgentKeepalivedStateChangeServer(self, self.conf))
state_change_server.run()
def _calculate_batch_duration(self):
# Set the BatchNotifier interval to ha_vrrp_advert_int,
# default 2 seconds.
return self.conf.ha_vrrp_advert_int
def enqueue_state_change(self, router_id, state):
state_change_data = {"router_id": router_id, "state": state}
LOG.info('Router %(router_id)s transitioned to %(state)s',
state_change_data)
ri = self._get_router_info(router_id)
if ri is None:
return
# TODO(dalvarez): Fix bug 1677279 by moving the IPv6 parameters
# configuration to keepalived-state-change in order to remove the
# dependency that currently exists on l3-agent running for the IPv6
# failover.
self._configure_ipv6_params(ri, state)
if self.conf.enable_metadata_proxy:
self._update_metadata_proxy(ri, router_id, state)
self._update_radvd_daemon(ri, state)
self.pd.process_ha_state(router_id, state == 'master')
self.state_change_notifier.queue_event((router_id, state))
self.l3_ext_manager.ha_state_change(self.context, state_change_data)
def _configure_ipv6_params(self, ri, state):
if not self.use_ipv6:
return
ipv6_forwarding_enable = state == 'master'
if ri.router.get('distributed', False):
namespace = ri.ha_namespace
else:
namespace = ri.ns_name
if ipv6_forwarding_enable:
ri.driver.configure_ipv6_forwarding(
namespace, 'all', ipv6_forwarding_enable)
# If ipv6 is enabled on the platform, ipv6_gateway config flag is
# not set and external_network associated to the router does not
# include any IPv6 subnet, enable the gateway interface to accept
# Router Advts from upstream router for default route on master
# instances as well as ipv6 forwarding. Otherwise, disable them.
ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id']
if ex_gw_port_id:
interface_name = ri.get_external_device_name(ex_gw_port_id)
ri._configure_ipv6_params_on_gw(
ri.ex_gw_port, namespace, interface_name,
ipv6_forwarding_enable)
def _update_metadata_proxy(self, ri, router_id, state):
# NOTE(slaweq): Since the metadata proxy is spawned in the qrouter
# namespace and not in the snat namespace, even standby DVR-HA
# routers needs to serve metadata requests to local ports.
if state == 'master' or ri.router.get('distributed', False):
LOG.debug('Spawning metadata proxy for router %s', router_id)
self.metadata_driver.spawn_monitored_metadata_proxy(
self.process_monitor, ri.ns_name, self.conf.metadata_port,
self.conf, router_id=ri.router_id)
else:
LOG.debug('Closing metadata proxy for router %s', router_id)
self.metadata_driver.destroy_monitored_metadata_proxy(
self.process_monitor, ri.router_id, self.conf, ri.ns_name)
def _update_radvd_daemon(self, ri, state):
# Radvd has to be spawned only on the Master HA Router. If there are
# any state transitions, we enable/disable radvd accordingly.
if state == 'master':
ri.enable_radvd()
else:
ri.disable_radvd()
def notify_server(self, batched_events):
eventlet.spawn_n(self._notify_server, batched_events)
def _notify_server(self, batched_events):
    """Translate batched keepalived states and report them over RPC.

    :param batched_events: iterable of (router_id, state) pairs; states
        are mapped through TRANSLATION_MAP before being sent.
    """
    translated_states = {
        router_id: TRANSLATION_MAP[state]
        for router_id, state in batched_events
    }
    LOG.debug('Updating server with HA routers states %s',
              translated_states)
    self.plugin_rpc.update_ha_routers_states(
        self.context, translated_states)
def _init_ha_conf_path(self):
    """Ensure the keepalived/HA config directory exists with mode 0755."""
    # dirname("/<path>/") normalizes the configured value into an
    # absolute directory path without a trailing slash.
    ha_full_path = os.path.dirname("/%s/" % self.conf.ha_confs_path)
    fileutils.ensure_tree(ha_full_path, mode=0o755)
| 41.383838 | 78 | 0.667684 |
79587986a92c0b8d4b4a40573ce4d95310569dcf | 9,716 | py | Python | multiagent/scenarios/food_collect.py | debajit15kgp/multiagent-envs | cc5bd1a1015636a07d8e703ee57067b315dde596 | [
"MIT"
] | null | null | null | multiagent/scenarios/food_collect.py | debajit15kgp/multiagent-envs | cc5bd1a1015636a07d8e703ee57067b315dde596 | [
"MIT"
] | null | null | null | multiagent/scenarios/food_collect.py | debajit15kgp/multiagent-envs | cc5bd1a1015636a07d8e703ee57067b315dde596 | [
"MIT"
] | null | null | null | import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
import os
SIGHT = 100
ALPHA = 1
def softmax_dis(x):
    """Return the minimum value of ``x``.

    NOTE: despite the name, this does not compute a softmax — it simply
    returns the smallest element of the input (used as the closest
    distance to a landmark). The name is kept for caller compatibility.

    :param x: non-empty array-like of numeric values.
    :return: the minimum element (numpy scalar).
    """
    return np.min(np.asarray(x))
class Scenario(BaseScenario):
    """Cooperative food-collection scenario: agents are rewarded for
    occupying food landmarks while avoiding collisions with each other.
    Rewards are partially shared among agents via the ``alpha`` mixing
    coefficient; observations are masked beyond the ``sight`` radius."""

    def __init__(self, n_good, n_adv, n_landmarks, n_food, n_forests, alpha, sight, no_wheel, ratio):
        """Store scenario hyper-parameters.

        :param n_good: number of (good) agents
        :param n_adv: unused here — presumably kept for a shared scenario
            signature; TODO confirm against the scenario factory
        :param n_landmarks: number of plain (zero-size) landmarks
        :param n_food: number of food landmarks
        :param n_forests: number of forest landmarks
        :param alpha: reward-sharing coefficient (0 = selfish, 1 = fully shared)
        :param sight: observation radius; entities beyond it are zero-masked
        :param no_wheel: if True, disable distance/occupation shaping rewards
        :param ratio: unused here — TODO confirm
        """
        self.n_good = n_good
        self.n_landmarks = n_landmarks
        self.n_food = n_food
        self.n_forests = n_forests
        self.alpha = alpha
        self.sight = sight
        self.no_wheel = no_wheel
        print(sight,"sight___simple_spread_v25")
        print(alpha,"alpha######################")

    def make_world(self):
        """Build and return the world with agents, landmarks, food and
        forests; initial state is set via :meth:`reset_world`."""
        world = World()
        # set any world properties first
        world.collaborative = True
        world.dim_c = 2
        num_good_agents = self.n_good
        world.num_good_agents = num_good_agents
        num_agents = num_good_agents
        num_landmarks = self.n_landmarks
        num_food = self.n_food
        num_forests = self.n_forests
        # add agents
        world.agents = [Agent() for i in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = True
            agent.silent = True
            agent.adversary = False
            agent.size = 0.05
            agent.accel = 4.0
            agent.showmore = np.zeros(num_food)
            agent.max_speed = 4
            agent.live = 1
            agent.mindis = 0
            agent.time = 0
            agent.occupy = 0
        # plain landmarks are invisible markers (size 0)
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0
            landmark.boundary = False
        # make initial conditions
        world.food = [Landmark() for i in range(num_food)]
        for i, landmark in enumerate(world.food):
            landmark.name = 'food %d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.03
            landmark.boundary = False
            landmark.occupy = [0]
            landmark.mindis = 0
        world.forests = [Landmark() for i in range(num_forests)]
        for i, landmark in enumerate(world.forests):
            landmark.name = 'forest %d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.3
            landmark.boundary = False
        # food and forests are appended so observations iterate them too
        world.landmarks += world.food
        world.landmarks += world.forests
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Re-seed the RNG and randomize colors, positions and per-agent
        bookkeeping for a fresh episode."""
        # fresh OS-entropy seed so parallel workers don't share streams
        seed = int.from_bytes(os.urandom(4), byteorder='little')
        # print("reseed to", seed)
        np.random.seed(seed)
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.6, 0.95, 0.45]) if not agent.adversary else np.array([0.95, 0.45, 0.45])
            agent.live = 1
            agent.mindis = 0
            agent.time = 0
            agent.occupy = [0]
        for i, landmark in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        for i, landmark in enumerate(world.food):
            landmark.color = np.array([0.15, 0.15, 0.65])
        for i, landmark in enumerate(world.forests):
            landmark.color = np.array([0.6, 0.9, 0.6])
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for i, landmark in enumerate(world.landmarks):
            landmark.state.p_pos = np.random.uniform(-0.9, +0.9, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        for i, landmark in enumerate(world.food):
            landmark.state.p_pos = np.random.uniform(-0.9, +0.9, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
            landmark.occupy = 0
            landmark.mindis = 0
        for i, landmark in enumerate(world.forests):
            landmark.state.p_pos = np.random.uniform(-0.9, +0.9, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)

    def benchmark_data(self, agent, world):
        # returns data for benchmarking purposes
        # NOTE(review): all agents are created with adversary=False, so this
        # branch is unreachable here; `good_agents` is also not defined in
        # this scenario — confirm before relying on it.
        if agent.adversary:
            collisions = 0
            for a in self.good_agents(world):
                if self.is_collision(a, agent):
                    collisions += 1
            return collisions
        else:
            return 0

    def is_collision(self, agent1, agent2):
        """True if the two entities' circles overlap (distance < sum of radii)."""
        delta_pos = agent1.state.p_pos - agent2.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        dist_min = agent1.size + agent2.size
        return True if dist < dist_min else False

    def done(self, agent, world):
        # episodes never terminate early in this scenario
        return 0

    def info(self, agent, world):
        """Return [on_food_flag] + occupancy fraction for logging/benchmarks."""
        time_grass = []
        time_live = []
        mark_grass = 0
        if agent.live:
            time_live.append(1)
            for food in world.food:
                if self.is_collision(agent, food):
                    mark_grass = 1
                    break
        else:
            time_live.append(0)
        if mark_grass:
            time_grass.append(1)
        else:
            time_grass.append(0)
        # agent.occupy is a 1-element list set by reward_all_in_once
        return np.concatenate([np.array(time_grass)]+[np.array(agent.occupy)])

    def reward(self, agent, world):
        # Agents are rewarded based on minimum agent distance to each landmark
        # main_reward = self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
        main_reward = self.reward_all_in_once(agent, world)
        return main_reward

    def reward_all_in_once(self, agent, world):
        """Compute the reward vector for ALL agents at once.

        Components: collision penalty (partially shared via alpha),
        negative min-distance-to-food shaping, a shared bonus per occupied
        food, and a time-discounted bonus when every food is occupied.
        Returns a plain list of per-agent rewards.
        """
        alpha = self.alpha
        num_agents = len(world.agents)
        reward_n = np.zeros(num_agents)
        # reward_n = [0]* num_agents
        # print(reward_n)
        alpha_sharing = self.alpha
        shape = True
        good_collide_id = []
        food_id = []
        for i, agent_new in enumerate(world.agents):
            # NOTE(review): 1/26 assumes Python 3 true division; under
            # Python 2 this would be 0 — confirm target interpreter.
            agent_new.time += 1/26
            # collision reward:
            if agent_new.collide:
                for j, good in enumerate(world.agents):
                    if good is not agent_new:
                        if self.is_collision(good, agent_new):
                            # (1-alpha) share hits the collider, alpha share
                            # is spread over everyone
                            reward_n[i] = reward_n[i]-3/num_agents*(1-alpha)
                            reward_buffer = -3/num_agents*(alpha)*np.ones(num_agents)
                            reward_n = reward_n+reward_buffer
        # shape food
        full_house = []
        for food in world.food:
            if food.occupy==1:
                full_house.append(1)
        if not self.no_wheel:
            # distance shaping: each agent is penalized by its distance to
            # the closest food
            for i, agent_new in enumerate(world.agents):
                min_dis = min([np.sqrt(np.sum(np.square(food.state.p_pos - agent_new.state.p_pos))) for food in world.food])
                dis_change = -1*min_dis
                agent_new.mindis = min_dis
                reward_n[i] = reward_n[i]+dis_change
        # occupation bonus: every food touched by at least one agent pays a
        # shared bonus and is flagged occupied
        occupy_list = []
        mark=0
        for food in world.food:
            mark=0
            for agent_new in world.agents:
                if self.is_collision(food, agent_new):
                    mark=1
                    occupy_list.append(1)
                    reward_buffer = 6/num_agents*np.ones(num_agents)
                    reward_n = reward_n+reward_buffer
                    food.occupy=1
                    break
            if mark==0:
                food.occupy=0
        if not self.no_wheel:
            if len(occupy_list)==len(world.food):
                # NOTE(review): agent_new leaks from the loop above — the
                # time discount uses the LAST iterated agent only; confirm
                # this is intended.
                reward_buffer = 10*np.ones(num_agents)-agent_new.time*2
                reward_n = reward_n+reward_buffer
        for agent_new in world.agents:
            # fraction of food currently occupied, stored for info()
            agent_new.occupy = [len(occupy_list)/len(world.food)]
        return list(reward_n)

    def observation(self, agent, world):
        """Build this agent's observation: own vel/pos/live flag, masked
        relative landmark positions, and masked data of other agents."""
        # get positions of all entities in this agent's reference frame
        entity_pos = []
        for entity in world.landmarks:
            distance = np.sqrt(np.sum(np.square(entity.state.p_pos - agent.state.p_pos)))
            if distance > self.sight:
                # out of sight: 3 zeros (2 pos + visibility flag)
                entity_pos.append([0,0,0])
            else:
                # in sight: relative position followed by visibility flag 1
                entity_pos.append(entity.state.p_pos - agent.state.p_pos)
                entity_pos.append([1])
        # communication of all other agents
        comm = []
        other_pos = []
        other_vel = []
        other_live = []
        other_time = []
        for other in world.agents:
            if other is agent: continue
            comm.append(other.state.c)
            distance = np.sqrt(np.sum(np.square(other.state.p_pos - agent.state.p_pos)))
            # print(distance,'distance')
            # print(other.live, 'other_live')
            if distance > self.sight or (not other.live):
                other_pos.append([0,0])
                other_vel.append([0,0])
                other_live.append([0])
                other_time.append([0])
            else:
                other_pos.append(other.state.p_pos - agent.state.p_pos)
                other_vel.append(other.state.p_vel)
                other_live.append(np.array([other.live]))
                other_time.append(np.array([other.time]))
        # note: comm and other_time are collected but not included below
        result = np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + [np.array([agent.live])] + entity_pos + other_pos + other_vel + other_live)
        return result
79587a52ef1bfb321da77e381af034d9ef7b57e5 | 483 | py | Python | month03.2/django/day05/mysitel3/oto/models.py | Amiao-miao/all-codes | ec50036d42d40086cac5fddf6baf4de18ac91e55 | [
"Apache-2.0"
] | 1 | 2021-02-02T02:17:37.000Z | 2021-02-02T02:17:37.000Z | month03.2/django/day05/mysitel3/oto/models.py | Amiao-miao/all-codes | ec50036d42d40086cac5fddf6baf4de18ac91e55 | [
"Apache-2.0"
] | null | null | null | month03.2/django/day05/mysitel3/oto/models.py | Amiao-miao/all-codes | ec50036d42d40086cac5fddf6baf4de18ac91e55 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
class Author(models.Model):
    """Author record; one-to-one counterpart of :class:`Wife`."""
    # Display name of the author (verbose name is Chinese for "name").
    name = models.CharField('姓名', max_length=20)

    def __str__(self):
        return self.name

    class Meta:
        # Admin plural label (Chinese for "authors").
        verbose_name_plural = '作者'
class Wife(models.Model):
    """Demo model showing a one-to-one relation to :class:`Author`."""
    # Display name (verbose name is Chinese for "name").
    name = models.CharField('姓名', max_length=20)
    # One-to-one link; deleting the Author cascades to this row.
    author = models.OneToOneField(Author, verbose_name='作者', on_delete=models.CASCADE)

    def __str__(self):
        return self.name

    class Meta:
        # Admin plural label (Chinese for "wives").
        verbose_name_plural = '妻子'
79587c106fe6a3b4fd76960ce09a5b1519cbb0cf | 8,391 | py | Python | Training_BigDL_Zoo/4.3-Training-InclusiveClassifier-TFPark.py | jenniew/SparkDLTrigger | 893cb06357a32d12f4a1ce24ec91d8e8ac47fd0b | [
"Apache-2.0"
] | null | null | null | Training_BigDL_Zoo/4.3-Training-InclusiveClassifier-TFPark.py | jenniew/SparkDLTrigger | 893cb06357a32d12f4a1ce24ec91d8e8ac47fd0b | [
"Apache-2.0"
] | null | null | null | Training_BigDL_Zoo/4.3-Training-InclusiveClassifier-TFPark.py | jenniew/SparkDLTrigger | 893cb06357a32d12f4a1ce24ec91d8e8ac47fd0b | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Now we will train the Inclusive classifier, a combination of the Particle-sequence classifier with the High Level Features.
#
# To run this notebook we used the following configuration:
# * *Software stack*: LCG 94 (it has spark 2.3.1)
# * *Platform*: centos7-gcc7
# * *Spark cluster*: Hadalytic
# In[1]:
# Check if Spark Session has been created correctly
spark
# In[2]:
# Add the BDL zip file
# sc.addPyFile("/eos/project/s/swan/public/BigDL/bigdl-0.7.0-python-api.zip")
# ## Load train and test dataset
# In[3]:
# ## Create the model
# In[5]:
# Init analytics zoo
from zoo.common.nncontext import *
from pyspark.sql import SQLContext
sc = init_nncontext("inclusive classifier")
PATH = "file:///data/cern/"
sql_context = SQLContext(sc)
trainDF = sql_context.read.format('parquet').load(PATH + 'trainUndersampled.parquet').select(
['GRU_input', 'HLF_input', 'encoded_label'])
testDF = sql_context.read.format('parquet').load(PATH + 'testUndersampled.parquet').select(
['GRU_input', 'HLF_input', 'encoded_label'])
# In[4]:
trainDF.printSchema()
# In[6]:
import tensorflow as tf
from tensorflow.keras import Sequential, Input, Model
from tensorflow.keras.layers import Masking, Dense, Activation, GRU, Dropout, concatenate
# from zoo.pipeline.api.keras.layers.torch import Select
# from zoo.pipeline.api.keras.layers mport BatchNormalization
# from zoo.pipeline.api.keras.layers import GRU
# from zoo.pipeline.api.keras.engine.topology import Merge
## GRU branch
gru_input = Input(shape=(801,19), name='gru_input')
masking = Masking(mask_value=0.)(gru_input)
gru = GRU(units=50,activation='tanh')(masking)
gruBranch = Dropout(0.2)(gru)
hlf_input = Input(shape=(14,), name='hlf_input')
hlfBranch = Dropout(0.2)(hlf_input)
concat = concatenate([gruBranch, hlfBranch])
dense = Dense(25, activation='relu')(concat)
output = Dense(3, activation='softmax')(dense)
model = Model(inputs=[gru_input, hlf_input], outputs=output)
# In[7]:
# from bigdl.util.common import Sample
import numpy as np
trainRDD = trainDF.rdd.map(lambda row: ([np.array(row.GRU_input), np.array(row.HLF_input)],
np.array(row.encoded_label))
)
testRDD = testDF.rdd.map(lambda row: ([np.array(row.GRU_input), np.array(row.HLF_input)],
np.array(row.encoded_label))
)
# trainRDD = trainDF.rdd.map(lambda row: Sample.from_ndarray(
# [np.array(row.GRU_input), np.array(row.HLF_input)],
# np.array(row.encoded_label)
# ))
# testRDD = testDF.rdd.map(lambda row: Sample.from_ndarray(
# [np.array(row.GRU_input), np.array(row.HLF_input)],
# np.array(row.encoded_label)
# ))
# In[8]:
# trainRDD.count()
# In[9]:
# testRDD.count()
# ## Create train and valiation Data
#
# We need to create an RDD of a tuple of the form (`features`, `label`). The two elements of this touple should be `numpy arrays`.
# In[10]:
# Let's have a look at one element of trainRDD
trainRDD.take(1)
# We can see that `features` is now composed by the list of 801 particles with 19 features each (`shape=[801 19]`) plus the HLF (`shape=[14]`) and the encoded label (`shape=[3]`).
# In[11]:
from zoo.pipeline.api.net import TFDataset
from zoo.tfpark.model import KerasModel
# create TFDataset for TF training
dataset = TFDataset.from_rdd(trainRDD,
features=[(tf.float32, [801, 19]), (tf.float32, [14])],
labels=(tf.float32, [3]),
batch_size=256,
val_rdd=testRDD)
# ## Optimizer setup and training
# In[12]:
# Set of hyperparameters
numEpochs = 8

# The batch used by BDL must be a multiple of numExecutors * executorCores
# Because data will be equally distributed inside each executor
workerBatch = 64
# numExecutors = int(sc._conf.get('spark.executor.instances'))
numExecutors = 1
# executorCores = int(sc._conf.get('spark.executor.cores'))
executorCores = 4
# Global batch size seen by BigDL: per-worker batch replicated across
# every core of every executor.
BDLbatch = workerBatch * numExecutors * executorCores
# In[13]:
# Use Keras model training API to train
from bigdl.optim.optimizer import *
# from bigdl.nn.criterion import CategoricalCrossEntropy
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
keras_model = KerasModel(model)
# model.compile(optimizer='adam', loss=CategoricalCrossEntropy(), metrics=[Loss(CategoricalCrossEntropy())])
# Let's define a directory to store logs (i.e. train and validation losses) and save models
# In[14]:
# name of our application
appName = "InclusiveClassifier"
# Change it!
logDir = "/data/cern/TFParklogs"
# Check if there is already an application with the same name
# and remove it, otherwise logs will be appended to that app
import os
try:
os.system('rm -rf '+logDir+'/'+appName)
except:
pass
print("Saving logs to {}".format(logDir+'/'+appName))
# In[15]:
# Set tensorboard for model training and validation
# keras_model.set_tensorboard(logDir, appName)
trainSummary = TrainSummary(log_dir=logDir,app_name=appName)
valSummary = ValidationSummary(log_dir=logDir,app_name=appName)
keras_model.set_train_summary(trainSummary)
keras_model.set_val_summary(valSummary)
# In[ ]:
keras_model.fit(x=dataset, epochs=numEpochs, distributed=True)
# We are now ready to launch the training.
#
# **Warnign: During the trainign it would be better to shutdown the Toggle Spark Monitorin Display because each iteration is seen as a spark job, therefore the toggle will try to display everything causing problem to the browser.**
# In[55]:
# %%time
# model.fit(x=trainRDD, batch_size=BDLbatch, nb_epoch=numEpochs, validation_data=testRDD, distributed=True)
# ## Plot loss
# In[57]:
# import matplotlib.pyplot as plt
# plt.style.use('seaborn-darkgrid')
# get_ipython().magic(u'matplotlib notebook')
#
# # trainSummary = TrainSummary(log_dir=logDir,app_name=appName)
# loss = np.array(trainSummary.read_scalar("Loss"))
# # valSummary = ValidationSummary(log_dir=logDir,app_name=appName)
# val_loss = np.array(valSummary.read_scalar("Loss"))
#
# plt.plot(loss[:,0], loss[:,1], label="Training loss")
# plt.plot(val_loss[:,0], val_loss[:,1], label="Validation loss", color='crimson', alpha=0.8)
# plt.xlabel('Iteration')
# plt.ylabel('Loss')
# plt.legend()
# plt.title("Particle sequence classifier loss")
# plt.show()
#
# ## Save the model
# In[59]:
# Persist the trained model under the log directory.
modelDir = os.path.join(logDir, "models", "inclusive.model")
# Fix: `print modelDir` was a Python 2 print statement — a SyntaxError
# under Python 3 and inconsistent with the print() calls used elsewhere
# in this script.
print(modelDir)
keras_model.save_model(modelDir)
# It is possible to load the model in the following way:
# ```Python
# model = Model.loadModel(modelPath=modelPath+'.bigdl', weightPath=modelPath+'.bin')
# ```
# ## Prediction
# In[60]:
testRDD2 = testDF.rdd.map(lambda row: [np.array(row.GRU_input), np.array(row.HLF_input)])
test_dataset = TFDataset.from_rdd(testRDD2,
features=[(tf.float32, [801, 19]), (tf.float32, [14])],
labels=None,
batch_per_thread=64)
# In[61]:
predRDD = keras_model.predict(test_dataset)
# In[62]:
result = predRDD.collect()
# In[63]:
y_pred = np.squeeze(result)
y_true = np.asarray(testDF.select('encoded_label').rdd.map(lambda row: np.asarray(row.encoded_label)).collect())
# In[64]:
from sklearn.metrics import roc_curve, auc
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(3):
fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# In[65]:
# plt.figure()
# plt.plot(fpr[0], tpr[0], lw=2,
# label='Inclusive classifier (AUC) = %0.4f' % roc_auc[0])
# plt.plot([0, 1], [0, 1], linestyle='--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('Background Contamination (FPR)')
# plt.ylabel('Signal Efficiency (TPR)')
# plt.title('$tt$ selector')
# plt.legend(loc="lower right")
# plt.show()
#
# In[ ]:
loss = np.array(trainSummary.read_scalar("Accuracy"))
# valSummary = ValidationSummary(log_dir=logDir,app_name=appName)
val_loss = np.array(valSummary.read_scalar("Accuracy"))
# get_ipython().magic(u'matplotlib notebook')
# plt.figure()
#
# plt.plot(history.history['acc'], label='train')
# plt.plot(history.history['val_acc'], label='validation')
# plt.ylabel('Accuracy')
# plt.xlabel('epoch')
# plt.legend(loc='lower right')
# plt.title("HLF classifier accuracy")
# plt.show()
# In[ ]:
# In[ ]:
| 24.825444 | 232 | 0.690978 |
79587dec57730be24ebf60f67a20c57c2b0d57f5 | 1,716 | py | Python | tests/test_cryptocurrencychart.py | gdoermann/cryptocurrencychart | 713dae89304c86599f4a14a6dbd40f2e21c77048 | [
"MIT"
] | 1 | 2021-04-16T19:25:46.000Z | 2021-04-16T19:25:46.000Z | tests/test_cryptocurrencychart.py | gdoermann/cryptocurrencychart | 713dae89304c86599f4a14a6dbd40f2e21c77048 | [
"MIT"
] | null | null | null | tests/test_cryptocurrencychart.py | gdoermann/cryptocurrencychart | 713dae89304c86599f4a14a6dbd40f2e21c77048 | [
"MIT"
] | 1 | 2020-11-12T12:14:22.000Z | 2020-11-12T12:14:22.000Z | import datetime
import unittest
from cryptocurrencychart.api import CryptoCurrencyChartApi
class TestApi(unittest.TestCase):
    """Integration tests for CryptoCurrencyChartApi.

    NOTE(review): these hit the live service (no mocking), so they
    require network access and valid credentials.
    """

    def setUp(self) -> None:
        # Fresh API client before every test.
        self.reset()

    def reset(self):
        # Helper so tests can rebuild the client mid-test as well.
        self.api = CryptoCurrencyChartApi()

    def test_api_connection(self):
        """The service responds and lists USD among base currencies."""
        currencies = self.api.get_base_currencies()
        self.assertTrue('USD' in currencies)

    def test_set_base_currency(self):
        """BASE follows set_base_currency; validate=True rejects unknown codes."""
        self.api.set_base_currency('USD', validate=False)
        self.assertEqual(self.api.BASE, 'USD')
        self.api.set_base_currency('CZK', validate=True)
        self.assertEqual(self.api.BASE, 'CZK')
        # Invalid code with validation: raises and leaves BASE untouched.
        self.assertRaises(ValueError, self.api.set_base_currency, 'GBALK', validate=True)
        self.assertEqual(self.api.BASE, 'CZK')
        # Without validation even an unknown code is accepted verbatim.
        self.api.set_base_currency('GBALK', validate=False)
        self.assertEqual(self.api.BASE, 'GBALK')
        self.api.set_base_currency('USD', validate=False)
        self.assertEqual(self.api.BASE, 'USD')
        self.reset()

    def test_get_coins(self):
        """get_coins returns a (possibly empty) list."""
        coins = self.api.get_coins()
        self.assertIsNotNone(coins)
        self.assertIsInstance(coins, list)

    def test_view_coin(self):
        """Yesterday's BTC quote is reported in the requested base currency."""
        btc = self.api.coin_dict.get('BTC')
        response = self.api.view_coin(btc['id'], datetime.date.today() - datetime.timedelta(days=1), 'USD')
        self.assertEqual(response['baseCurrency'], 'USD')

    def test_view_coin_history(self):
        """A 30-day BTC history is reported in the requested base currency."""
        btc = self.api.coin_dict.get('BTC')
        end = datetime.date.today() - datetime.timedelta(days=1)
        start = end - datetime.timedelta(days=30)
        response = self.api.view_coin_history(btc['id'], start, end, 'USD')
        self.assertEqual(response['baseCurrency'], 'USD')
79587efbf86ed08d4264578ffa7b6e668d9c1e64 | 1,410 | py | Python | hierarc/Util/likelihood_util.py | aymgal/hierArc | a52cb6f2ad1d7a8cbd08c215ef7d5189fa329269 | [
"BSD-3-Clause"
] | 5 | 2020-07-08T00:53:04.000Z | 2021-08-03T08:20:31.000Z | hierarc/Util/likelihood_util.py | aymgal/hierArc | a52cb6f2ad1d7a8cbd08c215ef7d5189fa329269 | [
"BSD-3-Clause"
] | 4 | 2020-03-30T22:12:57.000Z | 2021-04-03T06:20:52.000Z | hierarc/Util/likelihood_util.py | aymgal/hierArc | a52cb6f2ad1d7a8cbd08c215ef7d5189fa329269 | [
"BSD-3-Clause"
] | 5 | 2020-03-30T21:20:08.000Z | 2021-03-03T17:08:42.000Z | import numpy as np
from scipy.stats import truncnorm
def log_likelihood_cov(data, model, cov_error):
    """
    log likelihood of the data given a model

    :param data: data vector
    :param model: model vector
    :param cov_error: inverse covariance matrix
    :return: log likelihood
    """
    residual = data - model
    # Gaussian log-likelihood up to a constant: -1/2 * r^T C^-1 r
    return -np.dot(residual, np.dot(cov_error, residual)) / 2.
def cov_error_create(error_independent, error_covariance):
    """
    generates an error covariance matrix from a set of independent uncertainties combined with a fully covariant term

    :param error_independent: array of Gaussian 1-sigma uncertainties
    :param error_covariance: float, shared covariant error among all data points. So if all data points are off by
        1-sigma, then the log likelihood is 1-sigma
    :return: error covariance matrix
    """
    # fully-correlated component, identical for every data point
    shared = error_covariance * np.ones_like(error_independent)
    cov = np.outer(shared, shared)
    # add the independent variances on the diagonal
    cov = cov + np.diag(np.square(error_independent))
    # NOTE: returns the INVERSE covariance (precision matrix)
    return np.linalg.inv(cov)
def get_truncated_normal(mean=0, sd=1, low=0, upp=10, size=1):
    """
    :param mean: mean of normal distribution
    :param sd: standard deviation
    :param low: lower bound
    :param upp: upper bound
    :param size: number of samples to draw
    :return: array of draws from the truncated normal distribution
    """
    # scipy's truncnorm takes bounds in standard-score units
    a = (low - mean) / sd
    b = (upp - mean) / sd
    return truncnorm(a, b, loc=mean, scale=sd).rvs(size)
79587f2e00169e479a909240bbf29dedda4b25db | 8,328 | py | Python | joblib/externals/loky/backend/semaphore_tracker.py | cclauss/joblib | 902fb6bbcf75c461d1b6703e5a01605fc592f214 | [
"BSD-3-Clause"
] | 1 | 2019-07-16T10:25:24.000Z | 2019-07-16T10:25:24.000Z | joblib/externals/loky/backend/semaphore_tracker.py | cclauss/joblib | 902fb6bbcf75c461d1b6703e5a01605fc592f214 | [
"BSD-3-Clause"
] | 1 | 2020-02-17T00:13:09.000Z | 2020-02-17T00:13:09.000Z | joblib/externals/loky/backend/semaphore_tracker.py | jdanbrown/joblib | e205833ed42f0f1c72d69d96f4c266734cea9d95 | [
"BSD-3-Clause"
] | 1 | 2019-03-25T09:56:23.000Z | 2019-03-25T09:56:23.000Z | ###############################################################################
# Server process to keep track of unlinked semaphores and clean them.
#
# author: Thomas Moreau
#
# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
# * include custom spawnv_passfds to start the process
# * use custom unlink from our own SemLock implementation
# * add some VERBOSE logging
#
#
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
import os
import signal
import sys
import threading
import warnings
from . import spawn
from multiprocessing import util
try:
from _multiprocessing import sem_unlink
except ImportError:
from .semlock import sem_unlink
__all__ = ['ensure_running', 'register', 'unregister']
VERBOSE = False
class SemaphoreTracker(object):
    """Client-side handle to the semaphore tracker server process.

    Lazily spawns the server (which runs :func:`main`) and talks to it
    over a one-way pipe; the server unlinks leaked semaphores at exit.
    """

    def __init__(self):
        self._lock = threading.Lock()
        # write end of the pipe to the tracker process (None until spawned)
        self._fd = None
        # pid of the tracker process (informational)
        self._pid = None

    def getfd(self):
        """Return the pipe fd to the tracker, spawning it if needed."""
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        '''Make sure that semaphore tracker process is running.
        This can be run from any process. Usually a child process will use
        the semaphore created by its parent.'''
        with self._lock:
            if self._fd is not None:
                # semaphore tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)
                self._fd = None
                self._pid = None
                warnings.warn('semaphore_tracker: process died unexpectedly, '
                              'relaunching. Some semaphores might leak.')
            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass
            # the child runs main(r) where r is the read end of the pipe
            cmd = 'from {} import main; main(%d)'.format(main.__module__)
            r, w = os.pipe()
            try:
                fds_to_pass.append(r)
                # process will out live us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe] + util._args_from_interpreter_flags()
                # In python 3.3, there is a bug which put `-RRRRR..` instead of
                # `-R` in args. Replace it to get the correct flags.
                # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488
                if sys.version_info[:2] <= (3, 3):
                    import re
                    for i in range(1, len(args)):
                        args[i] = re.sub("-R+", "-R", args[i])
                args += ['-c', cmd % r]
                util.debug("launching Semaphore tracker: {}".format(args))
                pid = spawnv_passfds(exe, args, fds_to_pass)
            except BaseException:
                os.close(w)
                raise
            else:
                # keep the write end; EOF on it tells the tracker we exited
                self._fd = w
                self._pid = pid
            finally:
                os.close(r)

    def _check_alive(self):
        '''Check for the existence of the semaphore tracker process.'''
        try:
            # a write that fails with EPIPE means the tracker is gone
            self._send('PROBE', '')
        except BrokenPipeError:
            return False
        else:
            return True

    def register(self, name):
        '''Register name of semaphore with semaphore tracker.'''
        self.ensure_running()
        self._send('REGISTER', name)

    def unregister(self, name):
        '''Unregister name of semaphore with semaphore tracker.'''
        self.ensure_running()
        self._send('UNREGISTER', name)

    def _send(self, cmd, name):
        """Write one 'CMD:name\\n' record to the tracker pipe."""
        msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
        if len(name) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('name too long')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)
# Process-wide singleton tracker; the module-level names below delegate
# to it so callers can use plain functions.
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
def main(fd):
    '''Run semaphore tracker.

    Reads 'CMD:name' records from the pipe fd until EOF (i.e. until every
    client process has exited), then unlinks any still-registered
    semaphores.
    '''
    # protect the process from ^C and "killall python" etc
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    # stdin/stdout are not needed; stderr is kept for diagnostics
    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass
    if VERBOSE:  # pragma: no cover
        sys.stderr.write("Main semaphore tracker is running\n")
        sys.stderr.flush()
    cache = set()
    try:
        # keep track of registered/unregistered semaphores
        with os.fdopen(fd, 'rb') as f:
            for line in f:
                try:
                    cmd, name = line.strip().split(b':')
                    if cmd == b'REGISTER':
                        name = name.decode('ascii')
                        cache.add(name)
                        if VERBOSE:  # pragma: no cover
                            sys.stderr.write("[SemaphoreTracker] register {}\n"
                                             .format(name))
                            sys.stderr.flush()
                    elif cmd == b'UNREGISTER':
                        name = name.decode('ascii')
                        cache.remove(name)
                        if VERBOSE:  # pragma: no cover
                            sys.stderr.write("[SemaphoreTracker] unregister {}"
                                             ": cache({})\n"
                                             .format(name, len(cache)))
                            sys.stderr.flush()
                    elif cmd == b'PROBE':
                        # liveness check from the client; nothing to do
                        pass
                    else:
                        raise RuntimeError('unrecognized command %r' % cmd)
                except BaseException:
                    # never let a bad record kill the tracker; report and go on
                    try:
                        sys.excepthook(*sys.exc_info())
                    except BaseException:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining semaphores
        if cache:
            try:
                warnings.warn('semaphore_tracker: There appear to be %d '
                              'leaked semaphores to clean up at shutdown' %
                              len(cache))
            except Exception:
                pass
        for name in cache:
            # For some reason the process which created and registered this
            # semaphore has failed to unregister it. Presumably it has died.
            # We therefore unlink it.
            try:
                try:
                    sem_unlink(name)
                    if VERBOSE:  # pragma: no cover
                        # NOTE(review): `name` is already a str here, so this
                        # decode would raise AttributeError — confirm; only
                        # reachable when VERBOSE is True.
                        name = name.decode('ascii')
                        sys.stderr.write("[SemaphoreTracker] unlink {}\n"
                                         .format(name))
                        sys.stderr.flush()
                except Exception as e:
                    warnings.warn('semaphore_tracker: %r: %r' % (name, e))
            finally:
                pass
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
    """Fork-exec ``args``, keeping only the fds in ``passfds`` open
    (made inheritable) in the child; returns the child's pid."""
    passfds = sorted(passfds)
    errpipe_read, errpipe_write = os.pipe()
    try:
        from .reduction import _mk_inheritable
        from .fork_exec import fork_exec
        inheritable_fds = [_mk_inheritable(fd) for fd in passfds]
        return fork_exec(args, inheritable_fds)
    finally:
        os.close(errpipe_read)
        os.close(errpipe_write)
| 35.139241 | 87 | 0.537104 |
79587f6fd4e1d7353c768b5e7b8ec6e958857925 | 8,749 | py | Python | python/cloudtik/core/_private/parameter.py | haojinIntel/cloudtik-1 | 8687c7e8c9b362a30674aea571ef0d736ec42c49 | [
"Apache-2.0"
] | null | null | null | python/cloudtik/core/_private/parameter.py | haojinIntel/cloudtik-1 | 8687c7e8c9b362a30674aea571ef0d736ec42c49 | [
"Apache-2.0"
] | null | null | null | python/cloudtik/core/_private/parameter.py | haojinIntel/cloudtik-1 | 8687c7e8c9b362a30674aea571ef0d736ec42c49 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import cloudtik.core._private.constants as constants
logger = logging.getLogger(__name__)
class StartParams:
"""A class used to store the start-up parameters used.
Attributes:
external_addresses (str): The address of external Redis server to
connect to, in format of "ip1:port1,ip2:port2,...". If this
address is provided, then we won't start Redis instances in the
head node but use external Redis server(s) instead.
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
cluster controller, and some workers.
It will also kill these processes when Python exits.
redis_port (int): The port that the primary Redis shard should listen
to. If None, then it will fall back to constants.CLOUDTIK_DEFAULT_PORT,
or a random port if the default is not available.
redis_shard_ports: A list of the ports to use for the non-primary Redis
shards. If None, then it will fall back to the ports right after
redis_port, or random ports if those are not available.
redis_max_memory: The max amount of memory (in bytes) to allow redis
to use, or None for no limit. Once the limit is exceeded, redis
will start LRU eviction of entries. This only applies to the
sharded redis tables (task and object tables).
node_ip_address (str): The IP address of the node that we are on.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See constants.py.
temp_dir (str): If provided, it will specify the root temporary
directory.
runtime_dir_name (str): If provided, specifies the directory that
will be created in the session dir to hold runtime_env files.
include_log_monitor (bool): If True, then start a log monitor to
monitor the log files for all processes on this node and push their
contents to Redis.
cluster_scaling_config: path to autoscaling config file.
metrics_export_port(int): The port at which metrics are exposed
through a Prometheus endpoint.
env_vars (dict): Override environment variables for the node.
num_cpus (int): Number of CPUs to configure the cloudtik.
num_gpus (int): Number of GPUs to configure the cloudtik.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
memory: Total available memory for workers requesting memory.
redirect_output (bool): True if stdout and stderr for non-worker
processes should be redirected to files and false otherwise.
"""
def __init__(self,
external_addresses=None,
redis_address=None,
redis_max_memory=None,
redis_port=None,
redis_shard_ports=None,
node_ip_address=None,
num_redis_shards=None,
redis_max_clients=None,
redis_password=constants.CLOUDTIK_REDIS_DEFAULT_PASSWORD,
logging_level=logging.INFO,
logging_format=constants.LOGGER_FORMAT,
temp_dir=None,
runtime_dir_name=None,
include_log_monitor=None,
cluster_scaling_config=None,
metrics_export_port=None,
env_vars=None,
resources=None,
num_cpus=None,
num_gpus=None,
memory=None,
redirect_output=None
):
self.external_addresses = external_addresses
self.redis_address = redis_address
self.redis_max_memory = redis_max_memory
self.redis_port = redis_port
self.redis_shard_ports = redis_shard_ports
self.node_ip_address = node_ip_address
self.num_redis_shards = num_redis_shards
self.redis_max_clients = redis_max_clients
self.redis_password = redis_password
self.temp_dir = temp_dir
self.runtime_dir_name = (
runtime_dir_name or constants.CLOUDTIK_DEFAULT_RUNTIME_DIR_NAME)
self.include_log_monitor = include_log_monitor
self.cluster_scaling_config = cluster_scaling_config
self.metrics_export_port = metrics_export_port
self.env_vars = env_vars
self.num_cpus = num_cpus
self.num_gpus = num_gpus
self.memory = memory
self.redirect_output = redirect_output
self.resources = resources
self._check_usage()
def update(self, **kwargs):
"""Update the settings according to the keyword arguments.
Args:
kwargs: The keyword arguments to set corresponding fields.
"""
for arg in kwargs:
if hasattr(self, arg):
setattr(self, arg, kwargs[arg])
else:
raise ValueError(
f"Invalid StartParams parameter in update: {arg}")
self._check_usage()
def update_if_absent(self, **kwargs):
"""Update the settings when the target fields are None.
Args:
kwargs: The keyword arguments to set corresponding fields.
"""
for arg in kwargs:
if hasattr(self, arg):
if getattr(self, arg) is None:
setattr(self, arg, kwargs[arg])
else:
raise ValueError("Invalid StartParams parameter in"
" update_if_absent: %s" % arg)
self._check_usage()
def update_pre_selected_port(self):
"""Update the pre-selected port information
Returns:
The dictionary mapping of component -> ports.
"""
def wrap_port(port):
# 0 port means select a random port for the grpc server.
if port is None or port == 0:
return []
else:
return [port]
# Create a dictionary of the component -> port mapping.
pre_selected_ports = {
"redis": wrap_port(self.redis_port),
"metrics_export": wrap_port(self.metrics_export_port),
}
redis_shard_ports = self.redis_shard_ports
if redis_shard_ports is None:
redis_shard_ports = []
pre_selected_ports["redis_shards"] = redis_shard_ports
# Update the pre selected port set.
self.reserved_ports = set()
for comp, port_list in pre_selected_ports.items():
for port in port_list:
if port in self.reserved_ports:
raise ValueError(
f"Component {comp} is trying to use "
f"a port number {port} that is used by "
"other components.\n"
f"Port information: "
f"{self._format_ports(pre_selected_ports)}\n"
"If you allocate ports, "
"please make sure the same port is not used by "
"multiple components.")
self.reserved_ports.add(port)
    def _check_usage(self):
        # Hook for validating the combination of start parameters; invoked
        # after construction and after every update/update_if_absent call.
        # Currently a no-op.
        pass
def _format_ports(self, pre_selected_ports):
"""Format the pre selected ports information to be more
human readable.
"""
ports = pre_selected_ports.copy()
for comp, port_list in ports.items():
if len(port_list) == 1:
ports[comp] = port_list[0]
elif len(port_list) == 0:
# Nothing is selected, meaning it will be randomly selected.
ports[comp] = "random"
elif comp == "worker_ports":
min_port = port_list[0]
max_port = port_list[len(port_list) - 1]
port_range_str = None
if len(port_list) < 50:
port_range_str = str(port_list)
else:
port_range_str = f"from {min_port} to {max_port}"
ports[comp] = (f"{len(port_list)} ports {port_range_str}")
return ports
| 42.887255 | 83 | 0.603269 |
79588030f634d7f5f47cea4b465555d2396164f8 | 18,611 | py | Python | modules/util.py | shovelingpig/SAFA | 35cd638ab299e58ba303bf64874287abdbcf9fd6 | [
"RSA-MD"
] | 1 | 2021-12-16T13:38:21.000Z | 2021-12-16T13:38:21.000Z | modules/util.py | shovelingpig/SAFA | 35cd638ab299e58ba303bf64874287abdbcf9fd6 | [
"RSA-MD"
] | null | null | null | modules/util.py | shovelingpig/SAFA | 35cd638ab299e58ba303bf64874287abdbcf9fd6 | [
"RSA-MD"
] | null | null | null | import math
from torch import nn
import torch.nn.functional as F
import torch
import torchvision.models as models
def kp2gaussian(kp, spatial_size, kp_variance):
    """Render keypoints as Gaussian heatmaps on an (H, W) grid.

    Args:
        kp: Dict whose 'value' entry holds keypoint coordinates in
            [-1, 1], shape (..., 2).
        spatial_size: (H, W) of the output heatmaps.
        kp_variance: Variance of the isotropic Gaussians.

    Returns:
        Tensor of shape ``kp['value'].shape[:-1] + (H, W)``.
    """
    mu = kp['value']
    grid = make_coordinate_grid(spatial_size, mu.type())

    # Broadcast the (H, W, 2) grid across all leading keypoint dimensions.
    n_lead = len(mu.shape) - 1
    grid = grid.view((1,) * n_lead + grid.shape)
    grid = grid.repeat(*(mu.shape[:n_lead] + (1, 1, 1)))

    # Reshape the means so they broadcast against the grid.
    mu = mu.view(mu.shape[:n_lead] + (1, 1, 2))

    diff = grid - mu
    return torch.exp(-0.5 * (diff ** 2).sum(-1) / kp_variance)
def make_coordinate_grid(spatial_size, type):
    """Build an (H, W, 2) grid of (x, y) coordinates spanning [-1, 1].

    Args:
        spatial_size: (H, W) pair.
        type: Tensor type string (e.g. 'torch.FloatTensor') used to cast
            the coordinate tensors.

    Returns:
        Tensor of shape (H, W, 2); channel 0 is x (varies along width),
        channel 1 is y (varies along height).
    """
    h, w = spatial_size
    # Normalize pixel indices into [-1, 1].
    xs = 2 * (torch.arange(w).type(type) / (w - 1)) - 1
    ys = 2 * (torch.arange(h).type(type) / (h - 1)) - 1

    grid_x = xs.view(1, -1).repeat(h, 1)
    grid_y = ys.view(-1, 1).repeat(1, w)
    return torch.cat([grid_x.unsqueeze(2), grid_y.unsqueeze(2)], 2)
class ResBlock2d(nn.Module):
    """Residual block (BN -> ReLU -> Conv, twice) that keeps spatial size."""

    def __init__(self, in_features, kernel_size, padding):
        super(ResBlock2d, self).__init__()
        # Layer creation order is kept stable so parameter initialization
        # matches checkpoints trained with the original layout.
        self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features,
                               kernel_size=kernel_size, padding=padding)
        self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features,
                               kernel_size=kernel_size, padding=padding)
        self.norm1 = nn.BatchNorm2d(in_features, affine=True)
        self.norm2 = nn.BatchNorm2d(in_features, affine=True)

    def forward(self, x):
        h = self.conv1(F.relu(self.norm1(x)))
        h = self.conv2(F.relu(self.norm2(h)))
        return h + x
class UpBlock2d(nn.Module):
    """Decoder block: 2x nearest-neighbor upsample -> Conv -> BN -> ReLU."""

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(UpBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = nn.BatchNorm2d(out_features, affine=True)

    def forward(self, x):
        upsampled = F.interpolate(x, scale_factor=2)
        return F.relu(self.norm(self.conv(upsampled)))
class GADEUpBlock2d(nn.Module):
    """Upsampling block with Geometrically-Adaptive DEnormalization.

    After the usual upsample -> conv -> BN, the features are modulated by
    a scale/shift predicted from the latent vector ``z``, and a learned
    per-pixel gate blends the modulated and plain features.
    """

    def __init__(self, in_features, out_features, kernel_size=3, padding=1,
                 groups=1, z_size=1280):
        super(GADEUpBlock2d, self).__init__()
        # Creation order kept stable for reproducible initialization.
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = nn.BatchNorm2d(out_features, affine=True)
        self.fc_1 = nn.Linear(z_size, out_features)
        self.fc_2 = nn.Linear(z_size, out_features)
        self.conv_f = nn.Conv2d(out_features, out_features, kernel_size=3,
                                stride=1, padding=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, z):
        h = self.norm(self.conv(F.interpolate(x, scale_factor=2)))
        # Per-pixel gate deciding where the z-conditioned modulation applies.
        gate = self.sigmoid(self.conv_f(h))
        scale = self.fc_1(z).unsqueeze(-1).unsqueeze(-1).expand_as(h)
        shift = self.fc_2(z).unsqueeze(-1).unsqueeze(-1).expand_as(h)
        modulated = scale * h + shift
        return F.relu(gate * modulated + (1 - gate) * h)
class DownBlock2d(nn.Module):
    """Encoder block: Conv -> BN -> ReLU -> 2x average-pool downsample."""

    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
        super(DownBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = nn.BatchNorm2d(out_features, affine=True)
        self.pool = nn.AvgPool2d(kernel_size=(2, 2))

    def forward(self, x):
        return self.pool(F.relu(self.norm(self.conv(x))))
class SameBlock2d(nn.Module):
    """Conv -> BN -> ReLU block that preserves spatial resolution."""

    def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1):
        super(SameBlock2d, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
                              kernel_size=kernel_size, padding=padding, groups=groups)
        self.norm = nn.BatchNorm2d(out_features, affine=True)

    def forward(self, x):
        return F.relu(self.norm(self.conv(x)))
class Encoder(nn.Module):
    """Hourglass encoder: a stack of DownBlock2d layers.

    ``forward`` returns the input followed by every intermediate feature
    map, so the decoder can form skip connections.
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Encoder, self).__init__()
        blocks = []
        for i in range(num_blocks):
            n_in = in_features if i == 0 else min(max_features, block_expansion * (2 ** i))
            n_out = min(max_features, block_expansion * (2 ** (i + 1)))
            blocks.append(DownBlock2d(n_in, n_out, kernel_size=3, padding=1))
        self.down_blocks = nn.ModuleList(blocks)

    def forward(self, x):
        feats = [x]
        for block in self.down_blocks:
            feats.append(block(feats[-1]))
        return feats
class Decoder(nn.Module):
    """Hourglass decoder: an UpBlock2d stack with skip connections.

    Consumes the list produced by ``Encoder.forward`` (deepest feature
    last) and concatenates each upsampled output with the matching skip.
    """

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Decoder, self).__init__()
        blocks = []
        for i in reversed(range(num_blocks)):
            # Every level except the deepest receives a concatenated skip,
            # which doubles its input channel count.
            n_in = (1 if i == num_blocks - 1 else 2) * min(
                max_features, block_expansion * (2 ** (i + 1)))
            n_out = min(max_features, block_expansion * (2 ** i))
            blocks.append(UpBlock2d(n_in, n_out, kernel_size=3, padding=1))
        self.up_blocks = nn.ModuleList(blocks)
        self.out_filters = block_expansion + in_features

    def forward(self, x):
        out = x.pop()
        for block in self.up_blocks:
            out = torch.cat([block(out), x.pop()], dim=1)
        return out
class Hourglass(nn.Module):
    """Encoder/decoder pair with skip connections (hourglass network)."""

    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
        super(Hourglass, self).__init__()
        self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
        self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
        self.out_filters = self.decoder.out_filters

    def forward(self, x):
        features = self.encoder(x)
        return self.decoder(features)
class AntiAliasInterpolation2d(nn.Module):
    """Gaussian-blurred (band-limited) downsampling.

    Blurs the input with a depthwise Gaussian whose bandwidth matches the
    target scale before subsampling, which avoids aliasing compared with
    plain strided subsampling.
    """

    def __init__(self, channels, scale):
        super(AntiAliasInterpolation2d, self).__init__()
        sigma = (1 / scale - 1) / 2
        kernel_size = 2 * round(sigma * 4) + 1
        self.ka = kernel_size // 2
        self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka

        # Separable 2-D Gaussian: product of the per-axis Gaussians.
        kernel = 1
        grids = torch.meshgrid([
            torch.arange(size, dtype=torch.float32)
            for size in (kernel_size, kernel_size)
        ])
        for size, std, grid in zip((kernel_size, kernel_size), (sigma, sigma), grids):
            center = (size - 1) / 2
            kernel = kernel * torch.exp(-(grid - center) ** 2 / (2 * std ** 2))

        # Normalize so the blur preserves overall intensity.
        kernel = kernel / torch.sum(kernel)
        # Depthwise-convolution weight: one copy of the kernel per channel.
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        self.register_buffer('weight', kernel)
        self.groups = channels
        self.scale = scale
        self.int_inv_scale = int(1 / scale)

    def forward(self, input):
        if self.scale == 1.0:
            # Identity scale: nothing to filter.
            return input
        out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
        out = F.conv2d(out, weight=self.weight, groups=self.groups)
        return out[:, :, ::self.int_inv_scale, ::self.int_inv_scale]
class mymobilenetv2(nn.Module):
    """MobileNetV2 backbone with a zero-initialized linear head.

    Images are anti-alias downscaled toward 224 px (the resolution the
    backbone expects) before being fed through the network.  ``forward``
    returns both the classifier output and the pooled backbone feature.
    """

    def __init__(self, num_classes=1000, image_size=256):
        super(mymobilenetv2, self).__init__()
        # NOTE(review): downloads ImageNet weights on first use.
        self.model = models.mobilenet_v2(pretrained=True)
        self.n_layers = len(self.model.features)
        self.dropout = nn.Dropout(0.2)
        self.fc = nn.Linear(self.model.last_channel, num_classes)
        # Zero-init the head so the untrained classifier outputs zeros.
        self.fc.weight.data.zero_()
        self.fc.bias.data.zero_()
        self.model.classifier = nn.Sequential(
            self.dropout,
            self.fc,
        )
        self.scale_factor = 224.0 / image_size
        if self.scale_factor != 1:
            self.down = AntiAliasInterpolation2d(3, self.scale_factor)
        self.feature_maps = {}

    def forward(self, x):
        if self.scale_factor != 1:
            x = self.down(x)
        # Run the full backbone.  The original hard-coded features[0] plus
        # 18 more layers; iterating the module list is equivalent for the
        # stock torchvision MobileNetV2 and stays correct if the feature
        # list length ever changes.
        feature = x
        for layer in self.model.features:
            feature = layer(feature)
        feature = nn.functional.adaptive_avg_pool2d(feature, 1).reshape(feature.shape[0], -1)
        code = self.model.classifier(feature)
        return code, feature
class ContextualAttention(nn.Module):
    """Contextual attention layer (Yu et al., "Generative Image
    Inpainting with Contextual Attention").

    Matches foreground patches against background patches via a
    normalized cross-correlation, sharpens the attention with an optional
    "fusion" step, and reconstructs the foreground from the
    best-matching background content.

    Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/model/networks.py
    """
    def __init__(self, ksize=3, stride=1, rate=1, fuse_k=3, softmax_scale=10, fuse=True):
        super(ContextualAttention, self).__init__()
        self.ksize = ksize                  # patch size used for matching
        self.stride = stride                # stride for background patch extraction
        self.rate = rate                    # dilation/downscale factor for matching
        self.fuse_k = fuse_k                # kernel size of the fusion convs
        self.softmax_scale = softmax_scale  # temperature for the attention softmax
        self.fuse = fuse                    # enable attention-map fusion smoothing
        if self.fuse:
            # Identity kernel used to propagate attention between
            # neighboring patches; shape 1*1*k*k.
            fuse_weight = torch.eye(fuse_k).view(1, 1, fuse_k, fuse_k)
            self.register_buffer('fuse_weight', fuse_weight)
    def forward(self, f, b, mask):
        """ Contextual attention layer implementation.
        Contextual attention is first introduced in publication:
            Generative Image Inpainting with Contextual Attention, Yu et al.
        Args:
            f: Input feature to match (foreground).
            b: Input feature for match (background).
            mask: Input mask for b, indicating patches not available.
        Returns:
            torch.tensor: foreground reconstructed from background patches.
        """
        # Record the full-resolution shapes before any downscaling.
        raw_int_fs = list(f.size())
        raw_int_bs = list(b.size())
        # extract patches from background with stride and rate
        kernel = 2 * self.rate
        # raw_w holds full-resolution patches used later for reconstruction.
        raw_w = extract_image_patches(b, ksizes=[kernel, kernel],
                                      strides=[self.rate*self.stride,
                                               self.rate*self.stride],
                                      rates=[1, 1],
                                      padding='same')
        raw_w = raw_w.view(raw_int_bs[0], raw_int_bs[1], kernel, kernel, -1)
        raw_w = raw_w.permute(0, 4, 1, 2, 3)
        raw_w_groups = torch.split(raw_w, 1, dim=0)
        # downscaling foreground option: downscaling both foreground and
        # background for matching and use original background for reconstruction.
        f = F.interpolate(f, scale_factor=1./self.rate, mode='nearest')
        b = F.interpolate(b, scale_factor=1./self.rate, mode='nearest')
        int_fs = list(f.size())
        int_bs = list(b.size())
        f_groups = torch.split(f, 1, dim=0)  # split tensors along the batch dimension
        # Downscaled background patches act as correlation filters.
        w = extract_image_patches(b, ksizes=[self.ksize, self.ksize],
                                  strides=[self.stride, self.stride],
                                  rates=[1, 1],
                                  padding='same')
        w = w.view(int_bs[0], int_bs[1], self.ksize, self.ksize, -1)
        w = w.permute(0, 4, 1, 2, 3)
        w_groups = torch.split(w, 1, dim=0)
        # Per-patch validity: mm is ~0 for patches fully inside the hole.
        mask = F.interpolate(mask, scale_factor=1./(self.rate), mode='nearest')
        int_ms = list(mask.size())
        m = extract_image_patches(mask, ksizes=[self.ksize, self.ksize],
                                  strides=[self.stride, self.stride],
                                  rates=[1, 1],
                                  padding='same')
        m = m.view(int_ms[0], int_ms[1], self.ksize, self.ksize, -1)
        m = m.permute(0, 4, 1, 2, 3)
        mm = reduce_mean(m, axis=[3, 4]).unsqueeze(-1)
        y = []
        for i, (xi, wi, raw_wi) in enumerate(zip(f_groups, w_groups, raw_w_groups)):
            '''
            O => output channel as a conv filter
            I => input channel as a conv filter
            xi : separated tensor along batch dimension of front;
            wi : separated patch tensor along batch dimension of back;
            raw_wi : separated tensor along batch dimension of back;
            '''
            # conv for compare: L2-normalize the patch filters, then
            # correlate them against the (padded) foreground.
            wi = wi[0]
            max_wi = torch.sqrt(reduce_sum(torch.pow(wi, 2) + 1e-4, axis=[1, 2, 3], keepdim=True))
            wi_normed = wi / max_wi
            xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1])
            yi = F.conv2d(xi, wi_normed, stride=1)
            if self.fuse:
                # make all of depth to spatial resolution, then smooth the
                # attention map along both patch axes with identity convs.
                yi = yi.view(1, 1, int_bs[2]*int_bs[3], int_fs[2]*int_fs[3])
                yi = same_padding(yi, [self.fuse_k, self.fuse_k], [1, 1], [1, 1])
                yi = F.conv2d(yi, self.fuse_weight, stride=1)
                yi = yi.contiguous().view(1, int_bs[2], int_bs[3], int_fs[2], int_fs[3])
                yi = yi.permute(0, 2, 1, 4, 3)
                yi = yi.contiguous().view(1, 1, int_bs[2]*int_bs[3], int_fs[2]*int_fs[3])
                yi = same_padding(yi, [self.fuse_k, self.fuse_k], [1, 1], [1, 1])
                yi = F.conv2d(yi, self.fuse_weight, stride=1)
                yi = yi.contiguous().view(1, int_bs[3], int_bs[2], int_fs[3], int_fs[2])
                yi = yi.permute(0, 2, 1, 4, 3).contiguous()
            yi = yi.view(1, int_bs[2] * int_bs[3], int_fs[2], int_fs[3])
            # softmax to match: mask invalid patches before and after.
            yi = yi * mm[i:i+1]
            yi = F.softmax(yi*self.softmax_scale, dim=1)
            yi = yi * mm[i:i+1]
            # deconv for patch pasting; /4 compensates for the 4x overlap
            # of stride-rate transposed-conv patch placement.
            wi_center = raw_wi[0]
            yi = F.conv_transpose2d(yi, wi_center, stride=self.rate, padding=1) / 4.0
            y.append(yi)
        y = torch.cat(y, dim=0)
        # NOTE(review): the result of this view() is discarded; the
        # upstream repository assigns it back (y = y.contiguous().view(...)).
        # Confirm whether the reshape was intended to take effect.
        y.contiguous().view(raw_int_fs)
        return y
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """Slide a window over ``images`` and stack each patch into the
    channel dimension (TensorFlow ``extract_image_patches`` analogue).

    Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py

    Args:
        images: Tensor of shape [batch, channels, rows, cols].
        ksizes: [ksize_rows, ksize_cols] window size.
        strides: [stride_rows, stride_cols].
        rates: [dilation_rows, dilation_cols].
        padding: 'same' (zero-pad so every stride position is covered)
            or 'valid' (no padding).

    Returns:
        Tensor of shape [batch, channels * prod(ksizes), n_patches].
    """
    assert len(images.size()) == 4
    assert padding in ['same', 'valid']
    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)
    elif padding == 'valid':
        pass
    else:
        raise NotImplementedError('Unsupported padding type: {}.\
                Only "same" or "valid" are supported.'.format(padding))

    unfold = torch.nn.Unfold(kernel_size=ksizes,
                             dilation=rates,
                             padding=0,
                             stride=strides)
    return unfold(images)
def same_padding(images, ksizes, strides, rates):
    """Zero-pad ``images`` so a sliding window covers every stride
    position (TensorFlow 'SAME' padding).

    Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py
    """
    assert len(images.size()) == 4
    _, _, rows, cols = images.size()
    out_rows = (rows + strides[0] - 1) // strides[0]
    out_cols = (cols + strides[1] - 1) // strides[1]
    # Effective kernel extent once dilation is applied.
    eff_k_rows = (ksizes[0] - 1) * rates[0] + 1
    eff_k_cols = (ksizes[1] - 1) * rates[1] + 1
    pad_rows = max(0, (out_rows - 1) * strides[0] + eff_k_rows - rows)
    pad_cols = max(0, (out_cols - 1) * strides[1] + eff_k_cols - cols)
    # Split as evenly as possible; any odd pixel goes to the bottom/right.
    top = int(pad_rows / 2.)
    left = int(pad_cols / 2.)
    bottom = pad_rows - top
    right = pad_cols - left
    return torch.nn.ZeroPad2d((left, right, top, bottom))(images)
def reduce_mean(x, axis=None, keepdim=False):
    """Mean-reduce ``x`` over the given axes (TensorFlow ``reduce_mean``).

    Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py

    Args:
        x: Input tensor.
        axis: Iterable of dimension indices to reduce; ``None`` reduces
            every dimension.
        keepdim: Keep reduced dimensions with size 1.

    Returns:
        The reduced tensor.
    """
    # Bug fix: the original tested ``if not axis``, which silently treated
    # any falsy axis argument (e.g. an empty sequence) as "reduce all".
    if axis is None:
        axis = range(len(x.shape))
    # Reduce highest dims first so the remaining indices stay valid.
    for i in sorted(axis, reverse=True):
        x = torch.mean(x, dim=i, keepdim=keepdim)
    return x
def reduce_sum(x, axis=None, keepdim=False):
    """Sum-reduce ``x`` over the given axes (TensorFlow ``reduce_sum``).

    Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py

    Args:
        x: Input tensor.
        axis: Iterable of dimension indices to reduce; ``None`` reduces
            every dimension.
        keepdim: Keep reduced dimensions with size 1.

    Returns:
        The reduced tensor.
    """
    # Bug fix: the original tested ``if not axis``, which silently treated
    # any falsy axis argument (e.g. an empty sequence) as "reduce all".
    if axis is None:
        axis = range(len(x.shape))
    # Reduce highest dims first so the remaining indices stay valid.
    for i in sorted(axis, reverse=True):
        x = torch.sum(x, dim=i, keepdim=keepdim)
    return x
| 36.420744 | 116 | 0.593305 |
795880ec1d91501ee3c866f24bfe2a5da7ac3772 | 393 | py | Python | Alphabetic Patterns/alphabeticpattern103.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | [
"MIT"
] | null | null | null | Alphabetic Patterns/alphabeticpattern103.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | [
"MIT"
] | 471 | 2022-01-15T07:07:18.000Z | 2022-02-28T16:01:42.000Z | Alphabetic Patterns/alphabeticpattern103.py | vaidehisinha1/Python-PatternHouse | 49f71bcc5319a838592e69b0e49ef1edba32bf7c | [
"MIT"
] | 2 | 2022-01-17T09:43:16.000Z | 2022-01-29T15:15:47.000Z | height = int(input())
for row in range(1, height + 1):
    # The left arm of the V sits at column height-row+1 and the right arm
    # at height+row-1; those columns print their letter (A=column 1)
    # followed by a space, every other column prints a single space.
    pieces = []
    for col in range(1, height * 2 + 1):
        if col == height - row + 1 or col == height + row - 1:
            pieces.append(chr(col + 64) + " ")
        else:
            pieces.append(" ")
    print("".join(pieces))
# Sample Input :- 5
# Output :-
# E
# D F
# C G
# B H
# A I
| 17.863636 | 47 | 0.343511 |
7958825c6ebbc6e58cf52382d838a7ea695da876 | 1,363 | py | Python | azure-mgmt-media/azure/mgmt/media/models/list_container_sas_input.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-media/azure/mgmt/media/models/list_container_sas_input.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | azure-mgmt-media/azure/mgmt/media/models/list_container_sas_input.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ListContainerSasInput(Model):
"""The parameters to the list SAS requet.
:param permissions: The permissions to set on the SAS URL. Possible values
include: 'Read', 'ReadWrite', 'ReadWriteDelete'
:type permissions: str or
~azure.mgmt.media.models.AssetContainerPermission
:param expiry_time: The SAS URL expiration time. This must be less than
24 hours from the current time.
:type expiry_time: datetime
"""
_attribute_map = {
'permissions': {'key': 'permissions', 'type': 'AssetContainerPermission'},
'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(ListContainerSasInput, self).__init__(**kwargs)
self.permissions = kwargs.get('permissions', None)
self.expiry_time = kwargs.get('expiry_time', None)
| 37.861111 | 82 | 0.624358 |
79588358ddfa5476db2fca6a876680731bce13ed | 10,527 | py | Python | scripts/aux/opts.py | Druidos/cv | 90cdbf212d7cc8c5cbd2fbbcc770d18a89771037 | [
"Apache-2.0"
] | 2 | 2019-12-29T22:42:36.000Z | 2020-09-15T15:53:36.000Z | scripts/aux/opts.py | Druidos/cv | 90cdbf212d7cc8c5cbd2fbbcc770d18a89771037 | [
"Apache-2.0"
] | 7 | 2019-11-08T09:27:46.000Z | 2020-11-17T10:00:51.000Z | scripts/aux/opts.py | Druidos/cv | 90cdbf212d7cc8c5cbd2fbbcc770d18a89771037 | [
"Apache-2.0"
] | 2 | 2019-10-11T07:25:17.000Z | 2020-01-21T16:15:18.000Z | #
# CV is a framework for continuous verification.
#
# Copyright (c) 2018-2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# These are options like "-include header.h" with space between option and value
# Options with values that are not separated by space should not be included here
import re
import os
# GCC options that take their value as the NEXT argv token
# (e.g. "-include header.h"); options whose value is glued to the
# option itself must not be listed here.
gcc_opts = [
    "-x",
    "-o",
    "-aux-info",
    "-D",
    "-U",
    "-include",
    "-imacros",
    "-MF",
    "-MT",
    "-MQ",
    "-Xpreprocessor",
    "-Xassembler",
    "-l",
    "-Xlinker",
    "-T",
    "-u",
    "-z",
    "-I",
    "-iquote",
    "-isystem",
    "-idirafter",
    "-iprefix",
    "-iwithprefix",
    "-iwithprefixbefore",
    "-isysroot",
    "-imultilib",
    "-imultiarch",
    "-auxbase",
    "-dumpbase",
    "-G"
]
clang_opts = [
"--CLASSPATH",
"--assert",
"--bootclasspath",
"--classpath",
"--config",
"--define-macro",
"--dyld-prefix",
"--encoding",
"--extdirs",
"--for-linker",
"--force-link",
"--include-directory",
"--include-directory-after",
"--include-prefix",
"--include-with-prefix",
"--include-with-prefix-after",
"--include-with-prefix-before",
"--language",
"--library-directory",
"--mhwdiv",
"--output",
"--output-class-directory",
"--param",
"--prefix",
"--print-file-name",
"--print-prog-name",
"--resource",
"--rtlib",
"--serialize-diagnostics",
"--std",
"--stdlib",
"--sysroot",
"--system-header-prefix",
"--undefine-macro",
"-F",
"-I",
"-MQ",
"-MT",
"-Wa-Wl-Wp-Xanalyzer",
"-Xanalyzer",
"-Xassembler",
"-Xclang",
"-Xcuda-fatbinary",
"-Xcuda-ptxas",
"-Xlinker",
"-Xopenmp-target",
"-Xopenmp-target=<triple>",
"-Xpreprocessor",
"-add-plugin",
"-allowable_client",
"-analyze-function",
"-analyzer-checker",
"-analyzer-config",
"-analyzer-constraints",
"-analyzer-disable-checker",
"-analyzer-inline-max-function-size",
"-analyzer-inline-max-stack-depth",
"-analyzer-inlining-mode",
"-analyzer-ipa",
"-analyzer-max-loop",
"-analyzer-max-nodes",
"-analyzer-output",
"-analyzer-purge",
"-analyzer-store",
"-arch",
"-arch_only",
"-arcmt-migrate-report-output",
"-ast-dump-filter",
"-ast-merge",
"-backend-option",
"-bundle_loader",
"-c-isystem",
"-chain-include",
"-code-completion-at",
"-coverage-file",
"-coverage-notes-file",
"-cxx-abi",
"-cxx-isystem",
"-dependency-dot",
"-dependency-file",
"-diagnostic-log-file",
"-dump-build-information",
"-dwarf-debug-flags",
"-dylib_file",
"-error-on-deserialized-decl",
"-exported_symbols_list",
"-fconstant-string-class",
"-fconstexpr-backtrace-limit",
"-fconstexpr-depth",
"-fdebug-compilation-dir",
"-fdiagnostics-format",
"-fdiagnostics-show-category",
"-ferror-limit",
"-filelist",
"-fmacro-backtrace-limit",
"-fmessage-length",
"-fmodule-cache-path",
"-fmodule-implementation-of",
"-fmodule-name",
"-fnew-alignment",
"-force_load",
"-framework",
"-frewrite-map-file",
"-ftabstop",
"-ftemplate-backtrace-limit",
"-ftemplate-depth",
"-ftrapv-handler",
"-fvisibility",
"-gcc-toolchain",
"-header-include-file",
"-idirafter",
"-iframework",
"-imacros",
"-image_base",
"-imultilib",
"-include",
"-include-pch",
"-include-pth",
"-init",
"-ino-system-prefix",
"-install_name",
"-internal-externc-isystem",
"-internal-isystem",
"-iprefix",
"-iquote",
"-isysroot",
"-isystem",
"-isystem-prefix",
"-iwithprefix",
"-iwithprefixbefore",
"-iwithsysroot",
"-lazy_framework",
"-lazy_library",
"-load",
"-main-file-name",
"-mcode-model",
"-mdebug-pass",
"-meabi",
"-mfloat-abi",
"-mlimit-float-precision",
"-mlink-bitcode-file",
"-mllvm",
"-module-dependency-dir",
"-mregparm",
"-mrelocation-model",
"-mt-migrate-directory",
"-mthread-model",
"-multiply_defined",
"-multiply_defined_unused",
"-o",
"-objc-isystem",
"-objcxx-isystem",
"-pic-level",
"-pie-level",
"-plugin-arg-plugin",
"-print-file-name-print-prog-name-remap-file",
"-read_only_relocs",
"-resource-dir",
"-rpath",
"-seg_addr_table",
"-seg_addr_table_filename",
"-segs_read_only_addr",
"-segs_read_write_addr",
"-serialize-diagnostic-file",
"-serialize-diagnostics",
"-stack-protector",
"-stack-protector-buffer-size",
"-target",
"-target-abi",
"-target-cpu",
"-target-feature",
"-target-linker-version",
"-token-cache",
"-triple",
"-umbrella",
"-unexported_symbols_list",
"-weak_framework",
"-weak_library",
"-weak_reference_mismatches",
"-working-directory",
"-x",
"-z",
]
# Preprocessor options that control dependency-file (.d) generation;
# used to recognize/strip Makefile-dependency output flags.
preprocessor_deps_opts = [
    "-M",
    "-MM",
    "-MF",
    "-MG",
    "-MP",
    "-MT",
    "-MQ",
    "-MD",
    "-MMD",
    "-dependency-file",
]
# Warning: --start-group archives --end-group options are not supported
ld_gnu_opts = [
"--audit",
"--bank-window",
"--base-file",
"--dll-search-prefix",
"--exclude-libs",
"--exclude-modules-for-implib",
"--exclude-symbols",
"--heap",
"--image-base",
"--major-image-version",
"--major-os-version",
"--major-subsystem-version",
"--minor-image-version",
"--minor-os-version",
"--minor-subsystem-version",
"--output-def",
"--out-implib",
"--stack",
"--subsystem",
"-A",
"-F",
"-G",
"-L",
"-O",
"-P",
"-R",
"-T",
"-Y",
"-a",
"-assert",
"-b",
"-c",
"-dT",
"-e",
"-f",
"-h",
"-l",
"-m",
"-o",
"-u",
"-y",
"-z",
"-plugin",
"-dynamic-linker",
]
# Warning: next options are not supported:
# -alias symbol_name alternate_symbol_name option is not supported
# -move_to_rw_segment segment_name filename
# -move_to_ro_segment segment_name filename
# -rename_section orgSegment orgSection newSegment newSection
# -rename_segment orgSegment newSegment
# -section_order segname colon_separated_section_list
# -sectalign segname sectname value
# -segprot segname max_prot init_prot
# -sectobjectsymbols segname sectname
# -sectorder segname sectname orderfile
ld_osx_opts = [
"-A",
"-U",
"-Y",
"-alias_list",
"-allowable_client",
"-arch",
"-bitcode_symbol_map",
"-bundle_loader",
"-cache_path_lto",
"-client_name",
"-commons",
"-compatibility_version",
"-current_version",
"-dirty_data_list",
"-dot",
"-dtrace",
"-dylib_file",
"-dylinker_install_name",
"-e",
"-exported_symbol",
"-exported_symbols_list",
"-exported_symbols_order",
"-filelist",
"-final_output",
"-force_load",
"-framework",
"-headerpad",
"-image_base",
"-init",
"-install_name",
"-interposable_list",
"-ios_version_min",
"-lazy_framework",
"-lazy_library",
"-lto_library",
"-macosx_version_min",
"-max_default_common_align",
"-max_relative_cache_size_lto",
"-map",
"-multiply_defined",
"-multiply_defined_unused",
"-non_global_symbols_no_strip_list",
"-non_global_symbols_strip_list",
"-o",
"-object_path_lto",
"-order_file",
"-pagezero_size",
"-prune_after_lto",
"-prune_interval_lto",
"-read_only_relocs",
"-reexported_symbols_list",
"-sect_diff_relocs",
"-seg_addr_table",
"-seg_addr_table_filename",
"-segaddr",
"-segalign",
"-segment_order",
"-seg_page_size",
"-segs_read_only_addr",
"-segs_read_write_addr",
"-stack_size",
"-sub_library",
"-sub_umbrella",
"-syslibroot",
"-reexport_framework",
"-reexport_library",
"-rpath",
"-sectcreate",
"-stack_addr",
"-u",
"-umbrella",
"-undefined",
"-unexported_symbol",
"-unexported_symbols_list",
"-upward_framework",
"-upward_library",
"-weak_framework",
"-weak_library",
"-weak_reference_mismatches",
"-why_live",
]
# Warning: Input files may be separated from options by "--": -- | files ...
# GNU assembler options whose value is the next argv token.
as_gnu_opts = [
    "--debug-prefix-map",
    "--defsym",
    "-I",
    "-o",
]
# macOS (Mach-O) assembler options whose value is the next argv token.
as_osx_opts = [
    "-arch",
    "-o",
]
objcopy_opts = [
"--add-section",
"--adjust-vma",
"--adjust-section-vma",
"--adjust-start",
"--change-addresses",
"--change-section-address",
"--change-section-lma",
"--change-section-vma",
"--change-start",
"--file-alignment",
"--gap-fill",
"--heap",
"--image-base",
"--long-section-names",
"--redefine-sym",
"--rename-section",
"--section-alignment",
"--set-section-flags",
"--set-start",
"--stack",
"--subsystem",
"-B",
"-F",
"-G",
"-I",
"-K",
"-L",
"-N",
"-O",
"-R",
"-W",
"-b",
"-i",
"-j",
]
# Maps each tool category to the full set of its options that consume the
# FOLLOWING command-line token as their value.
requires_value = {
    "CC": set(gcc_opts + clang_opts),
    "LD": set(ld_gnu_opts + ld_osx_opts),
    "AS": set(as_gnu_opts + as_osx_opts),
    "Objcopy": set(objcopy_opts),
}
def filter_opts(opts: list, opts_to_filter: list) -> list:
    """Remove options matching any regex in ``opts_to_filter``.

    A matched compiler option that takes a separate value (per
    ``requires_value["CC"]``) drops its value token as well.  Surviving
    ``-I`` options are normalized to the glued "-I<abspath>" form.

    Args:
        opts: Command-line tokens.
        opts_to_filter: Regular expressions; an option is dropped when
            any of them matches its beginning.

    Returns:
        The filtered (and -I-normalized) list.  When ``opts_to_filter``
        is empty the original list is returned unchanged.
    """
    if not opts_to_filter:
        return opts
    filtered_opts = []
    # One alternation regex instead of len(opts_to_filter) match calls.
    regex = re.compile("(" + ")|(".join(opts_to_filter) + ")")
    opts = iter(opts)
    for opt in opts:
        if regex.match(opt):
            if opt in requires_value["CC"]:
                # Also drop the option's value token.  Default guards
                # against a truncated command line ending right after the
                # option (the original leaked StopIteration here).
                next(opts, None)
            continue
        m = re.search(r'-I(.*)', opt)
        if m:
            path = m.group(1)
            if not path:
                # "-I <dir>" form: the directory is the next token.
                path = next(opts, None)
                if path is None:
                    # Truncated "-I" with no directory: nothing to keep.
                    break
            opt = "-I" + os.path.abspath(path)
        filtered_opts.append(opt)
    return filtered_opts
| 21.885655 | 81 | 0.566353 |
7958835d4c53b813fd5ef4cb9b4befe4a7dfe0f8 | 7,736 | py | Python | linedrawer.py | q1qgames/pixray | 8bd73869af7979068aa7ff8402f5b3ab2b791255 | [
"MIT"
] | 468 | 2021-11-23T08:05:15.000Z | 2022-03-30T13:16:43.000Z | linedrawer.py | q1qgames/pixray | 8bd73869af7979068aa7ff8402f5b3ab2b791255 | [
"MIT"
] | 53 | 2021-11-26T22:46:36.000Z | 2022-03-28T16:47:02.000Z | linedrawer.py | q1qgames/pixray | 8bd73869af7979068aa7ff8402f5b3ab2b791255 | [
"MIT"
] | 67 | 2021-11-24T11:34:05.000Z | 2022-03-28T15:17:45.000Z | # this is derived from ClipDraw code
# CLIPDraw: Exploring Text-to-Drawing Synthesis through Language-Image Encoders
# Kevin Frans, L.B. Soros, Olaf Witkowski
# https://arxiv.org/abs/2106.14843
from DrawingInterface import DrawingInterface
import pydiffvg
import torch
import skimage
import skimage.io
import random
import ttools.modules
import argparse
import math
import torchvision
import torchvision.transforms as transforms
import numpy as np
import PIL.Image
from util import str2bool
def bound(value, low, high):
    """Clamp *value* into the interval [low, high].

    If low > high the lower bound wins (same as max(low, min(high, value))).
    """
    capped = min(high, value)
    return max(low, capped)
class LineDrawer(DrawingInterface):
    """Drawing backend that optimizes a set of differentiable Bezier strokes
    (rendered via pydiffvg) on a paper-colored background rectangle."""

    @staticmethod
    def add_settings(parser):
        """Register this drawer's command-line options on *parser*."""
        parser.add_argument("--strokes", type=int, help="number strokes", default=24, dest='strokes')
        parser.add_argument("--stroke_length", type=int, help="stroke length", default=8, dest='stroke_length')
        parser.add_argument("--min_stroke_width", type=float, help="min width (percent of height)", default=0.5, dest='min_stroke_width')
        parser.add_argument("--max_stroke_width", type=float, help="max width (percent of height)", default=2, dest='max_stroke_width')
        parser.add_argument("--allow_paper_color", type=str2bool, help="allow paper color to change", default=False, dest='allow_paper_color')
        return parser

    def __init__(self, settings):
        super(DrawingInterface, self).__init__()

        self.canvas_width = settings.size[0]
        self.canvas_height = settings.size[1]
        self.num_paths = settings.strokes
        self.stroke_length = settings.stroke_length

    def load_model(self, settings, device):
        """Build the initial random stroke scene and mark tensors trainable.

        NOTE(review): the *device* parameter is immediately overwritten with
        'cuda' below — confirm whether honoring the caller's device was
        intended.
        """
        # Use GPU if available
        pydiffvg.set_use_gpu(torch.cuda.is_available())
        device = torch.device('cuda')
        pydiffvg.set_device(device)

        canvas_width, canvas_height = self.canvas_width, self.canvas_height
        num_paths = self.num_paths
        # Stroke widths are given as a percentage of the canvas height.
        max_width = settings.max_stroke_width * canvas_height / 100
        # NOTE(review): min_width is computed but never used in this method.
        min_width = settings.min_stroke_width * canvas_height / 100

        shapes = []
        shape_groups = []
        color_vars = []

        # background shape
        p0 = [0, 0]
        p1 = [canvas_width, canvas_height]
        path = pydiffvg.Rect(p_min=torch.tensor(p0), p_max=torch.tensor(p1))
        shapes.append(path)
        # https://encycolorpedia.com/f2eecb
        cell_color = torch.tensor([242/255.0, 238/255.0, 203/255.0, 1.0])
        path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(shapes)-1]), stroke_color=None, fill_color=cell_color)
        shape_groups.append(path_group)
        if settings.allow_paper_color:
            # Let the optimizer adjust the paper (background) color too.
            path_group.fill_color.requires_grad = True
            color_vars.append(path_group.fill_color)

        # Initialize Random Curves
        for i in range(num_paths):
            num_segments = self.stroke_length
            # Two control points per cubic segment (endpoints are shared).
            num_control_points = torch.zeros(num_segments, dtype=torch.int32) + 2
            points = []
            radius = 0.5
            radius_x = 0.5  # radius * canvas_height / canvas_width
            # Random start point in normalized [0, 1] canvas coordinates.
            p0 = (0.5 + radius_x * (random.random() - 0.5), 0.5 + radius * (random.random() - 0.5))
            points.append(p0)
            for j in range(num_segments):
                # Each segment wanders a small, resolution-scaled distance.
                radius = 1.0 / (num_segments + 2)
                radius_x = radius * canvas_height / canvas_width
                p1 = (p0[0] + radius_x * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5))
                p2 = (p1[0] + radius_x * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5))
                p3 = (p2[0] + radius_x * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5))
                points.append(p1)
                points.append(p2)
                points.append(p3)
                # Clamp the segment endpoint so strokes stay on the canvas.
                p0 = (bound(p3[0], 0, 1), bound(p3[1], 0, 1))
            points = torch.tensor(points)
            # Scale normalized coordinates up to pixel coordinates.
            points[:, 0] *= canvas_width
            points[:, 1] *= canvas_height
            path = pydiffvg.Path(num_control_points=num_control_points, points=points, stroke_width=torch.tensor(max_width/10), is_closed=False)
            shapes.append(path)
            s_col = [0, 0, 0, 1]
            path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(shapes)-1]), fill_color=None, stroke_color=torch.tensor(s_col))
            shape_groups.append(path_group)

        # Just some diffvg setup
        scene_args = pydiffvg.RenderFunction.serialize_scene(\
            canvas_width, canvas_height, shapes, shape_groups)
        render = pydiffvg.RenderFunction.apply
        img = render(canvas_width, canvas_height, 2, 2, 0, None, *scene_args)

        # Every shape except the background rectangle is trainable.
        points_vars = []
        stroke_width_vars = []
        for path in shapes[1:]:
            path.points.requires_grad = True
            points_vars.append(path.points)
            path.stroke_width.requires_grad = True
            stroke_width_vars.append(path.stroke_width)
        # for group in shape_groups:
        #     group.stroke_color.requires_grad = True
        #     color_vars.append(group.stroke_color)

        self.points_vars = points_vars
        self.stroke_width_vars = stroke_width_vars
        self.color_vars = color_vars
        self.img = img
        self.shapes = shapes
        self.shape_groups = shape_groups
        self.max_width = max_width
        self.canvas_width = canvas_width
        self.canvas_height = canvas_height

    def get_opts(self, decay_divisor):
        """Return the Adam optimizers for points/widths (and colors if any)."""
        # Optimizers
        points_optim = torch.optim.Adam(self.points_vars, lr=1.0/decay_divisor)
        width_optim = torch.optim.Adam(self.stroke_width_vars, lr=0.1/decay_divisor)
        opts = [points_optim, width_optim]
        if len(self.color_vars) > 0:
            color_optim = torch.optim.Adam(self.color_vars, lr=0.01/decay_divisor)
            opts.append(color_optim)
        return opts

    def rand_init(self, toksX, toksY):
        # TODO
        pass

    def init_from_tensor(self, init_tensor):
        # TODO
        pass

    def reapply_from_tensor(self, new_tensor):
        # TODO
        pass

    def get_z_from_tensor(self, ref_tensor):
        return None

    def get_num_resolutions(self):
        return None

    def synth(self, cur_iteration):
        """Render the current scene to an image tensor of shape (1, 3, H, W)."""
        render = pydiffvg.RenderFunction.apply
        scene_args = pydiffvg.RenderFunction.serialize_scene(\
            self.canvas_width, self.canvas_height, self.shapes, self.shape_groups)
        img = render(self.canvas_width, self.canvas_height, 2, 2, cur_iteration, None, *scene_args)
        # Composite over a white background using the alpha channel.
        img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=pydiffvg.get_device()) * (1 - img[:, :, 3:4])
        img = img[:, :, :3]
        img = img.unsqueeze(0)
        img = img.permute(0, 3, 1, 2)  # NHWC -> NCHW
        self.img = img
        return img

    @torch.no_grad()
    def to_image(self):
        """Convert the last rendered tensor to a PIL RGB image."""
        img = self.img.detach().cpu().numpy()[0]
        img = np.transpose(img, (1, 2, 0))
        img = np.clip(img, 0, 1)
        # NOTE(review): scaling by 254 (not 255) looks deliberate — confirm.
        img = np.uint8(img * 254)
        # img = np.repeat(img, 4, axis=0)
        # img = np.repeat(img, 4, axis=1)
        pimg = PIL.Image.fromarray(img, mode="RGB")
        return pimg

    def clip_z(self):
        """Clamp stroke widths and colors back into their valid ranges."""
        with torch.no_grad():
            for path in self.shapes[1:]:
                path.stroke_width.data.clamp_(1.0, self.max_width)
            for group in self.shape_groups[1:]:
                group.stroke_color.data.clamp_(0.0, 1.0)

    def get_z(self):
        return None

    def get_z_copy(self):
        return None

    def set_z(self, new_z):
        return None

    @torch.no_grad()
    def to_svg(self):
        """Dump the current scene as an SVG file in the working directory."""
        pydiffvg.save_svg("./lineout.svg", self.canvas_width, self.canvas_height, self.shapes, self.shape_groups)
| 39.070707 | 152 | 0.624741 |
795883a0e14a9e7abc8063225b34732ddf3494fa | 393 | py | Python | egs/grand_challenge/inference.py | HongYun0901/ESPnet | 44f78734034991fed4f42359f4d15f15504680bd | [
"Apache-2.0"
] | 1 | 2021-12-22T06:04:44.000Z | 2021-12-22T06:04:44.000Z | egs/grand_challenge/inference.py | hongyuntw/ESPnet | 44f78734034991fed4f42359f4d15f15504680bd | [
"Apache-2.0"
] | null | null | null | egs/grand_challenge/inference.py | hongyuntw/ESPnet | 44f78734034991fed4f42359f4d15f15504680bd | [
"Apache-2.0"
] | null | null | null | import json
import matplotlib.pyplot as plt
import kaldiio
root = "/home/nlp/ASR/espnet/egs/FSW"
with open(root + "/dump/test/deltafalse/data.json", "r") as f:
test_json = json.load(f)["utts"]
key, info = list(test_json.items())[10]
fbank = kaldiio.load_mat(info["input"][0]["feat"])
# plot the speech feature
plt.matshow(fbank.T[::-1])
plt.title(key + ": " + info["output"][0]["text"]) | 28.071429 | 62 | 0.671756 |
7958840b50a855cc7287a88dc98c83ab6d64dad8 | 1,550 | py | Python | src/twisted/python/test/modules_helpers.py | apjanke/twisted | 22f949f7ce187513f0c218b73186c8a73baa00b4 | [
"Unlicense",
"MIT"
] | 1 | 2021-01-03T01:54:14.000Z | 2021-01-03T01:54:14.000Z | src/twisted/python/test/modules_helpers.py | zerospam/twisted | e23b5e2040a4d643bc6a43785621358569886a0d | [
"MIT",
"Unlicense"
] | null | null | null | src/twisted/python/test/modules_helpers.py | zerospam/twisted | e23b5e2040a4d643bc6a43785621358569886a0d | [
"MIT",
"Unlicense"
] | null | null | null | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Facilities for helping test code which interacts with Python's module system
to load code.
"""
import sys
from twisted.python.filepath import FilePath
class TwistedModulesMixin:
"""
A mixin for C{twisted.trial.unittest.SynchronousTestCase} providing useful
methods for manipulating Python's module system.
"""
def replaceSysPath(self, sysPath):
"""
Replace sys.path, for the duration of the test, with the given value.
"""
originalSysPath = sys.path[:]
def cleanUpSysPath():
sys.path[:] = originalSysPath
self.addCleanup(cleanUpSysPath)
sys.path[:] = sysPath
def replaceSysModules(self, sysModules):
"""
Replace sys.modules, for the duration of the test, with the given value.
"""
originalSysModules = sys.modules.copy()
def cleanUpSysModules():
sys.modules.clear()
sys.modules.update(originalSysModules)
self.addCleanup(cleanUpSysModules)
sys.modules.clear()
sys.modules.update(sysModules)
def pathEntryWithOnePackage(self, pkgname="test_package"):
"""
Generate a L{FilePath} with one package, named C{pkgname}, on it, and
return the L{FilePath} of the path entry.
"""
entry = FilePath(self.mktemp())
pkg = entry.child("test_package")
pkg.makedirs()
pkg.child("__init__.py").setContent(b"")
return entry
| 27.192982 | 80 | 0.637419 |
795884adcf7f7098bb8940f07630d9159dc74406 | 63,366 | py | Python | ssg/build_yaml.py | heryxpc/content | 5293788149707ae46d8e217ac106398e2c996493 | [
"BSD-3-Clause"
] | null | null | null | ssg/build_yaml.py | heryxpc/content | 5293788149707ae46d8e217ac106398e2c996493 | [
"BSD-3-Clause"
] | null | null | null | ssg/build_yaml.py | heryxpc/content | 5293788149707ae46d8e217ac106398e2c996493 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import os
import os.path
from collections import defaultdict
from copy import deepcopy
import datetime
import re
import sys
from xml.sax.saxutils import escape
import yaml
from .build_cpe import CPEDoesNotExist
from .constants import XCCDF_REFINABLE_PROPERTIES
from .rules import get_rule_dir_id, get_rule_dir_yaml, is_rule_dir
from .rule_yaml import parse_prodtype
from .controls import Control
from .checks import is_cce_format_valid, is_cce_value_valid
from .yaml import DocumentationNotComplete, open_and_expand, open_and_macro_expand
from .utils import required_key, mkdir_p
from .xml import ElementTree as ET
from .shims import unicode_func
def add_sub_element(parent, tag, data):
    """
    Creates a new child element under parent with tag tag, and sets
    data as the content under the tag. In particular, data is a string
    to be parsed as an XML tree, allowing sub-elements of children to be
    added.

    If data should not be parsed as an XML tree, either escape the contents
    before passing into this function, or use ElementTree.SubElement().

    Returns the newly created subelement of type tag.
    """
    # Our YAML data may embed XML/XHTML markup. ET.SubElement() would escape
    # "<" and ">" (to < / >), losing any child elements, so instead we
    # build the snippet as text and parse it back into an element tree.
    # TODO: Remove this function after we move to Markdown everywhere in SSG
    snippet = unicode_func("<{0}>{1}</{0}>").format(tag, data)
    try:
        new_child = ET.fromstring(snippet.encode("utf-8"))
    except Exception:
        raise RuntimeError(
            "Error adding subelement to an element '{0}' from string: '{1}'"
            .format(parent.tag, snippet))
    parent.append(new_child)
    return new_child
def reorder_according_to_ordering(unordered, ordering, regex=None):
    """Reorder *unordered* so that items matching the *ordering* priorities
    come first, in the priority order given.

    :param unordered: iterable of strings to reorder
    :param ordering: list of priority substrings; items containing the first
        entry come first, then items containing the second, and so on. Only
        items matching *regex* are eligible for priority placement.
    :param regex: optional pattern selecting the orderable items; by default
        it is built as an alternation of the *ordering* entries.
    :returns: a new list; duplicates are collapsed to their first occurrence.
    """
    ordered = []
    if regex is None:
        regex = "|".join(["({0})".format(item) for item in ordering])
    regex = re.compile(regex)
    items_to_order = list(filter(regex.match, unordered))
    remaining = set(unordered)
    for priority_type in ordering:
        for item in items_to_order:
            if priority_type in item and item in remaining:
                ordered.append(item)
                remaining.remove(item)
    # Fix: append the leftover items in their original relative order instead
    # of arbitrary set-iteration order, so the output is deterministic across
    # runs (set iteration order over strings depends on hashing).
    for item in unordered:
        if item in remaining:
            ordered.append(item)
            remaining.remove(item)
    return ordered
def add_warning_elements(element, warnings):
    """Append one <warning category="..."> child per entry of *warnings*.

    *warnings* is a list of single-pair dicts rather than one dict, because
    SCAP (and our content) allows the same category to appear several times:

        warnings:
            - general: Some general warning
            - general: Some other general warning
    """
    for warning_dict in warnings:
        category = list(warning_dict.keys())[0]
        text = list(warning_dict.values())[0]
        warning_element = add_sub_element(element, "warning", text)
        warning_element.set("category", category)
def add_nondata_subelements(element, subelement, attribute, attr_data):
    """Append one empty child per value in *attr_data*, each carrying only
    the given attribute — e.g. <requires id="my_required_id"/>."""
    for value in attr_data:
        ET.SubElement(element, subelement, {attribute: value})
class Profile(object):
    """Represents XCCDF profile
    """

    def __init__(self, id_):
        self.id_ = id_
        self.title = ""
        self.description = ""
        self.extends = None
        self.selected = []
        self.unselected = []
        self.variables = dict()
        self.refine_rules = defaultdict(list)
        self.metadata = None
        self.reference = None
        # self.platforms is used further in the build system
        # self.platform is merged into self.platforms
        # it is here for backward compatibility
        self.platforms = set()
        self.cpe_names = set()
        self.platform = None

    def read_yaml_contents(self, yaml_contents):
        """Consume the known keys from *yaml_contents* (mutating it) into
        this profile's attributes; leftover keys signal unparsed data."""
        self.title = required_key(yaml_contents, "title")
        del yaml_contents["title"]
        self.description = required_key(yaml_contents, "description")
        del yaml_contents["description"]
        self.extends = yaml_contents.pop("extends", None)
        selection_entries = required_key(yaml_contents, "selections")
        if selection_entries:
            self._parse_selections(selection_entries)
        del yaml_contents["selections"]
        self.platforms = yaml_contents.pop("platforms", set())
        self.platform = yaml_contents.pop("platform", None)

    @classmethod
    def from_yaml(cls, yaml_file, env_yaml=None):
        """Build a profile from a .profile YAML file; returns None when the
        file expands to nothing. Raises on unknown keys or unknown CPEs."""
        yaml_contents = open_and_expand(yaml_file, env_yaml)
        if yaml_contents is None:
            return None

        # The profile id is the file basename without the extension.
        basename, _ = os.path.splitext(os.path.basename(yaml_file))

        profile = cls(basename)
        profile.read_yaml_contents(yaml_contents)

        profile.reference = yaml_contents.pop("reference", None)

        # ensure that content of profile.platform is in profile.platforms as
        # well
        if profile.platform is not None:
            profile.platforms.add(profile.platform)

        if env_yaml:
            for platform in profile.platforms:
                try:
                    profile.cpe_names.add(env_yaml["product_cpes"].get_cpe_name(platform))
                except CPEDoesNotExist:
                    print("Unsupported platform '%s' in profile '%s'." % (platform, profile.id_))
                    raise

        # At the moment, metadata is not used to build content
        if "metadata" in yaml_contents:
            del yaml_contents["metadata"]

        if yaml_contents:
            raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s"
                               % (yaml_file, yaml_contents))

        return profile

    def dump_yaml(self, file_name, documentation_complete=True):
        """Serialize this profile back to a .profile YAML file."""
        to_dump = {}
        to_dump["documentation_complete"] = documentation_complete
        to_dump["title"] = self.title
        to_dump["description"] = self.description
        to_dump["reference"] = self.reference
        if self.metadata is not None:
            to_dump["metadata"] = self.metadata
        if self.extends is not None:
            to_dump["extends"] = self.extends
        if self.platforms:
            to_dump["platforms"] = self.platforms
        selections = []
        for item in self.selected:
            selections.append(item)
        for item in self.unselected:
            # Unselections are encoded with a leading "!".
            selections.append("!"+item)
        for varname in self.variables.keys():
            selections.append(varname+"="+self.variables.get(varname))
        for rule, refinements in self.refine_rules.items():
            for prop, val in refinements:
                selections.append("{rule}.{property}={value}"
                                  .format(rule=rule, property=prop, value=val))
        to_dump["selections"] = selections
        with open(file_name, "w+") as f:
            yaml.dump(to_dump, f, indent=4)

    def _parse_selections(self, entries):
        for item in entries:
            self.apply_selection(item)

    def apply_selection(self, item):
        """Classify one selections-list entry as a rule refinement
        ("rule.prop=value"), a variable ("var=value"), an unselection
        ("!rule"), or a plain rule selection."""
        if "." in item:
            rule, refinement = item.split(".", 1)
            property_, value = refinement.split("=", 1)

            if property_ not in XCCDF_REFINABLE_PROPERTIES:
                msg = ("Property '{property_}' cannot be refined. "
                       "Rule properties that can be refined are {refinables}. "
                       "Fix refinement '{rule_id}.{property_}={value}' in profile '{profile}'."
                       .format(property_=property_, refinables=XCCDF_REFINABLE_PROPERTIES,
                               rule_id=rule, value=value, profile=self.id_)
                       )
                raise ValueError(msg)

            self.refine_rules[rule].append((property_, value))
        elif "=" in item:
            varname, value = item.split("=", 1)
            self.variables[varname] = value
        elif item.startswith("!"):
            self.unselected.append(item[1:])
        else:
            self.selected.append(item)

    def to_xml_element(self):
        """Render this profile as an XCCDF <Profile> element."""
        element = ET.Element('Profile')
        element.set("id", self.id_)
        if self.extends:
            element.set("extends", self.extends)
        title = add_sub_element(element, "title", self.title)
        title.set("override", "true")
        desc = add_sub_element(element, "description", self.description)
        desc.set("override", "true")

        if self.reference:
            add_sub_element(element, "reference", escape(self.reference))

        for cpe_name in self.cpe_names:
            plat = ET.SubElement(element, "platform")
            plat.set("idref", cpe_name)

        for selection in self.selected:
            select = ET.Element("select")
            select.set("idref", selection)
            select.set("selected", "true")
            element.append(select)

        for selection in self.unselected:
            unselect = ET.Element("select")
            unselect.set("idref", selection)
            unselect.set("selected", "false")
            element.append(unselect)

        for value_id, selector in self.variables.items():
            refine_value = ET.Element("refine-value")
            refine_value.set("idref", value_id)
            refine_value.set("selector", selector)
            element.append(refine_value)

        for refined_rule, refinement_list in self.refine_rules.items():
            refine_rule = ET.Element("refine-rule")
            refine_rule.set("idref", refined_rule)
            for refinement in refinement_list:
                refine_rule.set(refinement[0], refinement[1])
            element.append(refine_rule)

        return element

    def get_rule_selectors(self):
        return list(self.selected + self.unselected)

    def get_variable_selectors(self):
        return self.variables

    def validate_refine_rules(self, rules):
        """Raise ValueError when a refinement targets a rule that does not
        exist in the benchmark or is not selected by this profile."""
        existing_rule_ids = [r.id_ for r in rules]
        for refine_rule, refinement_list in self.refine_rules.items():
            # Take first refinement to ilustrate where the error is
            # all refinements in list are invalid, so it doesn't really matter
            a_refinement = refinement_list[0]

            if refine_rule not in existing_rule_ids:
                msg = (
                    "You are trying to refine a rule that doesn't exist. "
                    "Rule '{rule_id}' was not found in the benchmark. "
                    "Please check all rule refinements for rule: '{rule_id}', for example: "
                    "- {rule_id}.{property_}={value}' in profile {profile_id}."
                    .format(rule_id=refine_rule, profile_id=self.id_,
                            property_=a_refinement[0], value=a_refinement[1])
                    )
                raise ValueError(msg)

            if refine_rule not in self.get_rule_selectors():
                msg = ("- {rule_id}.{property_}={value}' in profile '{profile_id}' is refining "
                       "a rule that is not selected by it. The refinement will not have any "
                       "noticeable effect. Either select the rule or remove the rule refinement."
                       .format(rule_id=refine_rule, property_=a_refinement[0],
                               value=a_refinement[1], profile_id=self.id_)
                       )
                raise ValueError(msg)

    def validate_variables(self, variables):
        """Raise ValueError when the profile sets an unknown variable or
        uses a selector not declared by the variable's definition."""
        variables_by_id = dict()
        for var in variables:
            variables_by_id[var.id_] = var

        for var_id, our_val in self.variables.items():
            if var_id not in variables_by_id:
                all_vars_list = [" - %s" % v for v in variables_by_id.keys()]
                msg = (
                    "Value '{var_id}' in profile '{profile_name}' is not known. "
                    "We know only variables:\n{var_names}"
                    .format(
                        var_id=var_id, profile_name=self.id_,
                        var_names="\n".join(sorted(all_vars_list)))
                )
                raise ValueError(msg)

            allowed_selectors = [str(s) for s in variables_by_id[var_id].options.keys()]
            if our_val not in allowed_selectors:
                msg = (
                    "Value '{var_id}' in profile '{profile_name}' "
                    "uses the selector '{our_val}'. "
                    "This is not possible, as only selectors {all_selectors} are available. "
                    "Either change the selector used in the profile, or "
                    "add the selector-value pair to the variable definition."
                    .format(
                        var_id=var_id, profile_name=self.id_, our_val=our_val,
                        all_selectors=allowed_selectors,
                    )
                )
                raise ValueError(msg)

    def validate_rules(self, rules, groups):
        """Raise ValueError when the profile selects a group or selects a
        rule id that is not present in the benchmark."""
        existing_rule_ids = [r.id_ for r in rules]
        rule_selectors = self.get_rule_selectors()
        for id_ in rule_selectors:
            if id_ in groups:
                msg = (
                    "You have selected a group '{group_id}' instead of a "
                    "rule. Groups have no effect in the profile and are not "
                    "allowed to be selected. Please remove '{group_id}' "
                    "from profile '{profile_id}' before proceeding."
                    .format(group_id=id_, profile_id=self.id_)
                )
                raise ValueError(msg)
            if id_ not in existing_rule_ids:
                msg = (
                    "Rule '{rule_id}' was not found in the benchmark. Please "
                    "remove rule '{rule_id}' from profile '{profile_id}' "
                    "before proceeding."
                    .format(rule_id=id_, profile_id=self.id_)
                )
                raise ValueError(msg)

    def __sub__(self, other):
        """Return a new profile containing the selections and variables of
        this profile that are not present (identically) in *other*."""
        profile = Profile(self.id_)
        profile.title = self.title
        profile.description = self.description
        profile.extends = self.extends
        profile.platforms = self.platforms
        profile.platform = self.platform
        profile.selected = list(set(self.selected) - set(other.selected))
        profile.selected.sort()
        profile.unselected = list(set(self.unselected) - set(other.unselected))
        profile.variables = dict((k, v) for (k, v) in self.variables.items()
                                 if k not in other.variables or v != other.variables[k])
        return profile
class ResolvableProfile(Profile):
    """A profile that can resolve its "extends" chain and control references
    into a flat, final list of selections."""

    def __init__(self, *args, **kwargs):
        super(ResolvableProfile, self).__init__(*args, **kwargs)
        # Set once resolve() has run, to make resolution idempotent.
        self.resolved = False
        self.resolved_selections = set()

    def _controls_ids_to_controls(self, controls_manager, policy_id, control_id_list):
        """Look up each control id of the given policy in *controls_manager*."""
        items = [controls_manager.get_control(policy_id, cid) for cid in control_id_list]
        return items

    def _merge_control(self, control):
        """Pull a control's rules and variables into this profile; existing
        profile variables take precedence over the control's."""
        self.selected.extend(control.rules)
        for varname, value in control.variables.items():
            if varname not in self.variables:
                self.variables[varname] = value

    def resolve_controls(self, controls_manager):
        # Hook for subclasses that carry policy/control references.
        pass

    def extend_by(self, extended_profile):
        """Merge an already-resolved base profile into this one. This
        profile's own variables and refinements win over the base's."""
        extended_selects = set(extended_profile.selected)
        self.resolved_selections.update(extended_selects)

        updated_variables = dict(extended_profile.variables)
        updated_variables.update(self.variables)
        self.variables = updated_variables

        extended_refinements = deepcopy(extended_profile.refine_rules)
        updated_refinements = self._subtract_refinements(extended_refinements)
        updated_refinements.update(self.refine_rules)
        self.refine_rules = updated_refinements

    def resolve(self, all_profiles, controls_manager=None):
        """Flatten this profile: resolve controls, recursively resolve and
        merge the extended profile, then apply unselections. Idempotent."""
        if self.resolved:
            return

        self.resolve_controls(controls_manager)

        self.resolved_selections = set(self.selected)

        if self.extends:
            if self.extends not in all_profiles:
                msg = (
                    "Profile {name} extends profile {extended}, but "
                    "only profiles {known_profiles} are available for resolution."
                    .format(name=self.id_, extended=self.extends,
                            known_profiles=list(all_profiles.keys())))
                raise RuntimeError(msg)
            extended_profile = all_profiles[self.extends]
            extended_profile.resolve(all_profiles, controls_manager)

            self.extend_by(extended_profile)

        # Unselections are applied last, so they also remove rules that were
        # inherited from the extended profile.
        for uns in self.unselected:
            self.resolved_selections.discard(uns)

        self.unselected = []
        self.extends = None
        self.selected = sorted(self.resolved_selections)
        self.resolved = True

    def _subtract_refinements(self, extended_refinements):
        """
        Given a dict of rule refinements from the extended profile,
        "undo" every refinement prefixed with '!' in this profile.
        """
        for rule, refinements in list(self.refine_rules.items()):
            if rule.startswith("!"):
                for prop, val in refinements:
                    extended_refinements[rule[1:]].remove((prop, val))
                del self.refine_rules[rule]
        return extended_refinements
class ProfileWithSeparatePolicies(ResolvableProfile):
    """Profile variant where policy/control references live in a separate
    top-level "policies" YAML key rather than inside "selections"."""

    def __init__(self, *args, **kwargs):
        super(ProfileWithSeparatePolicies, self).__init__(*args, **kwargs)
        # Maps policy id -> list of control ids, or the string "all".
        self.policies = {}

    def read_yaml_contents(self, yaml_contents):
        policies = yaml_contents.pop("policies", None)
        if policies:
            self._parse_policies(policies)
        super(ProfileWithSeparatePolicies, self).read_yaml_contents(yaml_contents)

    def _parse_policies(self, policies_yaml):
        """Validate and record the "policies" entries; each entry must carry
        an id and either a list of controls or the literal "all"."""
        for item in policies_yaml:
            id_ = required_key(item, "id")
            controls_ids = required_key(item, "controls")
            if not isinstance(controls_ids, list):
                if controls_ids != "all":
                    msg = (
                        "Policy {id_} contains invalid controls list {controls}."
                        .format(id_=id_, controls=str(controls_ids)))
                    raise ValueError(msg)
            self.policies[id_] = controls_ids

    def _process_controls_ids_into_controls(self, controls_manager, policy_id, controls_ids):
        """Expand control ids — "all", "all:<level>", or a plain id — into
        Control objects via the controls manager."""
        controls = []
        for cid in controls_ids:
            if not cid.startswith("all"):
                controls.extend(
                    self._controls_ids_to_controls(controls_manager, policy_id, [cid]))
            elif ":" in cid:
                # "all:<level>" selects every control of that level.
                _, level_id = cid.split(":", 1)
                controls.extend(
                    controls_manager.get_all_controls_of_level(policy_id, level_id))
            else:
                controls.extend(controls_manager.get_all_controls(policy_id))
        return controls

    def resolve_controls(self, controls_manager):
        """Merge the rules/variables of every referenced control into the
        profile's selections."""
        for policy_id, controls_ids in self.policies.items():
            controls = []
            if isinstance(controls_ids, list):
                controls = self._process_controls_ids_into_controls(
                    controls_manager, policy_id, controls_ids)
            elif controls_ids.startswith("all"):
                controls = self._process_controls_ids_into_controls(
                    controls_manager, policy_id, [controls_ids])
            else:
                msg = (
                    "Unknown policy content {content} in profile {profile_id}"
                    .format(content=controls_ids, profile_id=self.id_))
                raise ValueError(msg)
            for c in controls:
                self._merge_control(c)

    def extend_by(self, extended_profile):
        self.policies.update(extended_profile.policies)
        super(ProfileWithSeparatePolicies, self).extend_by(extended_profile)
class ProfileWithInlinePolicies(ResolvableProfile):
    """Profile variant where control references appear inline in the
    "selections" list as "policy_id:control_id" entries."""

    def __init__(self, *args, **kwargs):
        super(ProfileWithInlinePolicies, self).__init__(*args, **kwargs)
        # Maps policy id -> list of control ids referenced by this profile.
        self.controls_by_policy = defaultdict(list)

    def apply_selection(self, item):
        # ":" is the delimiter for controls but not when the item is a variable
        if ":" in item and "=" not in item:
            policy_id, control_id = item.split(":", 1)
            self.controls_by_policy[policy_id].append(control_id)
        else:
            super(ProfileWithInlinePolicies, self).apply_selection(item)

    def _process_controls_ids_into_controls(self, controls_manager, policy_id, controls_ids):
        """Expand control ids — "all", "all:<level>", or a plain id — into
        Control objects via the controls manager."""
        controls = []
        for cid in controls_ids:
            if not cid.startswith("all"):
                controls.extend(
                    self._controls_ids_to_controls(controls_manager, policy_id, [cid]))
            elif ":" in cid:
                # "all:<level>" selects every control of that level.
                _, level_id = cid.split(":", 1)
                controls.extend(
                    controls_manager.get_all_controls_of_level(policy_id, level_id))
            else:
                controls.extend(
                    controls_manager.get_all_controls(policy_id))
        return controls

    def resolve_controls(self, controls_manager):
        """Merge the rules/variables of every referenced control into the
        profile's selections."""
        for policy_id, controls_ids in self.controls_by_policy.items():
            controls = self._process_controls_ids_into_controls(
                controls_manager, policy_id, controls_ids)
            for c in controls:
                self._merge_control(c)
class Value(object):
    """Represents XCCDF Value
    """

    def __init__(self, id_):
        self.id_ = id_
        self.title = ""
        self.description = ""
        self.type_ = "string"
        self.operator = "equals"
        self.interactive = False
        # Maps selector string -> value; the "default" selector is special.
        self.options = {}
        self.warnings = []

    @staticmethod
    def from_yaml(yaml_file, env_yaml=None):
        """Build a Value from a .var YAML file; returns None when the file
        expands to nothing. Raises on invalid operators or leftover keys."""
        yaml_contents = open_and_macro_expand(yaml_file, env_yaml)
        if yaml_contents is None:
            return None

        # The value id is the file basename without the extension.
        value_id, _ = os.path.splitext(os.path.basename(yaml_file))
        value = Value(value_id)
        value.title = required_key(yaml_contents, "title")
        del yaml_contents["title"]
        value.description = required_key(yaml_contents, "description")
        del yaml_contents["description"]
        value.type_ = required_key(yaml_contents, "type")
        del yaml_contents["type"]
        value.operator = yaml_contents.pop("operator", "equals")
        possible_operators = ["equals", "not equal", "greater than",
                              "less than", "greater than or equal",
                              "less than or equal", "pattern match"]

        if value.operator not in possible_operators:
            raise ValueError(
                "Found an invalid operator value '%s' in '%s'. "
                "Expected one of: %s"
                % (value.operator, yaml_file, ", ".join(possible_operators))
            )

        value.interactive = \
            yaml_contents.pop("interactive", "false").lower() == "true"

        value.options = required_key(yaml_contents, "options")
        del yaml_contents["options"]
        value.warnings = yaml_contents.pop("warnings", [])

        for warning_list in value.warnings:
            if len(warning_list) != 1:
                raise ValueError("Only one key/value pair should exist for each dictionary")

        if yaml_contents:
            raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s"
                               % (yaml_file, yaml_contents))

        return value

    def to_xml_element(self):
        """Render this value as an XCCDF <Value> element."""
        value = ET.Element('Value')
        value.set('id', self.id_)
        value.set('type', self.type_)
        if self.operator != "equals":  # equals is the default
            value.set('operator', self.operator)
        if self.interactive:  # False is the default
            value.set('interactive', 'true')
        title = ET.SubElement(value, 'title')
        title.text = self.title
        add_sub_element(value, 'description', self.description)
        add_warning_elements(value, self.warnings)

        for selector, option in self.options.items():
            # do not confuse Value with big V with value with small v
            # value is child element of Value
            value_small = ET.SubElement(value, 'value')
            # by XCCDF spec, default value is value without selector
            if selector != "default":
                value_small.set('selector', str(selector))
            value_small.text = str(option)

        return value

    def to_file(self, file_name):
        """Write this value's XML representation to *file_name*."""
        root = self.to_xml_element()
        tree = ET.ElementTree(root)
        tree.write(file_name)
class Benchmark(object):
"""Represents XCCDF Benchmark
"""
    def __init__(self, id_):
        self.id_ = id_
        self.title = ""
        self.status = ""
        self.description = ""
        self.notice_id = ""
        self.notice_description = ""
        self.front_matter = ""
        self.rear_matter = ""
        self.cpes = []
        self.version = "0.1"
        self.profiles = []
        self.values = {}
        self.bash_remediation_fns_group = None
        self.groups = {}
        self.rules = {}
        self.product_cpe_names = []

        # This is required for OCIL clauses
        conditional_clause = Value("conditional_clause")
        conditional_clause.title = "A conditional clause for check statements."
        conditional_clause.description = conditional_clause.title
        conditional_clause.type_ = "string"
        conditional_clause.options = {"": "This is a placeholder"}

        self.add_value(conditional_clause)
    @classmethod
    def from_yaml(cls, yaml_file, id_, env_yaml=None):
        """Build a Benchmark with the given id from a benchmark YAML file;
        returns None when the file expands to nothing. Raises on leftover
        (unparsed) keys."""
        yaml_contents = open_and_macro_expand(yaml_file, env_yaml)
        if yaml_contents is None:
            return None

        benchmark = cls(id_)
        benchmark.title = required_key(yaml_contents, "title")
        del yaml_contents["title"]
        benchmark.status = required_key(yaml_contents, "status")
        del yaml_contents["status"]
        benchmark.description = required_key(yaml_contents, "description")
        del yaml_contents["description"]
        notice_contents = required_key(yaml_contents, "notice")
        benchmark.notice_id = required_key(notice_contents, "id")
        del notice_contents["id"]
        benchmark.notice_description = required_key(notice_contents,
                                                    "description")
        del notice_contents["description"]
        # Drop the "notice" key only once all of its sub-keys were consumed.
        if not notice_contents:
            del yaml_contents["notice"]

        benchmark.front_matter = required_key(yaml_contents,
                                              "front-matter")
        del yaml_contents["front-matter"]
        benchmark.rear_matter = required_key(yaml_contents,
                                             "rear-matter")
        del yaml_contents["rear-matter"]
        benchmark.version = str(required_key(yaml_contents, "version"))
        del yaml_contents["version"]

        if env_yaml:
            benchmark.product_cpe_names = env_yaml["product_cpes"].get_product_cpe_names()

        if yaml_contents:
            raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s"
                               % (yaml_file, yaml_contents))

        return benchmark
    def add_profiles_from_dir(self, dir_, env_yaml):
        """Load every *.profile file in *dir_* (non-recursively) and append
        the resulting profiles; incomplete-documentation profiles are
        skipped, unknown extensions produce a warning on stderr."""
        for dir_item in sorted(os.listdir(dir_)):
            dir_item_path = os.path.join(dir_, dir_item)
            if not os.path.isfile(dir_item_path):
                continue

            _, ext = os.path.splitext(os.path.basename(dir_item_path))
            if ext != '.profile':
                sys.stderr.write(
                    "Encountered file '%s' while looking for profiles, "
                    "extension '%s' is unknown. Skipping..\n"
                    % (dir_item, ext)
                )
                continue

            try:
                new_profile = ProfileWithInlinePolicies.from_yaml(dir_item_path, env_yaml)
            except DocumentationNotComplete:
                continue
            except Exception as exc:
                msg = ("Error building profile from '{fname}': '{error}'"
                       .format(fname=dir_item_path, error=str(exc)))
                raise RuntimeError(msg)
            if new_profile is None:
                continue

            self.profiles.append(new_profile)
    def add_bash_remediation_fns_from_file(self, file_):
        """Parse the shared bash remediation functions XML and keep its root
        element for inclusion in the benchmark output."""
        if not file_:
            # bash-remediation-functions.xml doesn't exist
            return
        tree = ET.parse(file_)
        self.bash_remediation_fns_group = tree.getroot()
def to_xml_element(self):
    """Serialize the benchmark into an XCCDF 1.1 <Benchmark> XML element.

    Element order matters for XCCDF schema validity: status, title,
    description, notice, front-/rear-matter, platforms, version,
    metadata, then profiles, values, remediation functions, groups and
    rules.
    """
    root = ET.Element('Benchmark')
    root.set('xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance')
    root.set('xmlns:xhtml', 'http://www.w3.org/1999/xhtml')
    root.set('xmlns:dc', 'http://purl.org/dc/elements/1.1/')
    root.set('id', 'product-name')
    root.set('xsi:schemaLocation',
             'http://checklists.nist.gov/xccdf/1.1 xccdf-1.1.4.xsd')
    root.set('style', 'SCAP_1.1')
    root.set('resolved', 'false')
    root.set('xml:lang', 'en-US')
    # The status element carries today's date, i.e. the build date.
    status = ET.SubElement(root, 'status')
    status.set('date', datetime.date.today().strftime("%Y-%m-%d"))
    status.text = self.status
    add_sub_element(root, "title", self.title)
    add_sub_element(root, "description", self.description)
    notice = add_sub_element(root, "notice", self.notice_description)
    notice.set('id', self.notice_id)
    add_sub_element(root, "front-matter", self.front_matter)
    add_sub_element(root, "rear-matter", self.rear_matter)

    # The Benchmark applicability is determined by the CPEs
    # defined in the product.yml
    # (product_cpe_names is filled in from_yaml when env_yaml is given;
    # presumably always set by the constructor — TODO confirm, the
    # __init__ is outside this view)
    for cpe_name in self.product_cpe_names:
        plat = ET.SubElement(root, "platform")
        plat.set("idref", cpe_name)

    version = ET.SubElement(root, 'version')
    version.text = self.version
    # Empty metadata element is still required by the schema.
    ET.SubElement(root, "metadata")

    for profile in self.profiles:
        root.append(profile.to_xml_element())

    for value in self.values.values():
        root.append(value.to_xml_element())
    if self.bash_remediation_fns_group is not None:
        root.append(self.bash_remediation_fns_group)

    groups_in_bench = list(self.groups.keys())
    priority_order = ["system", "services"]
    groups_in_bench = reorder_according_to_ordering(groups_in_bench, priority_order)

    # Make system group the first, followed by services group
    for group_id in groups_in_bench:
        group = self.groups.get(group_id)
        # Products using application benchmark don't have system or services group
        if group is not None:
            root.append(group.to_xml_element())

    for rule in self.rules.values():
        root.append(rule.to_xml_element())

    return root
def to_file(self, file_name):
    """Write the benchmark, serialized as XCCDF XML, into *file_name*."""
    ET.ElementTree(self.to_xml_element()).write(file_name)
def add_value(self, value):
    """Register *value* under its id; a None value is silently ignored."""
    if value is not None:
        self.values[value.id_] = value
# The benchmark acts as the root group, so this signature has to stay
# compatible with Group.add_group().
def add_group(self, group, env_yaml=None):
    """Register *group* under its id; a None group is silently ignored."""
    if group is not None:
        self.groups[group.id_] = group
def add_rule(self, rule):
    """Register *rule* under its id; a None rule is silently ignored."""
    if rule is not None:
        self.rules[rule.id_] = rule
def to_xccdf(self):
    """Placeholder for emitting valid XCCDF directly instead of the SSG
    shorthand format; not implemented yet."""
    raise NotImplementedError
def __str__(self):
    """The string form of a benchmark is its id."""
    return self.id_
class Group(object):
    """Represents an XCCDF Group of rules, values and sub-groups.

    A Group is loaded from a ``group.yml`` file and mirrors the XCCDF
    <Group> element produced by :meth:`to_xml_element`.
    """

    # Attributes propagated to children that do not set their own value
    # (see _pass_our_properties_on_to()).
    ATTRIBUTES_TO_PASS_ON = (
        "platforms",
    )

    def __init__(self, id_):
        self.id_ = id_
        self.prodtype = "all"
        self.title = ""
        self.description = ""
        self.warnings = []
        self.requires = []
        self.conflicts = []
        self.values = {}
        self.groups = {}
        self.rules = {}
        # self.platforms is used further in the build system
        # self.platform is merged into self.platforms
        # it is here for backward compatibility
        self.platforms = set()
        self.cpe_names = set()
        self.platform = None

    @classmethod
    def from_yaml(cls, yaml_file, env_yaml=None):
        """Build a Group from a ``group.yml`` file.

        Returns None when macro expansion yields no content.  Raises
        RuntimeError on unknown YAML keys and ValueError on malformed
        warning entries.  The group id is the name of the directory that
        holds the ``group.yml`` file.
        """
        yaml_contents = open_and_macro_expand(yaml_file, env_yaml)
        if yaml_contents is None:
            return None

        group_id = os.path.basename(os.path.dirname(yaml_file))
        group = cls(group_id)
        group.prodtype = yaml_contents.pop("prodtype", "all")
        group.title = required_key(yaml_contents, "title")
        del yaml_contents["title"]
        group.description = required_key(yaml_contents, "description")
        del yaml_contents["description"]
        group.warnings = yaml_contents.pop("warnings", [])
        group.conflicts = yaml_contents.pop("conflicts", [])
        group.requires = yaml_contents.pop("requires", [])
        group.platform = yaml_contents.pop("platform", None)
        # The YAML parser yields a *list* for the "platforms" key; convert
        # to a set so the .add() below cannot fail with AttributeError
        # (Rule.from_yaml performs the same conversion).
        group.platforms = set(yaml_contents.pop("platforms", set()))
        # ensure that content of group.platform is in group.platforms as
        # well
        if group.platform is not None:
            group.platforms.add(group.platform)

        if env_yaml:
            for platform in group.platforms:
                try:
                    group.cpe_names.add(env_yaml["product_cpes"].get_cpe_name(platform))
                except CPEDoesNotExist:
                    print("Unsupported platform '%s' in group '%s'." % (platform, group.id_))
                    raise

        for warning_list in group.warnings:
            if len(warning_list) != 1:
                raise ValueError("Only one key/value pair should exist for each dictionary")

        if yaml_contents:
            raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s"
                               % (yaml_file, yaml_contents))
        group.validate_prodtype(yaml_file)
        return group

    def validate_prodtype(self, yaml_file):
        """Reject prodtype strings whose comma-separated items carry
        surrounding whitespace."""
        for ptype in self.prodtype.split(","):
            if ptype.strip() != ptype:
                msg = (
                    "Comma-separated '{prodtype}' prodtype "
                    "in {yaml_file} contains whitespace."
                    .format(prodtype=self.prodtype, yaml_file=yaml_file))
                raise ValueError(msg)

    def to_xml_element(self):
        """Serialize the group (and, recursively, its children) into an
        XCCDF <Group> element, applying the documented ordering rules."""
        group = ET.Element('Group')
        group.set('id', self.id_)
        if self.prodtype != "all":
            group.set("prodtype", self.prodtype)
        title = ET.SubElement(group, 'title')
        title.text = self.title
        add_sub_element(group, 'description', self.description)
        add_warning_elements(group, self.warnings)
        add_nondata_subelements(group, "requires", "id", self.requires)
        add_nondata_subelements(group, "conflicts", "id", self.conflicts)

        for cpe_name in self.cpe_names:
            platform_el = ET.SubElement(group, "platform")
            platform_el.set("idref", cpe_name)

        for _value in self.values.values():
            group.append(_value.to_xml_element())

        # Rules that install or remove packages affect remediation
        # of other rules.
        # When packages installed/removed rules come first:
        # The Rules are ordered in more logical way, and
        # remediation order is natural, first the package is installed, then configured.
        rules_in_group = list(self.rules.keys())
        regex = r'(package_.*_(installed|removed))|(service_.*_(enabled|disabled))$'
        priority_order = ["installed", "removed", "enabled", "disabled"]
        rules_in_group = reorder_according_to_ordering(rules_in_group, priority_order, regex)

        # Add rules in priority order, first all packages installed, then removed,
        # followed by services enabled, then disabled
        for rule_id in rules_in_group:
            group.append(self.rules.get(rule_id).to_xml_element())

        # Add the sub groups after any current level group rules.
        # As package installed/removed and service enabled/disabled rules are usuallly in
        # top level group, this ensures groups that further configure a package or service
        # are after rules that install or remove it.
        groups_in_group = list(self.groups.keys())
        priority_order = [
            # Make sure rpm_verify_(hashes|permissions|ownership) are run before any other rule.
            # Due to conflicts between rules rpm_verify_* rules and any rule that configures
            # stricter settings, like file_permissions_grub2_cfg and sudo_dedicated_group,
            # the rules deviating from the system default should be evaluated later.
            # So that in the end the system has contents, permissions and ownership reset, and
            # any deviations or stricter settings are applied by the rules in the profile.
            "software", "integrity", "integrity-software", "rpm_verification",
            # The account group has to precede audit group because
            # the rule package_screen_installed is desired to be executed before the rule
            # audit_rules_privileged_commands, othervise the rule
            # does not catch newly installed screen binary during remediation
            # and report fail
            "accounts", "auditing",
            # The FIPS group should come before Crypto,
            # if we want to set a different (stricter) Crypto Policy than FIPS.
            "fips", "crypto",
            # The firewalld_activation must come before ruleset_modifications, othervise
            # remediations for ruleset_modifications won't work
            "firewalld_activation", "ruleset_modifications",
            # Rules from group disabling_ipv6 must precede rules from configuring_ipv6,
            # otherwise the remediation prints error although it is successful
            "disabling_ipv6", "configuring_ipv6"
        ]
        groups_in_group = reorder_according_to_ordering(groups_in_group, priority_order)
        for group_id in groups_in_group:
            _group = self.groups[group_id]
            group.append(_group.to_xml_element())

        return group

    def to_file(self, file_name):
        """Write the group, serialized as XCCDF XML, into *file_name*."""
        root = self.to_xml_element()
        tree = ET.ElementTree(root)
        tree.write(file_name)

    def add_value(self, value):
        """Register *value* under its id; None is silently ignored."""
        if value is None:
            return
        self.values[value.id_] = value

    def add_group(self, group, env_yaml=None):
        """Attach *group* as a child, inheriting our platforms when the
        child declares none, and resolving CPE names when *env_yaml* is
        available."""
        if group is None:
            return
        if self.platforms and not group.platforms:
            group.platforms = self.platforms
        self.groups[group.id_] = group
        self._pass_our_properties_on_to(group)
        # Once the group has inherited properties, update cpe_names
        if env_yaml:
            for platform in group.platforms:
                try:
                    group.cpe_names.add(env_yaml["product_cpes"].get_cpe_name(platform))
                except CPEDoesNotExist:
                    print("Unsupported platform '%s' in group '%s'." % (platform, group.id_))
                    raise

    def _pass_our_properties_on_to(self, obj):
        """Copy each ATTRIBUTES_TO_PASS_ON attribute to *obj* when the
        child's own value is None."""
        for attr in self.ATTRIBUTES_TO_PASS_ON:
            if hasattr(obj, attr) and getattr(obj, attr) is None:
                setattr(obj, attr, getattr(self, attr))

    def add_rule(self, rule, env_yaml=None):
        """Attach *rule* as a child, inheriting our platforms when the
        rule declares none, and resolving CPE names when *env_yaml* is
        available."""
        if rule is None:
            return
        if self.platforms and not rule.platforms:
            rule.platforms = self.platforms
        self.rules[rule.id_] = rule
        self._pass_our_properties_on_to(rule)
        # Once the rule has inherited properties, update cpe_names
        if env_yaml:
            for platform in rule.platforms:
                try:
                    rule.cpe_names.add(env_yaml["product_cpes"].get_cpe_name(platform))
                except CPEDoesNotExist:
                    print("Unsupported platform '%s' in rule '%s'." % (platform, rule.id_))
                    raise

    def __str__(self):
        """The string form of a group is its id."""
        return self.id_
class Rule(object):
    """Represents an XCCDF Rule loaded from a rule.yml file.

    Holds the rule's metadata (title, severity, references, identifiers,
    OCIL data, platforms, template, ...) and knows how to validate it
    and serialize it into an XCCDF <Rule> element.
    """

    # Map of supported rule.yml keys to callables producing the default
    # value.  A callable returning an Exception marks a mandatory key:
    # the exception is raised when the key is missing.
    YAML_KEYS_DEFAULTS = {
        "prodtype": lambda: "all",
        "title": lambda: RuntimeError("Missing key 'title'"),
        "description": lambda: RuntimeError("Missing key 'description'"),
        "rationale": lambda: RuntimeError("Missing key 'rationale'"),
        "severity": lambda: RuntimeError("Missing key 'severity'"),
        "references": lambda: dict(),
        "identifiers": lambda: dict(),
        "ocil_clause": lambda: None,
        "ocil": lambda: None,
        "oval_external_content": lambda: None,
        "warnings": lambda: list(),
        "conflicts": lambda: list(),
        "requires": lambda: list(),
        "platform": lambda: None,
        "platforms": lambda: set(),
        "inherited_platforms": lambda: list(),
        "template": lambda: None,
        "definition_location": lambda: None,
    }

    # References that must always carry a product qualifier (ref@product).
    PRODUCT_REFERENCES = ("stigid", "cis",)
    # References that must never carry a product qualifier.
    GLOBAL_REFERENCES = ("srg", "vmmsrg", "disa", "cis-csc",)

    def __init__(self, id_):
        self.id_ = id_
        self.prodtype = "all"
        self.title = ""
        self.description = ""
        self.definition_location = ""
        self.rationale = ""
        self.severity = "unknown"
        self.references = {}
        self.identifiers = {}
        self.ocil_clause = None
        self.ocil = None
        self.oval_external_content = None
        self.warnings = []
        self.requires = []
        self.conflicts = []
        # self.platforms is used further in the build system
        # self.platform is merged into self.platforms
        # it is here for backward compatibility
        self.platform = None
        self.platforms = set()
        self.cpe_names = set()
        self.inherited_platforms = []  # platforms inherited from the group
        self.template = None
        self.local_env_yaml = None

    @classmethod
    def from_yaml(cls, yaml_file, env_yaml=None):
        """Build a Rule from a rule.yml file.

        Returns None when macro expansion yields no content.  Raises
        RuntimeError on missing mandatory keys or unknown keys, and
        propagates CPEDoesNotExist for unresolvable platforms.
        """
        yaml_file = os.path.normpath(yaml_file)

        rule_id, ext = os.path.splitext(os.path.basename(yaml_file))
        if rule_id == "rule" and ext == ".yml":
            rule_id = get_rule_dir_id(yaml_file)

        # Expose the rule id to Jinja macros via a local env_yaml copy.
        local_env_yaml = None
        if env_yaml:
            local_env_yaml = dict()
            local_env_yaml.update(env_yaml)
            local_env_yaml["rule_id"] = rule_id

        yaml_contents = open_and_macro_expand(yaml_file, local_env_yaml)
        if yaml_contents is None:
            return None

        rule = cls(rule_id)
        if local_env_yaml:
            rule.local_env_yaml = local_env_yaml
        try:
            rule._set_attributes_from_dict(yaml_contents)
        except RuntimeError as exc:
            msg = ("Error processing '{fname}': {err}"
                   .format(fname=yaml_file, err=str(exc)))
            raise RuntimeError(msg) from exc

        # platforms are read as list from the yaml file
        # we need them to convert to set again
        rule.platforms = set(rule.platforms)

        for warning_list in rule.warnings:
            if len(warning_list) != 1:
                raise ValueError("Only one key/value pair should exist for each dictionary")

        # ensure that content of rule.platform is in rule.platforms as
        # well
        if rule.platform is not None:
            rule.platforms.add(rule.platform)

        # Convert the platform names to CPE names
        # But only do it if an env_yaml was specified (otherwise there would be no product CPEs
        # to lookup), and the rule's prodtype matches the product being built
        if env_yaml and env_yaml["product"] in parse_prodtype(rule.prodtype):
            for platform in rule.platforms:
                try:
                    rule.cpe_names.add(env_yaml["product_cpes"].get_cpe_name(platform))
                except CPEDoesNotExist:
                    print("Unsupported platform '%s' in rule '%s'." % (platform, rule.id_))
                    raise

        if yaml_contents:
            raise RuntimeError("Unparsed YAML data in '%s'.\n\n%s"
                               % (yaml_file, yaml_contents))

        if not rule.definition_location:
            rule.definition_location = yaml_file

        rule.validate_prodtype(yaml_file)
        rule.validate_identifiers(yaml_file)
        rule.validate_references(yaml_file)
        return rule

    def _verify_stigid_format(self, product):
        """Reject comma-separated (multiple) STIG IDs.

        *product* is currently unused but kept for signature stability
        with callers.
        """
        stig_id = self.references.get("stigid", None)
        if not stig_id:
            return
        if "," in stig_id:
            raise ValueError("Rules can not have multiple STIG IDs.")

    def _verify_disa_cci_format(self):
        """Ensure every comma-separated DISA reference matches CCI-XXXXXX."""
        cci_id = self.references.get("disa", None)
        if not cci_id:
            return
        cci_ex = re.compile(r'^CCI-[0-9]{6}$')
        for cci in cci_id.split(","):
            if not cci_ex.match(cci):
                raise ValueError("CCI '{}' is in the wrong format! "
                                 "Format should be similar to: "
                                 "CCI-XXXXXX".format(cci))
        self.references["disa"] = cci_id

    def normalize(self, product):
        """Collapse product-qualified references/identifiers/template
        entries for *product*, wrapping any failure with the rule id."""
        try:
            self.make_refs_and_identifiers_product_specific(product)
            self.make_template_product_specific(product)
        except Exception as exc:
            msg = (
                "Error normalizing '{rule}': {msg}"
                .format(rule=self.id_, msg=str(exc))
            )
            raise RuntimeError(msg) from exc

    def _get_product_only_references(self):
        """Return the subset of references that belong to the
        PRODUCT_REFERENCES families (qualified or not)."""
        product_references = dict()

        for ref in Rule.PRODUCT_REFERENCES:
            start = "{0}@".format(ref)
            for gref, gval in self.references.items():
                if ref == gref or gref.startswith(start):
                    product_references[gref] = gval
        return product_references

    def make_template_product_specific(self, product):
        """Collapse product-qualified template vars and backends."""
        product_suffix = "@{0}".format(product)

        if not self.template:
            return

        not_specific_vars = self.template.get("vars", dict())
        specific_vars = self._make_items_product_specific(
            not_specific_vars, product_suffix, True)
        self.template["vars"] = specific_vars

        not_specific_backends = self.template.get("backends", dict())
        specific_backends = self._make_items_product_specific(
            not_specific_backends, product_suffix, True)
        self.template["backends"] = specific_backends

    def make_refs_and_identifiers_product_specific(self, product):
        """Collapse product-qualified references and identifiers, and
        validate the STIG/DISA reference formats afterwards."""
        product_suffix = "@{0}".format(product)

        product_references = self._get_product_only_references()
        general_references = self.references.copy()
        for todel in product_references:
            general_references.pop(todel)
        # Product-family references must always be product-qualified.
        for ref in Rule.PRODUCT_REFERENCES:
            if ref in general_references:
                msg = "Unexpected reference identifier ({0}) without "
                msg += "product qualifier ({0}@{1}) while building rule "
                msg += "{2}"
                msg = msg.format(ref, product, self.id_)
                raise ValueError(msg)

        to_set = dict(
            identifiers=(self.identifiers, False),
            general_references=(general_references, True),
            product_references=(product_references, False),
        )
        for name, (dic, allow_overwrites) in to_set.items():
            try:
                new_items = self._make_items_product_specific(
                    dic, product_suffix, allow_overwrites)
            except ValueError as exc:
                msg = (
                    "Error processing {what} for rule '{rid}': {msg}"
                    .format(what=name, rid=self.id_, msg=str(exc))
                )
                raise ValueError(msg) from exc
            dic.clear()
            dic.update(new_items)

        self.references = general_references
        self._verify_disa_cci_format()
        self.references.update(product_references)

        self._verify_stigid_format(product)

    def _make_items_product_specific(self, items_dict, product_suffix, allow_overwrites=False):
        """Return *items_dict* with entries matching *product_suffix*
        collapsed onto their unqualified label; entries qualified for
        other products are dropped."""
        new_items = dict()
        for full_label, value in items_dict.items():
            if "@" not in full_label and full_label not in new_items:
                new_items[full_label] = value
                continue

            label = full_label.split("@")[0]

            # this test should occur before matching product_suffix with the product qualifier
            # present in the reference, so it catches problems even for products that are not
            # being built at the moment
            if label in Rule.GLOBAL_REFERENCES:
                msg = (
                    "You cannot use product-qualified for the '{item_u}' reference. "
                    "Please remove the product-qualifier and merge values with the "
                    "existing reference if there is any. Original line: {item_q}: {value_q}"
                    .format(item_u=label, item_q=full_label, value_q=value)
                )
                raise ValueError(msg)

            if not full_label.endswith(product_suffix):
                continue

            if label in items_dict and not allow_overwrites and value != items_dict[label]:
                msg = (
                    "There is a product-qualified '{item_q}' item, "
                    "but also an unqualified '{item_u}' item "
                    "and those two differ in value - "
                    "'{value_q}' vs '{value_u}' respectively."
                    .format(item_q=full_label, item_u=label,
                            value_q=value, value_u=items_dict[label])
                )
                raise ValueError(msg)
            new_items[label] = value
        return new_items

    def _set_attributes_from_dict(self, yaml_contents):
        """Pop every known key from *yaml_contents* onto the instance,
        raising the sentinel exception for missing mandatory keys."""
        for key, default_getter in self.YAML_KEYS_DEFAULTS.items():
            if key not in yaml_contents:
                value = default_getter()
                if isinstance(value, Exception):
                    raise value
            else:
                value = yaml_contents.pop(key)
            setattr(self, key, value)

    def to_contents_dict(self):
        """
        Returns a dictionary that is the same schema as the dict obtained when loading rule YAML.
        """
        yaml_contents = dict()
        for key in Rule.YAML_KEYS_DEFAULTS:
            yaml_contents[key] = getattr(self, key)
        return yaml_contents

    def validate_identifiers(self, yaml_file):
        """Check identifiers are non-empty string pairs and that CCE
        identifiers have a valid format and checksum."""
        if self.identifiers is None:
            raise ValueError("Empty identifier section in file %s" % yaml_file)

        # Validate all identifiers are non-empty:
        for ident_type, ident_val in self.identifiers.items():
            if not isinstance(ident_type, str) or not isinstance(ident_val, str):
                raise ValueError("Identifiers and values must be strings: %s in file %s"
                                 % (ident_type, yaml_file))
            if ident_val.strip() == "":
                raise ValueError("Identifiers must not be empty: %s in file %s"
                                 % (ident_type, yaml_file))
            if ident_type[0:3] == 'cce':
                if not is_cce_format_valid(ident_val):
                    raise ValueError("CCE Identifier format must be valid: invalid format '%s' for CCE '%s'"
                                     " in file '%s'" % (ident_val, ident_type, yaml_file))
                if not is_cce_value_valid("CCE-" + ident_val):
                    raise ValueError("CCE Identifier value is not a valid checksum: invalid value '%s' for CCE '%s'"
                                     " in file '%s'" % (ident_val, ident_type, yaml_file))

    def validate_references(self, yaml_file):
        """Check references are non-empty string pairs and that
        comma-separated values carry no surrounding whitespace."""
        if self.references is None:
            raise ValueError("Empty references section in file %s" % yaml_file)

        for ref_type, ref_val in self.references.items():
            if not isinstance(ref_type, str) or not isinstance(ref_val, str):
                raise ValueError("References and values must be strings: %s in file %s"
                                 % (ref_type, yaml_file))
            if ref_val.strip() == "":
                raise ValueError("References must not be empty: %s in file %s"
                                 % (ref_type, yaml_file))

        for ref_type, ref_val in self.references.items():
            for ref in ref_val.split(","):
                if ref.strip() != ref:
                    msg = (
                        "Comma-separated '{ref_type}' reference "
                        "in {yaml_file} contains whitespace."
                        .format(ref_type=ref_type, yaml_file=yaml_file))
                    raise ValueError(msg)

    def validate_prodtype(self, yaml_file):
        """Reject prodtype strings whose comma-separated items carry
        surrounding whitespace."""
        for ptype in self.prodtype.split(","):
            if ptype.strip() != ptype:
                msg = (
                    "Comma-separated '{prodtype}' prodtype "
                    "in {yaml_file} contains whitespace."
                    .format(prodtype=self.prodtype, yaml_file=yaml_file))
                raise ValueError(msg)

    def to_xml_element(self):
        """Serialize the rule into an XCCDF <Rule> element.

        Product-qualified identifiers/references (still containing '@',
        i.e. not normalized) get their own prodtype-tagged sub-element;
        unqualified ones are merged into a single <ident>/<ref>.
        """
        rule = ET.Element('Rule')
        rule.set('id', self.id_)
        if self.prodtype != "all":
            rule.set("prodtype", self.prodtype)
        rule.set('severity', self.severity)
        add_sub_element(rule, 'title', self.title)
        add_sub_element(rule, 'description', self.description)
        add_sub_element(rule, 'rationale', self.rationale)

        main_ident = ET.Element('ident')
        for ident_type, ident_val in self.identifiers.items():
            # This is not true if items were normalized
            if '@' in ident_type:
                # the ident is applicable only on some product
                # format : 'policy@product', eg. 'stigid@product'
                # for them, we create a separate <ref> element
                policy, product = ident_type.split('@')
                ident = ET.SubElement(rule, 'ident')
                ident.set(policy, ident_val)
                ident.set('prodtype', product)
            else:
                main_ident.set(ident_type, ident_val)

        if main_ident.attrib:
            rule.append(main_ident)

        main_ref = ET.Element('ref')
        for ref_type, ref_val in self.references.items():
            # This is not true if items were normalized
            if '@' in ref_type:
                # the reference is applicable only on some product
                # format : 'policy@product', eg. 'stigid@product'
                # for them, we create a separate <ref> element
                policy, product = ref_type.split('@')
                ref = ET.SubElement(rule, 'ref')
                ref.set(policy, ref_val)
                ref.set('prodtype', product)
            else:
                main_ref.set(ref_type, ref_val)

        if main_ref.attrib:
            rule.append(main_ref)

        if self.oval_external_content:
            check = ET.SubElement(rule, 'check')
            check.set("system", "http://oval.mitre.org/XMLSchema/oval-definitions-5")
            external_content = ET.SubElement(check, "check-content-ref")
            external_content.set("href", self.oval_external_content)
        else:
            # TODO: This is pretty much a hack, oval ID will be the same as rule ID
            # and we don't want the developers to have to keep them in sync.
            # Therefore let's just add an OVAL ref of that ID.
            oval_ref = ET.SubElement(rule, "oval")
            oval_ref.set("id", self.id_)

        if self.ocil or self.ocil_clause:
            ocil = add_sub_element(rule, 'ocil', self.ocil if self.ocil else "")
            if self.ocil_clause:
                ocil.set("clause", self.ocil_clause)

        add_warning_elements(rule, self.warnings)
        add_nondata_subelements(rule, "requires", "id", self.requires)
        add_nondata_subelements(rule, "conflicts", "id", self.conflicts)

        for cpe_name in self.cpe_names:
            platform_el = ET.SubElement(rule, "platform")
            platform_el.set("idref", cpe_name)

        return rule

    def to_file(self, file_name):
        """Write the rule, serialized as XCCDF XML, into *file_name*."""
        root = self.to_xml_element()
        tree = ET.ElementTree(root)
        tree.write(file_name)
class DirectoryLoader(object):
    """Walks one level of a guide directory tree, sorting its entries
    into benchmark/group/rule/value files, then recurses into
    sub-directories.  Subclasses decide what to do with the collected
    rules and values by overriding the ``_process_*`` / loader hooks.
    """

    def __init__(self, profiles_dir, bash_remediation_fns, env_yaml):
        # What this directory level contributed.
        self.benchmark_file = None
        self.group_file = None
        self.loaded_group = None
        self.rule_files = []
        self.value_files = []
        self.subdirectories = []

        # Aggregates over this directory and everything below it.
        self.all_values = set()
        self.all_rules = set()
        self.all_groups = set()

        self.profiles_dir = profiles_dir
        self.bash_remediation_fns = bash_remediation_fns
        self.env_yaml = env_yaml
        self.product = env_yaml["product"]

        self.parent_group = None

    def _collect_items_to_load(self, guide_directory):
        """Classify each entry of *guide_directory* into value files,
        the benchmark/group file, rule files or sub-directories."""
        for entry in sorted(os.listdir(guide_directory)):
            entry_path = os.path.join(guide_directory, entry)
            _, extension = os.path.splitext(entry)

            if extension == '.var':
                self.value_files.append(entry_path)
            elif entry == "benchmark.yml":
                if self.benchmark_file:
                    raise ValueError("Multiple benchmarks in one directory")
                self.benchmark_file = entry_path
            elif entry == "group.yml":
                if self.group_file:
                    raise ValueError("Multiple groups in one directory")
                self.group_file = entry_path
            elif extension == '.rule':
                self.rule_files.append(entry_path)
            elif is_rule_dir(entry_path):
                self.rule_files.append(get_rule_dir_yaml(entry_path))
            elif entry != "tests":
                if os.path.isdir(entry_path):
                    self.subdirectories.append(entry_path)
                else:
                    sys.stderr.write(
                        "Encountered file '%s' while recursing, extension '%s' "
                        "is unknown. Skipping..\n"
                        % (entry, extension)
                    )

    def load_benchmark_or_group(self, guide_directory):
        """
        Loads a given benchmark or group from the specified benchmark_file or
        group_file, in the context of guide_directory, profiles_dir,
        env_yaml, and bash_remediation_fns.

        Returns the loaded group or benchmark.
        """
        if self.group_file and self.benchmark_file:
            raise ValueError("A .benchmark file and a .group file were found in "
                             "the same directory '%s'" % (guide_directory))

        group = None
        # we treat benchmark as a special form of group in the following code
        if self.benchmark_file:
            group = Benchmark.from_yaml(
                self.benchmark_file, 'product-name', self.env_yaml
            )
            if self.profiles_dir:
                group.add_profiles_from_dir(self.profiles_dir, self.env_yaml)
            group.add_bash_remediation_fns_from_file(self.bash_remediation_fns)

        if self.group_file:
            group = Group.from_yaml(self.group_file, self.env_yaml)
            self.all_groups.add(group.id_)

        return group

    def _load_group_process_and_recurse(self, guide_directory):
        """Load this level's group, hook it to the parent, then process
        values, sub-directories and rules in that order."""
        self.loaded_group = self.load_benchmark_or_group(guide_directory)

        if self.loaded_group:
            if self.parent_group:
                self.parent_group.add_group(self.loaded_group, env_yaml=self.env_yaml)

            self._process_values()
            self._recurse_into_subdirs()
            self._process_rules()

    def process_directory_tree(self, start_dir, extra_group_dirs=None):
        """Entry point: walk *start_dir* plus any *extra_group_dirs*."""
        self._collect_items_to_load(start_dir)
        if extra_group_dirs is not None:
            self.subdirectories += extra_group_dirs
        self._load_group_process_and_recurse(start_dir)

    def _recurse_into_subdirs(self):
        """Run a fresh child loader per sub-directory and merge its
        aggregate id/object sets back into ours."""
        for subdir in self.subdirectories:
            loader = self._get_new_loader()
            loader.parent_group = self.loaded_group
            loader.process_directory_tree(subdir)
            self.all_values.update(loader.all_values)
            self.all_rules.update(loader.all_rules)
            self.all_groups.update(loader.all_groups)

    def _get_new_loader(self):
        """Subclass hook: return a child loader sharing our config."""
        raise NotImplementedError()

    def _process_values(self):
        """Subclass hook: handle the collected value files."""
        raise NotImplementedError()

    def _process_rules(self):
        """Subclass hook: handle the collected rule files."""
        raise NotImplementedError()
class BuildLoader(DirectoryLoader):
    """DirectoryLoader that instantiates Value and Rule objects for a
    product build, optionally dumping each resolved rule to YAML."""

    def __init__(self, profiles_dir, bash_remediation_fns, env_yaml, resolved_rules_dir=None):
        """
        * `resolved_rules_dir`: optional directory into which every
          processed rule is dumped as ``<rule_id>.yml``; created (with
          missing parents) when it does not exist.
        """
        super(BuildLoader, self).__init__(profiles_dir, bash_remediation_fns, env_yaml)

        self.resolved_rules_dir = resolved_rules_dir
        if resolved_rules_dir:
            # makedirs(exist_ok=True) also creates missing parent
            # directories and avoids the race between a separate
            # isdir() check and mkdir().
            os.makedirs(resolved_rules_dir, exist_ok=True)

    def _process_values(self):
        """Turn every collected .var file into a Value attached to the
        loaded group and recorded in all_values."""
        for value_yaml in self.value_files:
            value = Value.from_yaml(value_yaml, self.env_yaml)
            self.all_values.add(value)
            self.loaded_group.add_value(value)

    def _process_rules(self):
        """Build Rule objects applicable to the current product, attach
        them to the loaded group, and optionally dump each normalized
        rule into resolved_rules_dir."""
        for rule_yaml in self.rule_files:
            try:
                rule = Rule.from_yaml(rule_yaml, self.env_yaml)
            except DocumentationNotComplete:
                # Happens on non-debug build when a rule is "documentation-incomplete"
                continue
            prodtypes = parse_prodtype(rule.prodtype)
            if "all" not in prodtypes and self.product not in prodtypes:
                # The rule does not apply to the product being built.
                continue
            self.all_rules.add(rule)
            self.loaded_group.add_rule(rule, env_yaml=self.env_yaml)

            if self.loaded_group.platforms:
                # Record which platforms the rule inherited from its group.
                rule.inherited_platforms += self.loaded_group.platforms

            if self.resolved_rules_dir:
                output_for_rule = os.path.join(
                    self.resolved_rules_dir, "{id_}.yml".format(id_=rule.id_))
                mkdir_p(self.resolved_rules_dir)
                with open(output_for_rule, "w") as f:
                    rule.normalize(self.env_yaml["product"])
                    yaml.dump(rule.to_contents_dict(), f)

    def _get_new_loader(self):
        """Child loaders share our configuration, including the dump dir."""
        return BuildLoader(
            self.profiles_dir, self.bash_remediation_fns, self.env_yaml, self.resolved_rules_dir)

    def export_group_to_file(self, filename):
        """Write the fully loaded top-level group/benchmark to *filename*."""
        return self.loaded_group.to_file(filename)
| 39.877911 | 116 | 0.60048 |
795884cd113ce101c43bde4e65c504fac805e745 | 501 | py | Python | tracker/templatetags/serializers.py | paul-serafimescu/growth-tracker | 972dd1de534225a9be2e763045db560f420a844c | [
"MIT"
] | 1 | 2021-07-28T04:21:12.000Z | 2021-07-28T04:21:12.000Z | tracker/templatetags/serializers.py | paul-serafimescu/growth-tracker | 972dd1de534225a9be2e763045db560f420a844c | [
"MIT"
] | null | null | null | tracker/templatetags/serializers.py | paul-serafimescu/growth-tracker | 972dd1de534225a9be2e763045db560f420a844c | [
"MIT"
] | null | null | null | from django import template
from django.db import models
from typing import Any, Union
register = template.Library()
@register.filter
def serialize(value: Union[models.Model, list]) -> Union[dict[str, Any], list[dict[str, Any]]]:
    """Template filter turning a model (or a list of models) into plain dicts.

    A single model is serialized via its ``serialize()`` method; a model
    without one degrades to an empty dict.  A list serializes every
    element.  Any other input raises ValueError.
    """
    if isinstance(value, models.Model):
        try:
            return value.serialize()
        except AttributeError:
            # The model does not implement serialize(); degrade gracefully.
            return {}
    elif isinstance(value, list):
        return [model.serialize() for model in value]
    else:
        # Previously raised a bare ValueError() with no message, which made
        # template failures impossible to diagnose.
        raise ValueError(
            "serialize expects a Model or a list of Models, got %r"
            % type(value).__name__)
| 27.833333 | 95 | 0.708583 |
79588554671c30285cba20742448afd53f99b37e | 6,152 | py | Python | labml_nn/transformers/gmlp/__init__.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | 1 | 2021-09-17T18:16:17.000Z | 2021-09-17T18:16:17.000Z | labml_nn/transformers/gmlp/__init__.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | null | null | null | labml_nn/transformers/gmlp/__init__.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | 2 | 2021-06-16T05:56:35.000Z | 2021-10-19T07:33:44.000Z | """
---
title: Pay Attention to MLPs (gMLP)
summary: >
This is an annotated implementation/tutorial of Pay Attention to MLPs (gMLP) in PyTorch.
---
# Pay Attention to MLPs (gMLP)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Pay Attention to MLPs](https://papers.labml.ai/paper/2105.08050).
This paper introduces a Multilayer Perceptron (MLP) based architecture with gating,
which they name **gMLP**. It consists of a stack of $L$ *gMLP* blocks.
Here is [the training code](experiment.html) for a gMLP model based autoregressive model.
[](https://app.labml.ai/run/01bd941ac74c11eb890c1d9196651a4a)
"""
from typing import Optional
import torch
from torch import nn
class GMLPBlock(nn.Module):
    """A single gMLP block.

    With a residual connection around the whole block, it applies:
    pre-norm -> channel projection $U$ with GELU -> Spatial Gating Unit
    (which halves the channel count) -> channel projection $V$ back to
    the model dimension.
    """

    def __init__(self, d_model: int, d_ffn: int, seq_len: int):
        """
        * `d_model`: embedding dimensionality of the input ($d$)
        * `d_ffn`: hidden dimensionality after the first projection
        * `seq_len`: length of the token sequence ($n$)
        """
        super().__init__()
        # Pre-norm layer for the block input.
        self.norm = nn.LayerNorm([d_model])
        # GELU non-linearity applied after the first projection.
        self.activation = nn.GELU()
        # Projection into the hidden (FFN) space: $Z = \sigma(XU)$.
        self.proj1 = nn.Linear(d_model, d_ffn)
        # Spatial gating; its output has half the channels of its input.
        self.sgu = SpacialGatingUnit(d_ffn, seq_len)
        # Projection back into the model space: $Y = \tilde{Z}V$.
        self.proj2 = nn.Linear(d_ffn // 2, d_model)
        # Exposed embedding size; the transformer Encoder wrapper reads
        # this when gMLP blocks substitute for transformer layers.
        self.size = d_model

    def forward(self, *, x: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        * `x`: embeddings of shape `[seq_len, batch_size, d_model]`
        * `mask`: optional `[seq_len, seq_len, 1]` visibility mask,
          forwarded unchanged to the Spatial Gating Unit
        """
        residual = x
        hidden = self.activation(self.proj1(self.norm(x)))
        gated = self.sgu(hidden, mask)
        return self.proj2(gated) + residual
class SpacialGatingUnit(nn.Module):
    """Spatial Gating Unit.

    Splits the input $Z$ in half along the channel axis into
    $(Z_1, Z_2)$ and returns $Z_1 \odot f_{W,b}(Z_2)$, where
    $f_{W,b}$ is a learned linear mixing of $Z_2$ along the *sequence*
    dimension (optionally masked), preceded by layer normalization.
    """

    def __init__(self, d_z: int, seq_len: int):
        """
        * `d_z`: channel dimensionality of the incoming $Z$
        * `seq_len`: maximum sequence length the unit supports
        """
        super().__init__()
        # Normalize the gate half (of size d_z // 2) before mixing.
        self.norm = nn.LayerNorm([d_z // 2])
        # Token-mixing weights $W$, initialized near zero so the unit
        # starts out close to the identity (apart from the split).
        self.weight = nn.Parameter(torch.zeros(seq_len, seq_len).uniform_(-0.01, 0.01), requires_grad=True)
        # Bias $b$ initialized to one, for the same near-identity start.
        self.bias = nn.Parameter(torch.ones(seq_len), requires_grad=True)

    def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        * `z`: input of shape `[seq_len, batch_size, d_z]`
        * `mask`: optional boolean visibility mask of shape
          `[seq_len, seq_len, 1]`; the trailing batch dimension must be
          1 (a single mask shared across the batch)
        """
        n_tokens = z.shape[0]

        # Channel split: first half passes through, second half gates.
        passthrough, gate = torch.chunk(z, 2, dim=-1)

        if mask is not None:
            # Only one shared mask per batch is supported; the batch
            # axis exists for interface compatibility only.
            assert mask.shape[0] == 1 or mask.shape[0] == n_tokens
            assert mask.shape[1] == n_tokens
            assert mask.shape[2] == 1
            mask = mask[:, :, 0]

        gate = self.norm(gate)

        # Truncate the mixing matrix to the actual sequence length.
        mixing = self.weight[:n_tokens, :n_tokens]

        # A zero entry in the (masked) weights blocks information flow
        # from token j into position i.
        if mask is not None:
            mixing = mixing * mask

        # $f_{W,b}(Z_2) = W Z_2 + b$ along the sequence dimension.
        gate = torch.einsum('ij,jbd->ibd', mixing, gate) + self.bias[:n_tokens, None, None]

        # $Z_1 \odot f_{W,b}(Z_2)$
        return passthrough * gate
| 38.45 | 131 | 0.607445 |
795885f8302dbf41ef04e37b87abdd0d4bf12727 | 5,047 | py | Python | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/compiler/tests/slice_ops_test.py | BonexGu/Blik2D | 8e0592787e5c8e8a28682d0e1826b8223eae5983 | [
"MIT"
] | 13 | 2017-02-22T02:20:06.000Z | 2018-06-06T04:18:03.000Z | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/compiler/tests/slice_ops_test.py | BonexGu/Blik2D | 8e0592787e5c8e8a28682d0e1826b8223eae5983 | [
"MIT"
] | null | null | null | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/compiler/tests/slice_ops_test.py | BonexGu/Blik2D | 8e0592787e5c8e8a28682d0e1826b8223eae5983 | [
"MIT"
] | 1 | 2019-10-17T10:58:23.000Z | 2019-10-17T10:58:23.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slicing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SliceTest(XLATestCase):
  """Tests array_ops.slice under XLA compilation."""

  def test1D(self):
    # Slice 4 elements starting at index 2 out of a 10-element vector.
    for dtype in self.numeric_types:
      with self.test_session():
        inp = array_ops.placeholder(dtype, shape=[10])
        with self.test_scope():
          out = array_ops.slice(inp, [2], [4])
        feed = {inp: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}
        self.assertAllEqual([2, 3, 4, 5], out.eval(feed_dict=feed))

  def test3D(self):
    # Take a 1x1x4 sub-block starting at [1, 2, 2] from a 3x3x10 tensor.
    for dtype in self.numeric_types:
      with self.test_session():
        inp = array_ops.placeholder(dtype, shape=[3, 3, 10])
        with self.test_scope():
          out = array_ops.slice(inp, [1, 2, 2], [1, 1, 4])
        feed = {
            inp: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                   [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
                   [5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
                  [[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                   [8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
                  [[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
                   [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                   [9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
        }
        self.assertAllEqual([[[6, 5, 4, 3]]], out.eval(feed_dict=feed))
class StridedSliceTest(XLATestCase):
  """Tests array_ops.strided_slice (positive and negative strides) under XLA."""

  def test1D(self):
    # Every second element of [2:6] -> [2, 4].
    for dtype in self.numeric_types:
      with self.test_session():
        inp = array_ops.placeholder(dtype, shape=[10])
        with self.test_scope():
          out = array_ops.strided_slice(inp, [2], [6], [2])
        feed = {inp: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}
        self.assertAllEqual([2, 4], out.eval(feed_dict=feed))

  # NOTE(review): method name keeps the original 'Negtive' typo so existing
  # test IDs stay stable.
  def test1DNegtiveStride(self):
    # Walk backwards from index 6 (exclusive end 2) with stride -2 -> [6, 4].
    for dtype in self.numeric_types:
      with self.test_session():
        inp = array_ops.placeholder(dtype, shape=[10])
        with self.test_scope():
          out = array_ops.strided_slice(inp, [6], [2], [-2])
        feed = {inp: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}
        self.assertAllEqual([6, 4], out.eval(feed_dict=feed))

  def test3D(self):
    # Strided 3-D slice: begin [0,2,2], end [2,3,6], strides [1,1,2].
    for dtype in self.numeric_types:
      with self.test_session():
        inp = array_ops.placeholder(dtype, shape=[3, 3, 10])
        with self.test_scope():
          out = array_ops.strided_slice(inp, [0, 2, 2], [2, 3, 6], [1, 1, 2])
        feed = {
            inp: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                   [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
                   [5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
                  [[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
                   [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                   [8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
                  [[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
                   [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                   [9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
        }
        self.assertAllEqual([[[1, 9]],
                             [[6, 4]]], out.eval(feed_dict=feed))

  def test3DNegativeStride(self):
    # All three axes walked backwards; innermost axis with stride -2.
    for dtype in self.numeric_types:
      with self.test_session():
        inp = array_ops.placeholder(dtype, shape=[3, 4, 10])
        with self.test_scope():
          out = array_ops.strided_slice(inp, [2, 2, 6], [0, 0, 2], [-1, -1, -2])
        feed = {
            inp: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                   [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
                   [5, 3, 1, 7, 9, 2, 4, 6, 8, 0],
                   [4, 5, 2, 4, 3, 7, 6, 8, 9, 4]],
                  [[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
                   [4, 3, 4, 5, 7, 6, 5, 3, 4, 5],
                   [8, 7, 6, 5, 4, 3, 2, 1, 8, 7],
                   [7, 1, 7, 1, 8, 1, 8, 1, 3, 1]],
                  [[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
                   [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                   [9, 8, 7, 9, 8, 7, 9, 8, 7, 9],
                   [9, 9, 5, 5, 6, 6, 3, 3, 6, 6]]]
        }
        self.assertAllEqual([[[9, 8],
                              [1, 1]],
                             [[2, 4],
                              [5, 7]]], out.eval(feed_dict=feed))
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner, which discovers the cases above.
  googletest.main()
| 35.293706 | 80 | 0.477908 |
7958863b44f3ac219413c002bd17dc882609d001 | 1,327 | py | Python | cafeData/reviews/ReceiptReviewCrawling.py | yuueuni/cafein | 29a4074301c0575d9f0432e9219dbe77d0121e0b | [
"MIT"
] | null | null | null | cafeData/reviews/ReceiptReviewCrawling.py | yuueuni/cafein | 29a4074301c0575d9f0432e9219dbe77d0121e0b | [
"MIT"
] | null | null | null | cafeData/reviews/ReceiptReviewCrawling.py | yuueuni/cafein | 29a4074301c0575d9f0432e9219dbe77d0121e0b | [
"MIT"
] | null | null | null | from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import unicodedata
# Emoji ranges stripped from review text. NOTE(review): this covers only the
# four Unicode blocks below, not every emoji — confirm if full coverage is needed.
emoji_pattern = re.compile("["
                           u"\U0001F600-\U0001F64F"  # emoticons
                           u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                           u"\U0001F680-\U0001F6FF"  # transport & map symbols
                           u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           "]+", flags=re.UNICODE)
# Anything that is not a word character or whitespace. Raw string fixes the
# invalid-escape DeprecationWarning the non-raw '[^\w\s]' produced.
special_character_pattern = re.compile(r'[^\w\s]')

# Naver store receipt-review pages for the target cafe; the 0-based page
# index is appended to this prefix.
BASE_URL = ('https://store.naver.com/restaurants/detail?entry=pll&id=895457986'
            '&query=%EC%B9%B4%ED%8E%98%20%EB%85%B8%ED%8B%B0%EB%93%9C%20%EC%B2%AD%EB%8B%B4'
            '&tab=receiptReview&tabPage=')

with open('receiptReview.txt', 'w', -1, 'utf-8') as f:
    for page in range(0, 59):
        url = BASE_URL + str(page)
        print("now is " + str(page) + "page")
        webpage = urlopen(url)
        source = BeautifulSoup(webpage, 'html.parser')
        reviews = source.find_all('div', {'class': 'review_txt'})
        scores = source.find_all('span', {'class': 'score'})
        # zip() pairs each review with its score. The original indexed
        # scores[i] with an enumerate counter that also shadowed the page
        # loop variable, and would raise IndexError whenever the two lists
        # differ in length.
        for review, score in zip(reviews, scores):
            text = review.get_text().strip().replace('\n', ' ')
            text = emoji_pattern.sub(r'', text)  # strip emoji
            text = special_character_pattern.sub(r'', text)  # strip punctuation
            if text != '':
                # One line per review: "<text>/<score>"
                f.write(text + '/' + score.get_text() + '\n')
print('finish')
795886472569e9fa37b33ef1b73417d2821db034 | 7,215 | py | Python | src/spaceone/inventory/scheduler/inventory_scheduler.py | whdalsrnt/inventory | 5d1cfb3a9cb72156c48a0118b80e6ed2d67e8028 | [
"Apache-2.0"
] | 9 | 2020-06-04T23:01:38.000Z | 2021-06-03T03:38:59.000Z | src/spaceone/inventory/scheduler/inventory_scheduler.py | whdalsrnt/inventory | 5d1cfb3a9cb72156c48a0118b80e6ed2d67e8028 | [
"Apache-2.0"
] | 10 | 2020-08-20T01:34:30.000Z | 2022-03-14T04:59:48.000Z | src/spaceone/inventory/scheduler/inventory_scheduler.py | whdalsrnt/inventory | 5d1cfb3a9cb72156c48a0118b80e6ed2d67e8028 | [
"Apache-2.0"
] | 9 | 2020-06-08T22:03:02.000Z | 2021-12-06T06:12:30.000Z | import consul
import datetime
import logging
import time
from spaceone.core import config
from spaceone.core.locator import Locator
from spaceone.core.scheduler import HourlyScheduler, IntervalScheduler
from spaceone.core.auth.jwt.jwt_util import JWTUtil
# Public names exported by this module.
__all__ = ['InventoryHourlyScheduler']
# Standard per-module logger.
_LOGGER = logging.getLogger(__name__)
def _get_domain_id_from_token(token):
    """Return the domain id ('did' claim) from a JWT without verifying its signature."""
    return JWTUtil.unverified_decode(token)['did']
WAIT_QUEUE_INITIALIZED = 10  # seconds to wait for queue initialization
INTERVAL = 10  # seconds between Consul token polls in _validate_token()
MAX_COUNT = 10  # NOTE(review): not referenced in this module's visible code
def _validate_token(token):
    """Resolve *token* to an actual token string.

    If *token* is a lookup descriptor dict (currently only the ``consul``
    protocol is supported), poll the configured Consul KV key every
    ``INTERVAL`` seconds until a value appears and return it. Any other
    input is returned unchanged.

    Args:
        token: either the token string itself, or a dict like
            ``{'protocol': 'consul', 'config': {...}, 'uri': '<kv key>'}``

    Returns:
        The resolved token string, or the original argument when it is not
        a recognized lookup descriptor.
    """
    # .get() instead of ['protocol'] so a malformed dict falls through
    # instead of raising KeyError.
    if isinstance(token, dict) and token.get('protocol') == 'consul':
        consul_instance = Consul(token['config'])
        # Hoisted out of the loop: the key does not change between polls.
        uri = token['uri']
        value = consul_instance.patch_token(uri)
        while value is False:
            # Logger.warn is a deprecated alias — use warning().
            _LOGGER.warning('[_validate_token] token is not found ... wait')
            time.sleep(INTERVAL)
            value = consul_instance.patch_token(uri)
        _LOGGER.warning(f'[_validate_token] token: {value[:30]} uri: {uri}')
        token = value
    return token
class InventoryHourlyScheduler(HourlyScheduler):
    """Hourly scheduler that creates a collect-job pipeline template for
    every collector scheduled at the current hour.

    NOTE(review): assumes the HourlyScheduler base class invokes
    ``create_task`` once per tick — confirm against spaceone.core.scheduler.
    """

    def __init__(self, queue, interval, minute=':00'):
        super().__init__(queue, interval, minute)
        # Tick bookkeeping used by check_count() for missed-tick detection.
        self.count = self._init_count()
        self.locator = Locator()
        # Service token used to authenticate the generated jobs; resolved
        # from global config or, when empty, via Consul (_validate_token).
        self.TOKEN = self._update_token()
        self.domain_id = _get_domain_id_from_token(self.TOKEN)

    def _init_count(self):
        """Return the initial tick-counting state."""
        # get current time
        cur = datetime.datetime.utcnow()
        count = {
            'previous': cur,   # time of the last check_count call
            'index': 0,        # tick index, incremented on every check_count
            'hour': cur.hour,  # hour of the previous tick
            'started_at': 0,   # start time of the current tick
            'ended_at': 0      # end time of execution in this tick
        }
        _LOGGER.debug(f'[_init_count] {count}')
        return count

    def _update_token(self):
        """Return the service token from global config, falling back to the
        TOKEN_INFO lookup descriptor (e.g. Consul) when TOKEN is empty."""
        token = config.get_global('TOKEN')
        if token == "":
            token = _validate_token(config.get_global('TOKEN_INFO'))
        return token

    def create_task(self):
        """Build one SpaceONE pipeline template per scheduled collector.

        Schedules that fail to convert are logged and skipped so one bad
        schedule cannot block the rest.
        """
        # self.check_global_configuration()
        schedules = self.list_schedules()
        result = []
        for schedule in schedules:
            try:
                stp = self._create_job_request(schedule)
                result.append(stp)
            except Exception as e:
                _LOGGER.error(f'[create_task] check schedule {schedule}')
        return result

    def list_schedules(self):
        """Return collector schedule VOs for the current hour ([] on any error)."""
        try:
            ok = self.check_count()
            # NOTE(review): check_count() returns None, so `ok == False` can
            # never be true — this error-logging branch is effectively dead.
            if ok == False:
                # ERROR LOGGING
                pass
            # Loop all domain, then find scheduled collector
            collector_svc = self.locator.get_service('CollectorService')
            schedule = {'hour': self.count['hour']}
            _LOGGER.debug(f'[push_token] schedule: {schedule}')
            schedule_vos, total = collector_svc.scheduled_collectors({'schedule': schedule})
            _LOGGER.debug(f'[push_token] scheduled count: {total}')
            return schedule_vos
        except Exception as e:
            _LOGGER.error(e)
            return []

    def check_count(self):
        """Advance the tick counter, logging duplicated or missed ticks.

        Returns None; callers rely only on the side effect on ``self.count``.
        """
        # check current count is correct or not
        cur = datetime.datetime.utcnow()
        hour = cur.hour
        # Detect duplicated or skipped ticks. NOTE(review): `self.config` is
        # presumably the hourly interval set by the HourlyScheduler base —
        # confirm before relying on this check.
        if (self.count['hour'] + self.config) % 24 != hour:
            if self.count['hour'] == hour:
                _LOGGER.error('[check_count] duplicated call in the same time')
            else:
                _LOGGER.error('[check_count] missing time')
        # This is continuous task
        count = {
            'previous': cur,
            'index': self.count['index'] + 1,
            'hour': hour,
            'started_at': cur
        }
        self.count.update(count)

    def _update_count_ended_at(self):
        """Record the end time of the current tick."""
        cur = datetime.datetime.utcnow()
        self.count['ended_at'] = cur

    def _create_job_request(self, scheduler_vo):
        """Based on scheduler_vo, create a Job Request.

        Args:
            scheduler_vo: Scheduler VO with fields such as
                - scheduler_id
                - name
                - collector: Reference of Collector
                - schedule
                - filter
                - collector_mode
                - created_at
                - last_scheduled_at
                - domain_id

        Returns:
            stp: SpaceONE pipeline template that invokes
            CollectorService.collect for the scheduled collector.
        """
        _LOGGER.debug(f'[_create_job_request] scheduler_vo: {scheduler_vo}')
        plugin_info = scheduler_vo.collector.plugin_info
        _LOGGER.debug(f'plugin_info: {plugin_info}')
        domain_id = scheduler_vo.domain_id
        # Metadata carries the scheduler's own token/domain so the worker can
        # authenticate the collect call.
        metadata = {'token': self.TOKEN,
                    'service': 'inventory',
                    'resource': 'Collector',
                    'verb': 'collect',
                    'domain_id': self.domain_id}
        sched_job = {
            'locator': 'SERVICE',
            'name': 'CollectorService',
            'metadata': metadata,
            'method': 'collect',
            'params': {'params': {
                'collector_id': scheduler_vo.collector.collector_id,
                # if filter
                # contact credential
                'collect_mode': 'ALL',
                'filter': {},
                'domain_id': domain_id
                }
            }
        }
        stp = {'name': 'inventory_collect_schedule',
               'version': 'v1',
               'executionEngine': 'BaseWorker',
               'stages': [sched_job]}
        _LOGGER.debug(f'[_create_job_request] tasks: {stp}')
        return stp
class Consul:
    """Thin wrapper around a Consul KV connection, used to fetch tokens."""

    def __init__(self, config):
        """
        Args:
            config (dict): connection parameters, e.g.
                {'host': 'consul.example.com', 'port': 8500}
        """
        self.config = self._validate_config(config)

    def _validate_config(self, config):
        """Keep only the options that consul.Consul() understands.

        Supported options: host, port=8500, token=None, scheme=http,
        consistency=default, dc=None, verify=True, cert=None

        NOTE: falsy values (0, '', False) are dropped as well, matching the
        original truthiness check.
        """
        options = ('host', 'port', 'token', 'scheme', 'consistency', 'dc',
                   'verify', 'cert')
        return {key: config[key] for key in options if config.get(key)}

    def patch_token(self, key):
        """Fetch the value stored under *key*; return False on any failure.

        Args:
            key: KV query key (e.g. /debug/supervisor/TOKEN)
        """
        try:
            conn = consul.Consul(**self.config)
            index, data = conn.kv.get(key)
            return data['Value'].decode('ascii')
        except Exception as e:
            _LOGGER.debug(f'[patch_token] failed: {e}')
            return False
| 32.647059 | 104 | 0.535412 |
7958869699bb0560114ba66dde3e6a9159a11a0a | 1,959 | py | Python | api/src/opentrons/protocol_engine/execution/movement.py | Axel-Jacobsen/opentrons | c543d95c25003f2e784560efaa6a91f051d4cd33 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocol_engine/execution/movement.py | Axel-Jacobsen/opentrons | c543d95c25003f2e784560efaa6a91f051d4cd33 | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/protocol_engine/execution/movement.py | Axel-Jacobsen/opentrons | c543d95c25003f2e784560efaa6a91f051d4cd33 | [
"Apache-2.0"
] | null | null | null | """Movement command handling."""
from opentrons.hardware_control.api import API as HardwareAPI
from ..state import StateView
from ..command_models import BasePipettingRequest, MoveToWellResult
class MovementHandler:
    """Implementation logic for gantry movement."""

    _state: StateView
    _hardware: HardwareAPI

    def __init__(
        self,
        state: StateView,
        hardware: HardwareAPI,
    ) -> None:
        """Initialize a MovementHandler instance."""
        self._state = state
        self._hardware = hardware

    async def handle_move_to_well(
        self,
        request: BasePipettingRequest,
    ) -> MoveToWellResult:
        """Move the requested pipette to a specific well."""
        pipette_id = request.pipetteId
        labware_id = request.labwareId
        well_name = request.wellName

        # Resolve the pipette's mount and current critical point (if any).
        location = self._state.motion.get_pipette_location(pipette_id)
        mount = location.mount.to_hw_mount()
        critical_point = location.critical_point

        # Ask the hardware controller where the movement starts from.
        start_position = await self._hardware.gantry_position(
            mount=mount,
            critical_point=critical_point,
        )
        max_travel_z = self._hardware.get_instrument_max_height(mount=mount)

        # Plan the path, then execute it waypoint by waypoint.
        waypoints = self._state.motion.get_movement_waypoints(
            pipette_id=pipette_id,
            labware_id=labware_id,
            well_name=well_name,
            origin=start_position,
            origin_cp=critical_point,
            max_travel_z=max_travel_z,
        )
        for waypoint in waypoints:
            await self._hardware.move_to(
                mount=mount,
                abs_position=waypoint.position,
                critical_point=waypoint.critical_point,
            )

        return MoveToWellResult()
795886db96780b0ca19411765e61ff38739ba219 | 1,002 | py | Python | src/models/utils.py | qihongl/demo-advantage-actor-critic | 00cfa770d872412da5a604f320d41dcfd30bc020 | [
"MIT"
] | 2 | 2021-05-16T14:13:35.000Z | 2022-02-10T07:05:33.000Z | src/models/utils.py | qihongl/demo-advantage-actor-critic | 00cfa770d872412da5a604f320d41dcfd30bc020 | [
"MIT"
] | null | null | null | src/models/utils.py | qihongl/demo-advantage-actor-critic | 00cfa770d872412da5a604f320d41dcfd30bc020 | [
"MIT"
] | 1 | 2021-06-01T14:13:42.000Z | 2021-06-01T14:13:42.000Z | import torch
def ortho_init(agent):
    """Orthogonally initialize every weight matrix of *agent* and zero its biases.

    Parameters named with 'weight' get torch.nn.init.orthogonal_; parameters
    named with 'bias' are set to zero. Other parameters are left untouched.
    """
    for name, param in agent.named_parameters():
        if 'weight' in name:
            torch.nn.init.orthogonal_(param)
        elif 'bias' in name:
            torch.nn.init.constant_(param, 0)
def softmax(z, beta):
    """Temperature-scaled softmax.

    Parameters
    ----------
    z : torch tensor, has 1d underlying structure after torch.squeeze
        the raw logits
    beta : float, >0
        softmax temperature; larger values give a flatter (more "random")
        distribution

    Returns
    -------
    1d torch tensor
        a probability distribution given beta
    """
    assert beta > 0
    scaled_logits = torch.squeeze(z / beta)
    return torch.nn.functional.softmax(scaled_logits, dim=0)
def entropy(probs):
    """Compute the Shannon entropy (log base 2) of a distribution.

    Vectorized replacement for the original per-element list + torch.stack
    loop; numerically identical, including a nan result when any probability
    is exactly 0 (0 * log2(0) = nan, as before).

    Parameters
    ----------
    probs : a torch vector
        a prob distribution

    Returns
    -------
    torch scalar
        the entropy of the distribution
    """
    return -(probs * torch.log2(probs)).sum()
| 20.875 | 70 | 0.588822 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.