content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# Advent of Code puzzle input; flip the comment below to run on the sample.
INPUTPATH = "input.txt"
#INPUTPATH = "input-test.txt"
with open(INPUTPATH) as ifile:
    raw = ifile.read()
from typing import Tuple
def line_to_pos(line: str) -> Tuple[int, ...]:
    """Parse one input line into a tuple of integer coordinates.

    Drops every character except digits, minus signs, and commas, then
    splits on the commas — e.g. "<x=-1, y=2, z=-3>" becomes (-1, 2, -3).
    """
    keep = {"-", ","}
    cleaned = "".join(ch for ch in line if ch.isdigit() or ch in keep)
    return tuple(int(part) for part in cleaned.split(","))
# Transpose the per-moon (x, y, z) tuples into per-axis position tuples.
starts = tuple(zip(*map(line_to_pos, raw.strip().split("\n"))))
from itertools import combinations
from typing import List, Iterable
class Axis:
    """One spatial axis of the n-body simulation.

    Positions and velocities along a single axis evolve independently of
    the other axes, so the 3-D system can be simulated (and its cycle
    length found) one axis at a time.
    """

    poss: List[int]  # current position of each moon along this axis
    vels: List[int]  # current velocity of each moon along this axis

    def __init__(self, start_poss: Iterable[int]) -> None:
        """Start every moon at the given positions with zero velocity."""
        self.poss = list(start_poss)
        self.vels = [0] * len(self.poss)

    def __eq__(self, other: object) -> bool:
        # BUGFIX: return NotImplemented for non-Axis operands so Python can
        # fall back to its default comparison instead of raising
        # AttributeError on e.g. `axis == 5`.
        if not isinstance(other, Axis):
            return NotImplemented
        return self.poss == other.poss and self.vels == other.vels

    def step(self) -> None:
        """Advance one time step: apply pairwise gravity, then velocity."""
        # Gravity: each pair pulls one unit toward each other (zero if tied).
        for i, j in combinations(range(len(self.poss)), 2):
            a, b = self.poss[i], self.poss[j]
            diff = 1 if a < b else -1 if a > b else 0
            self.vels[i] += diff
            self.vels[j] -= diff
        # Velocity: every moon drifts by its updated velocity.
        for i, vel in enumerate(self.vels):
            self.poss[i] += vel
# Part 1: simulate 1000 steps per axis, then print total energy, where each
# moon contributes (potential = sum |pos|) * (kinetic = sum |vel|).
system = tuple(map(Axis, starts))
for axis in system:
    for _ in range(1000):
        axis.step()
# Regroup per-axis components back into per-moon (x, y, z) triples.
pos_by_moon = zip(*(axis.poss for axis in system))
vel_by_moon = zip(*(axis.vels for axis in system))
print(sum(
    sum(map(abs, pos)) * sum(map(abs, vel))
    for pos, vel in zip(pos_by_moon, vel_by_moon)
))
def cycle_period(start_poss: Iterable[int]) -> int:
    """Count the steps until one axis returns to its initial state.

    The dynamics are reversible, so the first repeated state is the
    starting state itself; step a copy until it matches the original.
    """
    initial = Axis(start_poss)
    current = Axis(initial.poss)
    steps = 0
    while True:
        current.step()
        steps += 1
        if current == initial:
            return steps
from math import lcm
# Part 2: the whole system repeats when all three axis cycles align.
print(lcm(*map(cycle_period, starts)))
| day12/main.py | 1,554 | INPUTPATH = "input-test.txt" Get some rest, buddy. :3 Up for a run? >:3c | 72 | en | 0.866252 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Plot a small set of complex points translated by 1+2j.
import cplotting as cplot
# Points forming a small figure in the complex plane.
S={2+2j, 3+2j, 1.75+1j, 2+1j, 2.25+1j, 2.5+1j, 2.75+1j, 3+1j, 3.25+1j}
# Translate every point by 1+2j; the second argument presumably sets the
# plot extent — confirm against the cplotting API.
cplot.plot({1+2j+z for z in S},4)
cplot.show()
| python/chap_1/1.4.3.py | 193 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
# MMPose-style config: runtime, checkpoint/eval cadence, optimizer, schedule.
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=100, metric='mAP')
optimizer = dict(
    type='Adam',
    lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy: linear warmup, then step decay at epochs 200 and 260
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[200, 260])
total_epochs = 300
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# 17 COCO keypoints, all used for both training and inference.
channel_cfg = dict(
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])
# Input resolution and heatmap geometry for the bottom-up pipeline.
data_cfg = dict(
    image_size=512,
    base_size=256,
    base_sigma=2,
    heatmap_size=[128],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    num_scales=1,
    scale_aware_sigma=False,
)
# model settings: bottom-up HRNet-W32 with an associative-embedding head
model = dict(
    type='BottomUp',
    pretrained='models/pytorch/imagenet/hrnet_w32-36af842e.pth',
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
    ),
    keypoint_head=dict(
        type='BottomUpSimpleHead',
        in_channels=32,
        num_joints=17,
        num_deconv_layers=0,
        tag_per_joint=True,
        with_ae_loss=[True],
        extra=dict(final_conv_kernel=1, )),
    train_cfg=dict(
        num_joints=channel_cfg['dataset_joints'],
        img_size=data_cfg['image_size']),
    # Inference-time grouping/decoding settings for associative embedding.
    test_cfg=dict(
        num_joints=channel_cfg['dataset_joints'],
        max_num_people=30,
        scale_factor=[1],
        with_heatmaps=[True],
        with_ae=[True],
        project2image=True,
        nms_kernel=5,
        nms_padding=2,
        tag_per_joint=True,
        detection_threshold=0.1,
        tag_threshold=1,
        use_detection_val=True,
        ignore_too_much=False,
        adjust=True,
        refine=True,
        flip_test=True),
    # Combined heatmap + push/pull (associative embedding) loss.
    loss_pose=dict(
        type='MultiLossFactory',
        num_joints=17,
        num_stages=1,
        ae_loss_type='exp',
        with_ae_loss=[True],
        push_loss_factor=[0.001],
        pull_loss_factor=[0.001],
        with_heatmaps_loss=[True],
        heatmaps_loss_factor=[1.0],
    ),
)
# Training-time augmentation and target (heatmap/tag) generation.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='BottomUpRandomAffine',
        rot_factor=30,
        scale_factor=[0.75, 1.5],
        scale_type='short',
        trans_factor=40),
    dict(type='BottomUpRandomFlip', flip_prob=0.5),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='BottomUpGenerateTarget',
        sigma=2,
        max_num_people=30,
    ),
    dict(
        type='Collect',
        keys=['img', 'joints', 'targets', 'masks'],
        meta_keys=[]),
]
# Deterministic resize/normalize for validation; test reuses the same steps.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
    dict(
        type='BottomUpResizeAlign',
        transforms=[
            dict(type='ToTensor'),
            dict(
                type='NormalizeTensor',
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]),
        ]),
    dict(
        type='Collect',
        keys=[
            'img',
        ],
        meta_keys=[
            'image_file', 'aug_data', 'test_scale_factor', 'base_size',
            'center', 'scale', 'flip_index'
        ]),
]
test_pipeline = val_pipeline
# COCO dataset locations; the val split is reused for testing.
data_root = 'data/coco'
data = dict(
    samples_per_gpu=24,
    workers_per_gpu=1,
    train=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline),
    val=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
    test=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
)
| configs/bottom_up/hrnet/coco/hrnet_w32_coco_512x512.py | 5,290 | learning policy dict(type='TensorboardLoggerHook') model settings | 65 | en | 0.658911 |
import streamlit as st
import warnings
try:
    from streamlit_terran_timeline import terran_timeline, generate_timeline
except ImportError:
    warnings.warn(
        "Failed to load terran_timeline from streamlit_terran_timeline. "
        "Please run 'pip install streamlit_terran_timeline' or "
        "'pip install .' if working locally"
    )
    exit(1)
# Page header and introductory copy.
st.header("Face-recognition interactive-timeline generator")
st.write(
    # BUGFIX: added the trailing space after "interactive" (the adjacent
    # literals previously concatenated to "interactivetimeline") and fixed
    # the "Thanksfully" typo.
    "In this demo we show you how easy it is to create an interactive "
    "timeline chart of faces detected on videos. Thankfully, there's an open "
    "source project called Terran that makes all this process super super easy!"
)
st.write("More descriptions here")
st.subheader("Loading your video")
st.write(
    "You can select videos from **multiple sources**: "
    "YouTube and almost any video streaming platform, or any local file"
)
#
# Ask the user to input a video link or path and show the video below
#
video_path = st.text_input(
    "Link or path to video", "https://www.youtube.com/watch?v=v2VgA_MCNDg"
)
#
# Show the actual faces timeline chart
#
st.subheader("Faces timeline chart")
st.write("")
@st.cache(persist=True, ttl=86_400, suppress_st_warning=True, show_spinner=False)
def _generate_timeline(video_path):
    """Generate a face-appearance timeline for the video, cached for 24h."""
    # NOTE(review): "appearence_threshold" spelling presumably matches the
    # upstream generate_timeline keyword — do not "correct" it here.
    timeline = generate_timeline(
        video_src=video_path,
        appearence_threshold=5,
        batch_size=32,
        duration=None,
        framerate=8,
        output_directory="timelines",
        ref_directory=None,
        similarity_threshold=0.75,
        start_time=0,
        thumbnail_rate=1,
    )
    return timeline
with st.spinner("Generating timeline"):
    timeline = _generate_timeline(video_path)
# terran_timeline presumably returns the timestamp the user clicked on the
# chart — confirm against the component docs; seek the player there.
start_time = terran_timeline(timeline)
st.video(video_path, start_time=int(start_time))
| streamlit_terran_timeline/examples/youtube.py | 1,795 | Ask the user to input a video link or path and show the video below Show the actual faces timeline chart | 104 | en | 0.554019 |
# from blazingsql import BlazingContext
from Configuration import ExecutionMode
from Configuration import Settings as Settings
# from dask.distributed import Client
from DataBase import createSchema as createSchema
# from EndToEndTests import countDistincTest
from EndToEndTests import (
GroupByWitoutAggregations,
aggregationsWithoutGroupByTest,
bindableAliasTest,
booleanTest,
caseTest,
castTest,
)
from EndToEndTests import coalesceTest as coalesceTest
from EndToEndTests import columnBasisTest as columnBasisTest
from EndToEndTests import (
commonTableExpressionsTest,
concatTest,
countWithoutGroupByTest,
dateTest,
dirTest,
fileSystemGSTest,
fileSystemLocalTest,
fileSystemS3Test,
)
from EndToEndTests import fullOuterJoinsTest as fullOuterJoinsTest
from EndToEndTests import groupByTest as groupByTest
from EndToEndTests import innerJoinsTest as innerJoinsTest
from EndToEndTests import crossJoinsTest as crossJoinsTest
from EndToEndTests import leftOuterJoinsTest as leftOuterJoinsTest
from EndToEndTests import (
likeTest,
literalTest,
# loadDataTest,
nestedQueriesTest,
nonEquiJoinsTest,
)
from EndToEndTests import orderbyTest as orderbyTest
from EndToEndTests import (
predicatesWithNulls,
roundTest,
simpleDistributionTest,
stringTests,
substringTest,
tablesFromPandasTest,
# timestampdiffTest,
timestampTest,
tpchQueriesTest,
)
from EndToEndTests import unaryOpsTest as unaryOpsTest
from EndToEndTests import unifyTablesTest
from EndToEndTests import unionTest as unionTest
from EndToEndTests import useLimitTest
from EndToEndTests import whereClauseTest as whereClauseTest
from EndToEndTests import wildCardTest
from pynvml import nvmlInit
from pyspark.sql import SparkSession
from Runner import runTest
from Utils import Execution, init_context
def main():
    """Run the BlazingSQL end-to-end test suite.

    Initializes NVML and execution settings, optionally creates Drill and
    Spark sessions for result comparison, builds the BlazingSQL context,
    then dispatches every requested test group (all groups when
    ``targetTestGroups`` is empty).

    Returns:
        tuple: ``(result, error_msgs)`` from the saved run log, or
        ``(True, [])`` when running in generator mode (no log is saved).
    """
    print("**init end2end**")
    Execution.getArgs()
    nvmlInit()
    dir_data_file = Settings.data["TestSettings"]["dataDirectory"]
    nRals = Settings.data["RunSettings"]["nRals"]
    drill = "drill"
    spark = "spark"
    compareResults = True
    if "compare_results" in Settings.data["RunSettings"]:
        compareResults = Settings.data["RunSettings"]["compare_results"]
    if (
        Settings.execution_mode == ExecutionMode.FULL and compareResults == "true"
    ) or Settings.execution_mode == ExecutionMode.GENERATOR:
        # Create Table Drill -----------------------------------------
        from pydrill.client import PyDrill
        drill = PyDrill(host="localhost", port=8047)
        createSchema.init_drill_schema(
            drill, Settings.data["TestSettings"]["dataDirectory"], bool_test=True
        )
        # Create Table Spark -------------------------------------------------
        spark = SparkSession.builder.appName("allE2ETest").getOrCreate()
        createSchema.init_spark_schema(
            spark, Settings.data["TestSettings"]["dataDirectory"]
        )
    # Create Context For BlazingSQL
    bc, dask_client = init_context()
    targetTestGroups = Settings.data["RunSettings"]["targetTestGroups"]
    runAllTests = (
        len(targetTestGroups) == 0
    )  # if targetTestGroups was empty the user wants to run all the tests
    if runAllTests or ("aggregationsWithoutGroupByTest" in targetTestGroups):
        aggregationsWithoutGroupByTest.main(
            dask_client, drill, dir_data_file, bc, nRals
        )
    if runAllTests or ("coalesceTest" in targetTestGroups):
        coalesceTest.main(
            dask_client, drill, dir_data_file, bc, nRals
        )  # we are not supporting coalesce yet
    if runAllTests or ("columnBasisTest" in targetTestGroups):
        columnBasisTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("commonTableExpressionsTest" in targetTestGroups):
        commonTableExpressionsTest.main(dask_client, drill, dir_data_file, bc, nRals)
    # we are not supporting count distinct yet
    # countDistincTest.main(dask_client, drill, dir_data_file, bc)
    if runAllTests or ("countWithoutGroupByTest" in targetTestGroups):
        countWithoutGroupByTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("dateTest" in targetTestGroups):
        dateTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("timestampTest" in targetTestGroups):
        timestampTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("fullOuterJoinsTest" in targetTestGroups):
        fullOuterJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("groupByTest" in targetTestGroups):
        groupByTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("GroupByWitoutAggregations" in targetTestGroups):
        GroupByWitoutAggregations.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("innerJoinsTest" in targetTestGroups):
        innerJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("crossJoinsTest" in targetTestGroups):
        crossJoinsTest.main(dask_client, spark, dir_data_file, bc, nRals)
    if runAllTests or ("leftOuterJoinsTest" in targetTestGroups):
        # BUGFIX: this branch previously checked for "" instead of
        # "leftOuterJoinsTest", so the group could never be selected by name.
        leftOuterJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("nonEquiJoinsTest" in targetTestGroups):
        nonEquiJoinsTest.main(dask_client, drill, dir_data_file, bc, nRals)
    # loadDataTest.main(dask_client, bc) #check this
    if runAllTests or ("nestedQueriesTest" in targetTestGroups):
        nestedQueriesTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("orderbyTest" in targetTestGroups):
        orderbyTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("predicatesWithNulls" in targetTestGroups):
        predicatesWithNulls.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("stringTests" in targetTestGroups):
        stringTests.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("tablesFromPandasTest" in targetTestGroups):
        tablesFromPandasTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("unaryOpsTest" in targetTestGroups):
        unaryOpsTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("unifyTablesTest" in targetTestGroups):
        unifyTablesTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("unionTest" in targetTestGroups):
        unionTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("useLimitTest" in targetTestGroups):
        useLimitTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("whereClauseTest" in targetTestGroups):
        whereClauseTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("bindableAliasTest" in targetTestGroups):
        bindableAliasTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("booleanTest" in targetTestGroups):
        booleanTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("caseTest" in targetTestGroups):
        caseTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("castTest" in targetTestGroups):
        castTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("concatTest" in targetTestGroups):
        concatTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("literalTest" in targetTestGroups):
        literalTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("dirTest" in targetTestGroups):
        dirTest.main(dask_client, drill, dir_data_file, bc, nRals)
    # HDFS is not working yet
    # fileSystemHdfsTest.main(dask_client, drill, dir_data_file, bc)
    # HDFS is not working yet
    # mixedFileSystemTest.main(dask_client, drill, dir_data_file, bc)
    if runAllTests or ("likeTest" in targetTestGroups):
        likeTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("simpleDistributionTest" in targetTestGroups):
        simpleDistributionTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("substringTest" in targetTestGroups):
        substringTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("wildCardTest" in targetTestGroups):
        wildCardTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("tpchQueriesTest" in targetTestGroups):
        tpchQueriesTest.main(dask_client, drill, spark, dir_data_file, bc, nRals)
    if runAllTests or ("roundTest" in targetTestGroups):
        roundTest.main(dask_client, drill, dir_data_file, bc, nRals)
    if runAllTests or ("fileSystemLocalTest" in targetTestGroups):
        fileSystemLocalTest.main(dask_client, drill, dir_data_file, bc, nRals)
    # Cloud-filesystem groups are skipped on gpuCI.
    if Settings.execution_mode != ExecutionMode.GPUCI:
        if runAllTests or ("fileSystemS3Test" in targetTestGroups):
            fileSystemS3Test.main(dask_client, drill, dir_data_file, bc, nRals)
        if runAllTests or ("fileSystemGSTest" in targetTestGroups):
            fileSystemGSTest.main(dask_client, drill, dir_data_file, bc, nRals)
    # timestampdiffTest.main(dask_client, spark, dir_data_file, bc, nRals)
    if Settings.execution_mode != ExecutionMode.GENERATOR:
        result, error_msgs = runTest.save_log(
            Settings.execution_mode == ExecutionMode.GPUCI
        )
        # Report the largest per-test memory delta. (Renamed from `max`,
        # which shadowed the builtin.)
        max_delta = 0
        for entry in Settings.memory_list:
            if entry.delta > max_delta:
                max_delta = entry.delta
        print("MAX DELTA: " + str(max_delta))
        print(
            """***********************************************************
            ********************"""
        )
        for entry in Settings.memory_list:
            print(
                entry.name
                + ":"
                + " Start Mem: "
                + str(entry.start_mem)
                + " End Mem: "
                + str(entry.end_mem)
                + " Diff: "
                + str(entry.delta)
            )
        return result, error_msgs
    return True, []
if __name__ == "__main__":
    import time
    start = time.time()  # in seconds
    result, error_msgs = main()
    if Settings.execution_mode != ExecutionMode.GENERATOR:
        # NOTE kahro william percy mario : here we tell to gpuci there was
        # an error comparing with historic results
        # TODO william kharoly felipe we should try to enable and
        # use this function in the future
        result = True
        # NOTE(review): result was just forced to True above, so the failure
        # report below is unreachable until the comparison is re-enabled.
        if result is False:
            for error_msg in error_msgs:
                print(error_msg)
            # import sys
            end = time.time()  # in seconds
            elapsed = end - start  # in seconds
            time_delta_desc = (
                str(elapsed / 60)
                + " minutes and "
                + str(int(elapsed) % 60)
                + " seconds"
            )
            print(
                "==>> E2E FAILED against previous run, total time was: "
                + time_delta_desc
            )
            # TODO percy kharo willian: uncomment this line
            # when gpuci has all the env vars set
            # return error exit status to the command prompt (shell)
            # sys.exit(1)
| tests/BlazingSQLTest/EndToEndTests/allE2ETest.py | 11,669 | from blazingsql import BlazingContext from dask.distributed import Client from EndToEndTests import countDistincTest loadDataTest, timestampdiffTest, Create Table Drill ----------------------------------------- Create Table Spark ------------------------------------------------- Create Context For BlazingSQL if targetTestGroups was empty the user wants to run all the tests we are not supporting coalesce yet we are not supporting count distinct yet countDistincTest.main(dask_client, drill, dir_data_file, bc) loadDataTest.main(dask_client, bc) check this HDFS is not working yet fileSystemHdfsTest.main(dask_client, drill, dir_data_file, bc) HDFS is not working yet mixedFileSystemTest.main(dask_client, drill, dir_data_file, bc) timestampdiffTest.main(dask_client, spark, dir_data_file, bc, nRals) in seconds NOTE kahro william percy mario : here we tell to gpuci there was an error comparing with historic results TODO william kharoly felipe we should try to enable and use this function in the future import sys in seconds in seconds TODO percy kharo willian: uncomment this line when gpuci has all the env vars set return error exit status to the command prompt (shell) sys.exit(1) | 1,189 | en | 0.668666 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: geometry.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol registry where generated descriptors below are registered.
_sym_db = _symbol_database.Default()
import boxgeom_pb2 as boxgeom__pb2
import cylindergeom_pb2 as cylindergeom__pb2
import spheregeom_pb2 as spheregeom__pb2
import planegeom_pb2 as planegeom__pb2
import imagegeom_pb2 as imagegeom__pb2
import heightmapgeom_pb2 as heightmapgeom__pb2
import meshgeom_pb2 as meshgeom__pb2
import vector3d_pb2 as vector3d__pb2
import polylinegeom_pb2 as polylinegeom__pb2
# File descriptor reconstructed from the serialized geometry.proto
# definition; generated code — do not hand-edit the serialized bytes.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='geometry.proto',
  package='gazebo.msgs',
  syntax='proto2',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x0egeometry.proto\x12\x0bgazebo.msgs\x1a\rboxgeom.proto\x1a\x12\x63ylindergeom.proto\x1a\x10spheregeom.proto\x1a\x0fplanegeom.proto\x1a\x0fimagegeom.proto\x1a\x13heightmapgeom.proto\x1a\x0emeshgeom.proto\x1a\x0evector3d.proto\x1a\x12polylinegeom.proto\"\xb5\x04\n\x08Geometry\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.gazebo.msgs.Geometry.Type\x12!\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x14.gazebo.msgs.BoxGeom\x12+\n\x08\x63ylinder\x18\x03 \x01(\x0b\x32\x19.gazebo.msgs.CylinderGeom\x12%\n\x05plane\x18\x04 \x01(\x0b\x32\x16.gazebo.msgs.PlaneGeom\x12\'\n\x06sphere\x18\x05 \x01(\x0b\x32\x17.gazebo.msgs.SphereGeom\x12%\n\x05image\x18\x06 \x01(\x0b\x32\x16.gazebo.msgs.ImageGeom\x12-\n\theightmap\x18\x07 \x01(\x0b\x32\x1a.gazebo.msgs.HeightmapGeom\x12#\n\x04mesh\x18\x08 \x01(\x0b\x32\x15.gazebo.msgs.MeshGeom\x12%\n\x06points\x18\t \x03(\x0b\x32\x15.gazebo.msgs.Vector3d\x12\'\n\x08polyline\x18\n \x03(\x0b\x32\x15.gazebo.msgs.Polyline\"\x93\x01\n\x04Type\x12\x07\n\x03\x42OX\x10\x01\x12\x0c\n\x08\x43YLINDER\x10\x02\x12\n\n\x06SPHERE\x10\x03\x12\t\n\x05PLANE\x10\x04\x12\t\n\x05IMAGE\x10\x05\x12\r\n\tHEIGHTMAP\x10\x06\x12\x08\n\x04MESH\x10\x07\x12\x10\n\x0cTRIANGLE_FAN\x10\x08\x12\x0e\n\nLINE_STRIP\x10\t\x12\x0c\n\x08POLYLINE\x10\n\x12\t\n\x05\x45MPTY\x10\x0b'
  ,
  dependencies=[boxgeom__pb2.DESCRIPTOR,cylindergeom__pb2.DESCRIPTOR,spheregeom__pb2.DESCRIPTOR,planegeom__pb2.DESCRIPTOR,imagegeom__pb2.DESCRIPTOR,heightmapgeom__pb2.DESCRIPTOR,meshgeom__pb2.DESCRIPTOR,vector3d__pb2.DESCRIPTOR,polylinegeom__pb2.DESCRIPTOR,])
# Descriptor for the nested enum Geometry.Type (11 shape kinds).
# Generated code — do not hand-edit.
_GEOMETRY_TYPE = _descriptor.EnumDescriptor(
  name='Type',
  full_name='gazebo.msgs.Geometry.Type',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='BOX', index=0, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CYLINDER', index=1, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='SPHERE', index=2, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='PLANE', index=3, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='IMAGE', index=4, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='HEIGHTMAP', index=5, number=6,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='MESH', index=6, number=7,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='TRIANGLE_FAN', index=7, number=8,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LINE_STRIP', index=8, number=9,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='POLYLINE', index=9, number=10,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='EMPTY', index=10, number=11,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=610,
  serialized_end=757,
)
_sym_db.RegisterEnumDescriptor(_GEOMETRY_TYPE)
# Descriptor for the Geometry message and its ten fields.
# Generated code — do not hand-edit.
_GEOMETRY = _descriptor.Descriptor(
  name='Geometry',
  full_name='gazebo.msgs.Geometry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='gazebo.msgs.Geometry.type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='box', full_name='gazebo.msgs.Geometry.box', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='cylinder', full_name='gazebo.msgs.Geometry.cylinder', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='plane', full_name='gazebo.msgs.Geometry.plane', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sphere', full_name='gazebo.msgs.Geometry.sphere', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='image', full_name='gazebo.msgs.Geometry.image', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='heightmap', full_name='gazebo.msgs.Geometry.heightmap', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='mesh', full_name='gazebo.msgs.Geometry.mesh', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='points', full_name='gazebo.msgs.Geometry.points', index=8,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='polyline', full_name='gazebo.msgs.Geometry.polyline', index=9,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _GEOMETRY_TYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=192,
  serialized_end=757,
)
# Wire the field descriptors to the message/enum types from the imported
# dependency modules, then register everything. Generated code — do not edit.
_GEOMETRY.fields_by_name['type'].enum_type = _GEOMETRY_TYPE
_GEOMETRY.fields_by_name['box'].message_type = boxgeom__pb2._BOXGEOM
_GEOMETRY.fields_by_name['cylinder'].message_type = cylindergeom__pb2._CYLINDERGEOM
_GEOMETRY.fields_by_name['plane'].message_type = planegeom__pb2._PLANEGEOM
_GEOMETRY.fields_by_name['sphere'].message_type = spheregeom__pb2._SPHEREGEOM
_GEOMETRY.fields_by_name['image'].message_type = imagegeom__pb2._IMAGEGEOM
_GEOMETRY.fields_by_name['heightmap'].message_type = heightmapgeom__pb2._HEIGHTMAPGEOM
_GEOMETRY.fields_by_name['mesh'].message_type = meshgeom__pb2._MESHGEOM
_GEOMETRY.fields_by_name['points'].message_type = vector3d__pb2._VECTOR3D
_GEOMETRY.fields_by_name['polyline'].message_type = polylinegeom__pb2._POLYLINE
_GEOMETRY_TYPE.containing_type = _GEOMETRY
DESCRIPTOR.message_types_by_name['Geometry'] = _GEOMETRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete Geometry message class built from the descriptor above.
Geometry = _reflection.GeneratedProtocolMessageType('Geometry', (_message.Message,), {
  'DESCRIPTOR' : _GEOMETRY,
  '__module__' : 'geometry_pb2'
  # @@protoc_insertion_point(class_scope:gazebo.msgs.Geometry)
  })
_sym_db.RegisterMessage(Geometry)
# @@protoc_insertion_point(module_scope)
| pygazebo/msg/geometry_pb2.py | 10,559 | Generated protocol buffer code.
-*- coding: utf-8 -*- Generated by the protocol buffer compiler. DO NOT EDIT! source: geometry.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:gazebo.msgs.Geometry) @@protoc_insertion_point(module_scope) | 267 | en | 0.539253 |
from aws_cdk import (
core,
aws_iam as iam,
aws_kinesis as kinesis,
aws_kinesisfirehose as kinesisfirehose
)
class Lab07Stack(core.Stack):
    """CDK stack: IAM role + S3 policy and a Kinesis Firehose delivery
    stream that writes to S3 with a Lambda transform and S3 backup."""

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # The code that defines your stack goes here
        # NOTE(review): the trust policy allows lambda.amazonaws.com to assume
        # this "firehose" role — confirm firehose.amazonaws.com wasn't intended.
        role01 = iam.CfnRole(self,id="firehose01_role",assume_role_policy_document= {
            "Statement": [{
                "Action": "sts:AssumeRole",
                "Effect": "Allow",
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                }
            }],
            "Version": "2012-10-17"
        },managed_policy_arns=[
            "arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole"
        ])
        # Broad S3 read/write policy (Resource "*") attached to the role above.
        policy01=iam.CfnPolicy(self,id="firehose01_policy",policy_name="firehose01_policy",policy_document={
            'Version': "2012-10-17",
            'Statement': [
                {
                    "Action": [
                        's3:AbortMultipartUpload',
                        's3:GetBucketLocation',
                        's3:GetObject',
                        's3:ListBucket',
                        's3:ListBucketMultipartUploads',
                        's3:PutObject'
                    ],
                    "Resource": ['*'],
                    "Effect": "Allow"
                }
            ]
        },roles=[role01.ref])
        # NOTE(review): the configuration below mixes CDK camelCase keys
        # ('bucketArn', 'bufferingHints', ...) with CloudFormation PascalCase
        # keys ('ProcessingConfiguration', 'S3BackupConfiguration', ...); the
        # PascalCase entries are likely ignored by CfnDeliveryStream — confirm
        # against the aws_kinesisfirehose CFN spec. Also,
        # 'DataFormatConversionConfiguration' and 'EncryptionConfiguration'
        # are given strings where CloudFormation expects structures — verify.
        delivery_stream = kinesisfirehose.CfnDeliveryStream(self, id = "firehose01",
            delivery_stream_name = "firehose01",
            extended_s3_destination_configuration = {
                # S3 bucket info
                'bucketArn': 'arn:aws:s3:::fluent-bit-s3',
                # Compression setting; legacy approach: gzip, new approach TBD
                'compressionFormat': 'GZIP',
                # Format conversion: whether to convert to ORC/Parquet; default none
                'DataFormatConversionConfiguration':"Disabled",
                # Encryption: default none
                'EncryptionConfiguration':"NoEncryption",
                # Error output prefix
                'bufferingHints': {
                    'intervalInSeconds': 600,
                    'sizeInMBs': 128
                },
                'ProcessingConfiguration': {
                    "Enabled": True,
                    "Processor": {
                        "Type": "Lambda",
                        "Parameters": [
                            {
                                "ParameterName": "BufferIntervalInSeconds",
                                "ParameterValue": "60"
                            },
                            {
                                "ParameterName": "BufferSizeInMBs",
                                "ParameterValue": "3"
                            },
                            {
                                "ParameterName": "LambdaArn",
                                "ParameterValue": "arn:aws:lambda:ap-southeast-1:596030579944:function:firehose-test"
                            }
                        ]
                    }
                },
                'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role',
                'S3BackupConfiguration': {
                    "BucketARN": 'arn:aws:s3:::fluent-bit-s3',
                    'bufferingHints': {
                        'intervalInSeconds': 600,
                        'sizeInMBs': 128
                    },
                    'compressionFormat': 'GZIP',
                    'EncryptionConfiguration':"NoEncryption",
                    'Prefix': "/backup",
                    'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role'
                }
            },
        )
| Lab07/lab07/lab07_stack.py | 5,425 | The code that defines your stack goes here s3桶信息 压缩设置,老方案:gzip,新方案待定 格式转换,是否转换为orc,parquet,默认无 是否加密:默认无 错误输出前缀 | 110 | zh | 0.696952 |
"""Main app/routing file for TwitOff"""
from os import getenv
from flask import Flask, render_template, request
from twitoff.twitter import add_or_update_user
from twitoff.models import DB, User, MIGRATE
from twitoff.predict import predict_user
def create_app():
    """Flask application factory for TwitOff.

    Configures SQLAlchemy from the DATABASE_URL environment variable,
    wires up the DB/migration extensions, registers all routes, and
    returns the configured app.
    """
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    DB.init_app(app)
    MIGRATE.init_app(app, DB)
    # TODO - make rest of application
    @app.route('/')
    def root():
        # Home page listing all users.
        # SQL equivalent = "SELECT * FROM user;"
        return render_template('base.html', title="Home", users=User.query.all())
    @app.route("/compare", methods=["POST"])
    def compare():
        # Sort so the (user0, user1) pair is order-independent of form input
        user0, user1 = sorted(
            [request.values["user1"], request.values["user2"]])
        # conditional that prevents same-user comparison
        if user0 == user1:
            message = "Cannot compare users to themselves!"
        else:
            hypo_tweet_text = request.values["tweet_text"]
            # prediction returns zero or one depending upon user
            prediction = predict_user(user0, user1, hypo_tweet_text)
            message = "'{}' is more likely to be said by {} than {}".format(
                hypo_tweet_text, user1 if prediction else user0,
                user0 if prediction else user1
            )
        # returns rendered template with dynamic message
        return render_template('prediction.html', title="Prediction:", message=message)
    @app.route("/user", methods=["POST"])
    @app.route("/user/<name>", methods=["GET"])
    def user(name=None, message=""):
        # POST supplies the name via form field; GET via the URL path
        name = name or request.values["user_name"]
        try:
            if request.method == "POST":
                add_or_update_user(name)
                message = "User {} sucessfully added!".format(name)
            tweets = User.query.filter(User.name == name).one().tweets
        except Exception as e:
            # Any failure (twitter lookup, missing user, ...) becomes a page message
            message = "Error handling {}: {}".format(name, e)
            tweets = []
        return render_template("user.html", title=name, tweets=tweets, message=message)
    @app.route("/update")
    def update():
        # Refresh tweets for every stored user
        users = User.query.all()
        for user in users:
            add_or_update_user(user.name)
        return render_template("base.html", title="Database has been updated!", users=User.query.all())
    @app.route("/reset")
    def reset():
        # Drop and recreate all tables (destructive!)
        DB.drop_all()
        DB.create_all()
        return render_template("base.html", title="Reset Database")
    return app
| twitoff/app.py | 2,581 | Main app/routing file for TwitOff
TODO - make rest of application SQL equivalent = "SELECT * FROM user;" conditinoal that prevents same user comparison prediction return zero or one depending upon user returns rendered template with dynamic message | 250 | en | 0.684052 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import logging
import json
# Logger name used by LogFilter when the caller does not supply one
DEFAULT_LOG_NAME = 'azure.mgmt.common.filters'
# Log level used by LogFilter when the caller does not supply one
DEFAULT_LOG_LEVEL = logging.DEBUG
# Default user agent string (currently empty)
DEFAULT_USER_AGENT = ''
class RequestFilter(object):
    '''
    Terminal filter of the chain: hands the prepared request to the
    wrapped requests session and returns the raw response.
    '''

    def __init__(self, session):
        if session is None:
            raise ValueError('session cannot be None.')
        self._session = session

    def send(self, prepared_request):
        # End of the chain; no self.next here.
        session = self._session
        return session.send(prepared_request)
class SigningFilter(object):
    '''
    Chain filter that signs the outgoing request with the supplied
    credentials object before delegating to the next filter.
    '''

    def __init__(self, creds):
        if creds is None:
            raise ValueError('creds cannot be None.')
        self._creds = creds

    def send(self, prepared_request):
        # sign_request mutates the request in place (adds auth headers)
        self._creds.sign_request(prepared_request)
        return self.next.send(prepared_request)
class UserAgentFilter(object):
    '''
    Chain filter that stamps a user-agent header onto each request
    before delegating to the next filter.
    '''

    def __init__(self, user_agent):
        if user_agent is None:
            raise ValueError('user_agent cannot be None.')
        self._user_agent = user_agent

    def send(self, prepared_request):
        # Overwrites any existing user-agent header on the request.
        prepared_request.headers['user-agent'] = self._user_agent
        return self.next.send(prepared_request)
class LogFilter(object):
    '''
    Chain filter that logs request/response pairs to a standard python
    logger, masking credentials and pretty-printing JSON bodies.

    Example of enabling logging to the console:

    import logging
    logger = logging.getLogger('azure.mgmt.common.filters')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())
    '''

    def __init__(self, name=DEFAULT_LOG_NAME, level=DEFAULT_LOG_LEVEL):
        if name is None:
            raise ValueError('name cannot be None.')
        if level is None:
            raise ValueError('level cannot be None.')
        self.level = level
        self.logger = logging.getLogger(name)

    def send(self, prepared_request):
        # Log the outgoing request, forward it down the chain, then log
        # whatever came back before returning it to the caller.
        self._log_request(prepared_request)
        response = self.next.send(prepared_request)
        self._log_response(response)
        return response

    @staticmethod
    def _headers_to_string(headers):
        # Redact sensitive headers so credentials never reach the logs.
        masked = {'authorization'}
        lines = []
        for name, value in headers.items():
            shown = '*****' if name.lower() in masked else value
            lines.append('%s: %s' % (name, shown))
        return '\n'.join(lines)

    @staticmethod
    def _pretty_print(content):
        # Best effort: re-serialize JSON bodies sorted and indented;
        # anything that fails to parse is returned untouched.
        try:
            return json.dumps(
                json.loads(content),
                sort_keys=True,
                indent=4,
                separators=(',', ': '),
            )
        except Exception:
            return content

    def _log_request(self, request):
        if not self.logger.isEnabledFor(self.level):
            return
        parts = ['Request: %s %s\n%s\n' % (
            request.method, request.url,
            self._headers_to_string(request.headers))]
        if request.body:
            parts.append(self._pretty_print(request.body))
        self.logger.log(self.level, '\n'.join(parts))

    def _log_response(self, response):
        if not self.logger.isEnabledFor(self.level):
            return
        parts = ['Response: %s %s\n%s\n' % (
            response.status_code, response.reason,
            self._headers_to_string(response.headers))]
        if response.text:
            parts.append(self._pretty_print(response.text))
        self.logger.log(self.level, '\n'.join(parts))
| prototype/api/FlaskApp/FlaskApp/azure_components/azure/mgmt/common/filters.py | 4,199 | ------------------------------------------------------------------------- Copyright (c) Microsoft. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.-------------------------------------------------------------------------- | 714 | en | 0.765323 |
import os
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import i18n
import requests
import yaml
from . import config, frozen_utils, os_utils, print_utils
# The URL to the docker-compose.yml
BRAINFRAME_DOCKER_COMPOSE_URL = "https://{subdomain}aotu.ai/releases/brainframe/{version}/docker-compose.yml"
# The URL to the latest tag, which is just a file containing the latest version
# as a string
BRAINFRAME_LATEST_TAG_URL = (
"https://{subdomain}aotu.ai/releases/brainframe/latest"
)
def assert_installed(install_path: Path) -> None:
    """Exit with an error message unless a docker-compose.yml exists under
    the given install path (i.e. BrainFrame appears to be installed there).
    """
    if not (install_path / "docker-compose.yml").is_file():
        print_utils.fail_translate(
            "general.brainframe-must-be-installed",
            install_env_var=config.install_path.name,
        )
def run(install_path: Path, commands: List[str]) -> None:
    """Invoke Docker Compose against the installed docker-compose.yml,
    forwarding the given subcommand and arguments.

    Frozen (PyInstaller) builds shell out to the system docker-compose;
    otherwise the bundled ``compose`` module is run with this interpreter.
    The override compose file and the .env file are passed along when they
    exist next to the main compose file.
    """
    _assert_has_docker_permissions()

    compose_path = install_path / "docker-compose.yml"

    if frozen_utils.is_frozen():
        # Compose can't be easily embedded into a PyInstaller executable,
        # so rely on the system installation
        base_command = ["docker-compose"]
    else:
        # Use the Docker Compose bundled with this interpreter
        base_command = [sys.executable, "-m", "compose"]

    base_command += ["--file", str(compose_path)]

    override_path = install_path / "docker-compose.override.yml"
    if override_path.is_file():
        base_command += ["--file", str(override_path)]

    env_path = install_path / ".env"
    if env_path.is_file():
        base_command += ["--env-file", str(env_path)]

    os_utils.run(base_command + commands)
def download(target: Path, version: str = "latest") -> None:
    """Fetch the docker-compose.yml for the given release and write it to
    ``target``, failing with a translated message on HTTP errors.

    "latest" is resolved to a concrete version first.
    """
    _assert_has_write_permissions(target.parent)

    if version == "latest":
        version = get_latest_version()

    url = BRAINFRAME_DOCKER_COMPOSE_URL.format(
        subdomain="staging." if config.is_staging.value else "",
        version=version,
    )
    response = requests.get(url, auth=config.staging_credentials(), stream=True)
    if not response.ok:
        print_utils.fail_translate(
            "general.error-downloading-docker-compose",
            status_code=response.status_code,
            error_message=response.text,
        )

    target.write_text(response.text)

    if os_utils.is_root():
        # When run as root the file would be root-owned; let the
        # BrainFrame group edit it as well
        os_utils.give_brainframe_group_rw_access([target])
def get_latest_version() -> str:
    """
    :return: The latest available version in the format "vX.Y.Z"
    """
    # Authenticate against staging if the user wants to download from there
    subdomain = "staging." if config.is_staging.value else ""
    url = BRAINFRAME_LATEST_TAG_URL.format(subdomain=subdomain)
    # The endpoint serves a plain-text file whose entire body is the tag
    response = requests.get(url, auth=config.staging_credentials())
    return response.text
def check_existing_version(install_path: Path) -> str:
    """Read the installed docker-compose.yml and report the BrainFrame core
    image tag as a version string (with a "v" prefix).
    """
    compose_path = install_path / "docker-compose.yml"
    compose = yaml.load(compose_path.read_text(), Loader=yaml.SafeLoader)
    image = compose["services"]["core"]["image"]
    return "v" + image.split(":")[-1]
def _assert_has_docker_permissions() -> None:
    """Fails if the user does not have permissions to interact with Docker"""
    if os_utils.is_root() or os_utils.currently_in_group("docker"):
        return
    print_utils.fail(
        i18n.t("general.docker-bad-permissions")
        + "\n"
        + _group_recommendation_message("docker")
    )
def _assert_has_write_permissions(path: Path) -> None:
"""Fails if the user does not have write access to the given path."""
if os.access(path, os.W_OK):
return
error_message = i18n.t("general.file-bad-write-permissions", path=path)
error_message += "\n"
if path.stat().st_gid == os_utils.BRAINFRAME_GROUP_ID:
error_message += " " + _group_recommendation_message("brainframe")
else:
error_message += " " + i18n.t(
"general.unexpected-group-for-file", path=path, group="brainframe"
)
print_utils.fail(error_message)
def _group_recommendation_message(group: str) -> str:
    """Return advice on how the user can gain access to the given group.

    Bug fix: the membership check previously always inspected the
    "brainframe" group regardless of the ``group`` argument, so callers
    asking about the "docker" group could get the wrong recommendation.
    The requested group is now checked instead.
    """
    if os_utils.added_to_group(group):
        # Already a member on disk; the session just needs a restart/re-login
        return i18n.t("general.restart-for-group-access", group=group)
    # Not a member: either add themselves to the group or retry as root
    return i18n.t("general.retry-as-root-or-group", group=group)
| brainframe/cli/docker_compose.py | 4,916 | Fails if the user does not have permissions to interact with Docker
Fails if the user does not have write access to the given path.
:return: The latest available version in the format "vX.Y.Z"
The URL to the docker-compose.yml The URL to the latest tag, which is just a file containing the latest version as a string Rely on the system's Docker Compose, since Compose can't be easily embedded into a PyInstaller executable Use the included Docker Compose Provide the override file if it exists Provide the .env file if it exists Fix the permissions of the docker-compose.yml so that the BrainFrame group can edit it Add the flags to authenticate with staging if the user wants to download from there Check what the latest version is The user is in the group, they just need to restart The user is not in the group, so they need to either add themselves or use sudo | 866 | en | 0.907978 |
#!/usr/bin/env python3
def main():
    """Solution entry point (template placeholder)."""
    pass


if __name__ == '__main__':
    main()
| .vim/template/python/base-atcoder.py | 86 | !/usr/bin/env python3 | 21 | fr | 0.448822 |
# author : 陈熙
# encoding:utf-8
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
class SendEmail:
    """Build and send a grade-notification e-mail via the 139.com SMTP relay.

    Bug fix: the original kept a single class-level ``MIMEMultipart`` that
    every ``__init__`` mutated, so creating several ``SendEmail`` instances
    appended duplicate HTML parts and overwrote the ``To`` header for all
    of them. Each instance now owns its own message.
    """

    # Sender address used in the SMTP envelope
    sender = 'atomuser@139.com'  # 'ccu_queryresul@139.com'

    def __init__(self, table, rcpt):
        """
        Args:
            table: HTML fragment with the grade table (message body).
            rcpt: Recipient e-mail address.
        """
        self.table = table
        self.rcpt = rcpt
        # Build a fresh message per instance (see class docstring).
        self.msg = MIMEMultipart('alternative')
        self.msg['Subject'] = Header("长春大学成绩通知,请勿回复", "utf-8")
        self.msg['From'] = r"%s<atomuser@139.com>" % Header("www.a-tom.win", "utf-8")
        self.msg['To'] = self.rcpt
        html_part = MIMEText(self.table, 'html')
        html_part.set_charset('gbk')
        self.msg.attach(html_part)

    def send(self):
        """Send the message; return a Chinese status string (success/failure)."""
        try:
            s = smtplib.SMTP('smtp.139.com')
            # NOTE(review): hard-coded credentials; move to config/env vars
            s.login('atomuser', '849801576')
            s.sendmail(self.sender, self.rcpt, self.msg.as_string())
            return '邮件发送成功,请登录邮箱查收...'
        except Exception:
            return '邮件发送失败... '

    def __del__(self):
        pass
| ccuemail.py | 1,100 | author : 陈熙 encoding:utf-8'ccu_queryresul@139.com' | 50 | en | 0.439844 |
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
"""Test update ranges on DB using Click testing
https://click.palletsprojects.com/en/7.x/testing/
"""
from datacube_ows.update_ranges_impl import main
def test_updates_ranges_schema(runner, role_name):
    """--schema together with --role succeeds and locates every SQL resource."""
    result = runner.invoke(main, ["--schema", "--role", role_name])
    assert result.exit_code == 0
    assert "Cannot find SQL resource" not in result.output
def test_update_ranges_views(runner):
    """--views succeeds and locates every SQL resource."""
    result = runner.invoke(main, ["--views"])
    assert result.exit_code == 0
    assert "Cannot find SQL resource" not in result.output
def test_update_version(runner):
    """--version prints the datacube-ows version banner and exits cleanly."""
    result = runner.invoke(main, ["--version"])
    assert result.exit_code == 0
    assert "Open Data Cube Open Web Services (datacube-ows) version" in result.output
def test_update_ranges_product(runner, product_name):
    """Updating ranges for a known product succeeds without errors."""
    result = runner.invoke(main, [product_name])
    assert result.exit_code == 0
    assert "ERROR" not in result.output
def test_update_ranges_bad_product(runner, product_name):
    """An unknown product name is reported back and exits with code 1."""
    result = runner.invoke(main, ["not_a_real_product_name"])
    assert result.exit_code == 1
    assert "Unrecognised product name" in result.output
    assert "not_a_real_product_name" in result.output
def test_update_ranges(runner):
    """Running with no arguments updates all ranges without errors."""
    result = runner.invoke(main)
    assert result.exit_code == 0
    assert "ERROR" not in result.output
def test_update_ranges_misuse_cases(runner, role_name, product_name):
    """Every invalid flag combination apologises and exits with code 1."""
    bad_invocations = [
        ["--schema"],
        ["--role", role_name],
        ["--views", product_name],
        ["--schema", product_name],
    ]
    for args in bad_invocations:
        result = runner.invoke(main, args)
        assert "Sorry" in result.output
        assert result.exit_code == 1
| integration_tests/test_update_ranges.py | 2,111 | Test update ranges on DB using Click testing
https://click.palletsprojects.com/en/7.x/testing/
This file is part of datacube-ows, part of the Open Data Cube project. See https://opendatacube.org for more information. Copyright (c) 2017-2021 OWS Contributors SPDX-License-Identifier: Apache-2.0 | 295 | en | 0.575564 |
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
    """Exit codes"""

    OK = 0
    # Session / authentication failures
    NO_SESSION = 2
    NOT_AUTHORIZED = 3
    # Device / remote-access configuration problems
    DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
    DEVICE_NO_PASSTHROUGH_CONFIG = 6
    DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
    MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
    UNKNOWN = 9
    # Local tooling problems
    SSH_NOT_FOUND = 10
    TIMEOUT_WAIT_FOR_PORT = 11
    COMMAND_NOT_FOUND = 12
    # Plugin problems
    PLUGIN_EXECUTION_ERROR = 20
    PLUGIN_INVALID_FORMAT = 21
    PLUGIN_NOT_FOUND = 22
    # Used by the SIGINT handler to exit on Ctrl-C
    TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
    """Local proxy context

    Holds every user-configurable option of the local proxy plus the click
    context it was created from. The class attributes below are defaults
    that ``fromdict`` overwrites from a supplied dictionary.

    NOTE(review): the attributes have no type annotations, so the
    ``@dataclasses.dataclass`` decorator generates no fields for them;
    initialisation happens entirely in the hand-written ``__init__``.
    """

    # Cumulocity connection settings
    host = ""
    device = ""            # device external id
    external_type = ""
    config = ""            # remote access configuration name (empty = first match)
    tenant = ""
    user = ""
    token = ""
    password = ""
    tfa_code = ""
    # Local TCP server settings
    port = 0
    ping_interval = 0
    kill = False
    tcp_size = 0
    tcp_timeout = 0
    # Behaviour flags
    verbose = False
    ignore_ssl_validate = False
    reconnects = 0
    ssh_user = ""
    additional_args = None
    disable_prompts = False
    env_file = None
    store_token = False
    # Seconds to wait for the local port to open before giving up
    wait_port_timeout = 60.0

    def __init__(self, ctx: click.Context, src_dict: Optional[Dict[str, Any]] = None) -> None:
        self._ctx = ctx
        if src_dict is not None:
            self.fromdict(src_dict)

        # Logging is configured as a side effect of building the context
        configure_logger(CliLogger.log_path(), self.verbose)

    @property
    def _root_context(self) -> Dict[str, Any]:
        # Shared dict stored on the root click context (created on demand)
        return self._ctx.find_root().ensure_object(dict)

    @property
    def used_port(self) -> int:
        """Get the port used by the local proxy

        Returns:
            int: Port number
        """
        return self._root_context.get("used_port", self.port)

    @used_port.setter
    def used_port(self, value: int):
        """Store the port used by the local proxy for later reference

        Args:
            value (int): Port number
        """
        self._root_context["used_port"] = value

    def exit_server_not_ready(self) -> NoReturn:
        """Exit with a server not ready error

        Returns:
            NoReturn: The function does not return
        """
        self.show_error(
            "Timed out waiting for local port to open: "
            f"port={self.used_port}, timeout={self.wait_port_timeout}s"
        )
        self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)

    def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
        """Load proxy settings from a dictionary

        Args:
            src_dict (Dict[str, Any]): [description]

        Returns:
            ProxyContext: Proxy options after the values have been set
                via the dictionary
        """
        logging.info("Loading from dictionary")
        assert isinstance(src_dict, dict)
        # Only keys that match an existing attribute are applied
        for key, value in src_dict.items():
            logging.info("reading key: %s=%s", key, value)
            if hasattr(self, key):
                setattr(self, key, value)
        return self

    def start_background(self, ctx: click.Context = None) -> "ProxyContext":
        """Start the local proxy in the background

        Returns:
            ProxyContext: Reference to the proxy context so it can be chained
                with other commands or used after the initialization of the class.
        """
        cur_ctx = ctx or self._ctx
        connection_data = pre_start_checks(cur_ctx, self)
        ready_signal = threading.Event()
        run_proxy_in_background(
            cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
        )
        # Block until the proxy signals it is accepting connections
        if not ready_signal.wait(self.wait_port_timeout):
            self.exit_server_not_ready()

        return self

    def start(self, ctx: click.Context = None) -> None:
        """Start the local proxy in the background

        Returns:
            ProxyContext: Reference to the proxy context so it can be chained
                with other commands or used after the initialization of the class.
        """
        cur_ctx = ctx or self._ctx
        connection_data = pre_start_checks(cur_ctx, self)
        # Blocking call: serves until stopped or interrupted
        start_proxy(cur_ctx, self, connection_data=connection_data)

    @classmethod
    def show_message(cls, msg: str, *args, **kwargs):
        """Show an message to the user and log it

        Args:
            msg (str): User message to print on the console
        """
        click.secho(msg, fg="green")
        logging.info(msg, *args, **kwargs)

    def show_error(self, msg: str, *args, **kwargs):
        """Show an error to the user and log it

        Args:
            msg (str): User message to print on the console
        """
        # In verbose mode the console handler already echoes log records
        if not self.verbose:
            click.secho(msg, fg="red")

        logging.warning(msg, *args, **kwargs)

    def show_info(self, msg: str, *args, **kwargs):
        """Show an info message to the user and log it

        Args:
            msg (str): User message to print on the console
        """
        # NOTE(review): logs at warning level, presumably so the message
        # always reaches the log file — confirm intent
        if not self.verbose:
            click.secho(msg)

        logging.warning(msg, *args, **kwargs)

    def show_warning(self, msg: str, *args, **kwargs):
        """Show a warning to the user and log it

        Args:
            msg (str): User message to print on the console
        """
        if not self.verbose:
            click.secho(msg, fg="yellow")

        logging.warning(msg, *args, **kwargs)

    def set_env(self):
        """Set environment variables so information about the proxy can
        be access by plugins
        """
        os.environ["C8Y_HOST"] = str(self.host)
        os.environ["PORT"] = str(self.used_port)
        os.environ["DEVICE"] = self.device
        # Support WSL environments: expose these variables to WSL as well
        os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
    """Remote access connection data"""

    client: CumulocityClient    # authenticated client used for the tunnel
    managed_object_id: str      # Cumulocity managed object id of the device
    remote_config_id: str       # id of the PASSTHROUGH remote-access config
# Remote-access protocol type required for the local proxy tunnel
PASSTHROUGH = "PASSTHROUGH"
# Managed-object fragment listing a device's remote access configurations
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
    """Helper for locating the persistent CLI log file."""

    # pylint: disable=too-few-public-methods

    @classmethod
    def log_path(cls) -> pathlib.Path:
        """Return the path of the persistent log file.

        The directory defaults to ~/.c8ylp/ and can be overridden with the
        C8YLP_LOG_DIR environment variable.
        """
        log_dir = os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")
        return pathlib.Path(log_dir).expanduser() / "localproxy.log"
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
    """Configure logger

    Args:
        path (pathlib.Path): Path where the persistent logger should write to.
        verbose (bool, optional): Use verbose logging. Defaults to False.

    Returns:
        logging.Logger: Created logger
    """
    path.parent.mkdir(parents=True, exist_ok=True)

    root = logging.getLogger()
    root.setLevel(logging.INFO)

    if verbose:
        console_formatter = logging.Formatter("[c8ylp] %(levelname)-5s %(message)s")
        if root.handlers:
            # Re-purpose the pre-existing first handler for console output
            first = root.handlers[0]
            first.setLevel(logging.INFO)
            first.setFormatter(console_formatter)
        else:
            console = logging.StreamHandler()
            console.setFormatter(console_formatter)
            console.setLevel(logging.INFO)
            root.addHandler(console)
    else:
        # Quiet mode: drop any console handlers and log to file only
        root.handlers = []

    # Persistent log: at most 5 rotated files of 10 MB each
    file_handler = RotatingFileHandler(
        filename=str(path), maxBytes=10000000, backupCount=5
    )
    file_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
        )
    )
    file_handler.setLevel(logging.INFO)
    root.addHandler(file_handler)
    return root
def signal_handler(_signal, _frame):
    """Signal handler: exit immediately with the TERMINATE exit code."""
    sys.exit(ExitCodes.TERMINATE)
def register_signals():
    """Register signal handlers"""
    # Translate Ctrl-C (SIGINT) into a clean SystemExit via signal_handler
    signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
    """Create Cumulocity client and prompt for missing credentials
    if necessary.

    Exits with NO_SESSION if no host was provided or login keeps failing
    after the retries are exhausted.

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options

    Returns:
        CumulocityClient: Configured Cumulocity client
    """
    if not opts.disable_prompts and not opts.host:
        opts.host = click.prompt(
            text="Enter the Cumulocity Host/URL",
        )

    client = CumulocityClient(
        hostname=opts.host,
        tenant=opts.tenant,
        user=opts.user,
        password=opts.password,
        tfacode=opts.tfa_code,
        token=opts.token,
        ignore_ssl_validate=opts.ignore_ssl_validate,
    )

    if not client.url:
        # Fixed message: adjacent string literals were missing a space
        # ("...set viaenvironment...")
        opts.show_error(
            "No Cumulocity host was provided. The host can be set via "
            "environment variables, arguments or the env-file"
        )
        ctx.exit(ExitCodes.NO_SESSION)

    logging.info("Checking tenant id")
    client.validate_tenant_id()

    # Retry logging so the user can be prompted for
    # their credentials/TFA code etc. without having to run c8ylp again
    retries = 3
    success = False
    while retries:
        try:
            if client.token:
                client.validate_credentials()
            else:
                client.login()

            if opts.env_file and opts.store_token:
                store_credentials(opts, client)

            success = True
            break
        except CumulocityMissingTFAToken:
            # Two-factor auth required: ask for the TFA token and retry
            client.tfacode = click.prompt(
                text="Enter your Cumulocity TFA-Token", hide_input=False
            )
        except Exception as ex:
            logging.info("unknown exception: %s", ex)

            # Re-prompt only for credentials that are still missing
            if not opts.disable_prompts:
                if not client.user:
                    client.user = click.prompt(
                        text="Enter your Cumulocity Username",
                    )

                if not client.password:
                    client.password = click.prompt(
                        text="Enter your Cumulocity Password [input hidden]",
                        hide_input=True,
                    )

        retries -= 1

    if not success:
        logging.info("Could not create client")
        ctx.exit(ExitCodes.NO_SESSION)

    return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
    """Store credentials to the environment file. It creates
    the file if it does not already exist.

    The file will only be written to if it has changed.

    Args:
        opts (ProxyContext): Proxy options
        client (CumulocityClient): Cumulocity client containing valid
            credentials
    """
    # Deliberately excludes the password; only the session token is persisted
    values = {
        "C8Y_HOST": client.url,
        "C8Y_USER": client.user,
        "C8Y_TENANT": client.tenant,
        "C8Y_TOKEN": client.token,
    }
    if save_env(opts.env_file, values):
        opts.show_message(f"Env file was updated: {opts.env_file}")
    else:
        opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
    """Get the remote access configuration id matching a specific type
    from a device managed object

    Args:
        mor (Dict[str, Any]): Device managed object
        config (str): Expected configuration type

    Returns:
        str: Remote access configuration id
    """
    device_name = mor.get("name", "<<empty_name>>")

    if REMOTE_ACCESS_FRAGMENT not in mor:
        logging.error(
            'No Remote Access Configuration has been found for device "%s"', device_name
        )
        ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)

    # Only PASSTHROUGH configurations can carry the local proxy tunnel
    valid_configs = [
        entry
        for entry in mor.get(REMOTE_ACCESS_FRAGMENT, [])
        if entry.get("protocol") == PASSTHROUGH
    ]

    if not valid_configs:
        logging.error(
            'No config with protocol set to "%s" has been found for device "%s"',
            PASSTHROUGH,
            device_name,
        )
        ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)

    def extract_config_id(matching_config):
        # Announce which configuration will be used before returning its id
        logging.info(
            'Using Configuration with Name "%s" and Remote Port %s',
            matching_config.get("name"),
            matching_config.get("port"),
        )
        return matching_config.get("id")

    if not config:
        # No name requested: fall back to the first passthrough config
        return extract_config_id(valid_configs[0])

    # A name was requested: match it case-insensitively
    wanted = config.casefold()
    matches = [
        entry for entry in valid_configs if entry.get("name", "").casefold() == wanted
    ]
    if not matches:
        logging.error(
            'Provided config name "%s" for "%s" was not found or none with protocal set to "%s"',
            config,
            device_name,
            PASSTHROUGH,
        )
        ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)

    return extract_config_id(matches[0])
def run_proxy_in_background(
    ctx: click.Context,
    opts: ProxyContext,
    connection_data: RemoteAccessConnectionData,
    ready_signal: Optional[threading.Event] = None,
):
    """Run the proxy in a background thread

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Remote access connection data
        ready_signal (threading.Event, optional): Set once the proxy is
            accepting connections and the plugin env variables are in place.
    """
    stop_signal = threading.Event()
    _local_ready_signal = threading.Event()

    # register signals as the proxy will be starting in a background thread
    # to enable the proxy to run as a subcommand
    register_signals()

    # Start the proxy in a background thread so the user can
    background = threading.Thread(
        target=start_proxy,
        args=(ctx, opts),
        kwargs=dict(
            connection_data=connection_data,
            stop_signal=stop_signal,
            ready_signal=_local_ready_signal,
        ),
        daemon=True,
    )
    background.start()

    # Block until the local proxy is ready to accept connections
    if not _local_ready_signal.wait(opts.wait_port_timeout):
        opts.exit_server_not_ready()

    # Inject custom env variables for use within the script
    opts.set_env()

    # The subcommand is called after this
    timer = CommandTimer("Duration", on_exit=click.echo).start()

    # Shutdown the server once the plugin has been run
    @ctx.call_on_close
    def _shutdown_server_thread():
        stop_signal.set()
        background.join()
        timer.stop_with_message()

    # Only set ready signal once the whole env include env variables has
    # been setup
    if ready_signal:
        ready_signal.set()
def pre_start_checks(
    ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
    """Run prestart checks before starting the local proxy

    Resolves the device managed object and its PASSTHROUGH configuration,
    and verifies the user holds the remote-access role.

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options

    Returns:
        Optional[RemoteAccessConnectionData]: Remote access connection data
    """
    try:
        client = create_client(ctx, opts)
        mor = client.get_managed_object(opts.device, opts.external_type)
        config_id = get_config_id(ctx, mor, opts.config)
        device_id = mor.get("id")

        is_authorized = client.validate_remote_access_role()
        if not is_authorized:
            opts.show_error(
                "The user is not authorized to use Cloud Remote Access. "
                f"Contact your Cumulocity Admin. user={opts.user}",
            )
            ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
    except Exception as ex:
        if isinstance(ex, click.exceptions.Exit):
            opts.show_error(f"Could not retrieve device information. reason={ex}")
            # re-raise existing exit
            raise

        # Only append settings that the exception text does not already show
        error_context = ""
        extra_details = []
        if opts.host and opts.host not in str(ex):
            extra_details.append(f"host={opts.host or ''}")
        if opts.user and opts.user not in str(ex):
            extra_details.append(f"user={opts.user or ''}")
        if extra_details:
            error_context = ". settings: " + ", ".join(extra_details)

        opts.show_error(
            "Unexpected error when retrieving device information from Cumulocity. "
            f"error_details={ex}{error_context}"
        )
        ctx.exit(ExitCodes.NOT_AUTHORIZED)
    return RemoteAccessConnectionData(
        client=client, managed_object_id=device_id, remote_config_id=config_id
    )
def start_proxy(
    ctx: click.Context,
    opts: ProxyContext,
    connection_data: RemoteAccessConnectionData,
    stop_signal: Optional[threading.Event] = None,
    ready_signal: Optional[threading.Event] = None,
) -> NoReturn:
    """Start the local proxy

    Serves the TCP proxy until the server thread dies, the stop signal is
    set, or an interrupt arrives; then shuts everything down.

    Args:
        ctx (click.Context): Click context
        opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Resolved device/config
        stop_signal (threading.Event, optional): When set, stop serving
        ready_signal (threading.Event, optional): Set once accepting
            connections
    """
    # pylint: disable=too-many-branches,too-many-statements
    is_main_thread = threading.current_thread() is threading.main_thread()
    if is_main_thread:
        register_signals()
    client_opts = {
        "host": opts.host,
        "config_id": connection_data.remote_config_id,
        "device_id": connection_data.managed_object_id,
        "session": connection_data.client.session,
        "token": opts.token,
        "ignore_ssl_validate": opts.ignore_ssl_validate,
        "ping_interval": opts.ping_interval,
        "max_retries": 2,
    }
    tcp_server = None
    background = None
    try:
        tcp_server = TCPProxyServer(
            opts.port,
            WebsocketClient(**client_opts),
            opts.tcp_size,
            opts.tcp_timeout,
        )
        exit_code = ExitCodes.OK
        click.secho(BANNER1)
        logging.info("Starting tcp server")
        background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
        background.start()
        # Block until the local proxy is ready to accept connections
        if not tcp_server.wait_for_running(opts.wait_port_timeout):
            opts.exit_server_not_ready()
        # store the used port for reference to later
        if tcp_server.server.socket:
            opts.used_port = tcp_server.server.socket.getsockname()[1]
        # Plugins start in a background thread so don't display it
        # as the plugins should do their own thing
        if is_main_thread:
            opts.show_info(
                f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
            )
            ssh_username = opts.ssh_user or "<device_username>"
            opts.show_message(
                f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
                "following in a new tab/console:\n\n"
                f"\tssh -p {opts.used_port} {ssh_username}@localhost",
            )
            opts.show_info("\nPress ctrl-c to shutdown the server")
        if ready_signal:
            ready_signal.set()
        # loop, waiting for server to stop
        while background.is_alive():
            if stop_signal and stop_signal.is_set():
                break
            time.sleep(1)
            logging.debug(
                "Waiting in background: alive=%s",
                background.is_alive(),
            )
    except Exception as ex:
        if isinstance(ex, click.exceptions.Exit):
            # propagate exit code
            exit_code = getattr(ex, "exit_code")
            raise
        if str(ex):
            opts.show_error(
                "The local proxy TCP Server experienced an unexpected error. "
                f"port={opts.port}, error={ex}"
            )
        exit_code = ExitCodes.UNKNOWN
    finally:
        # Always tear down the server and its thread before exiting
        if tcp_server:
            tcp_server.shutdown()
        if background:
            background.join()
        if is_main_thread:
            if int(exit_code) == 0:
                opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
            else:
                opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
            ctx.exit(exit_code)
        else:
            # Background invocation: leave exit handling to the caller
            opts.show_info("Exiting")
| c8ylp/cli/core.py | 21,548 | CLI Logger
Exit codes
Local proxy context
Remote access connection data
Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
Exit with a server not ready error
Returns:
NoReturn: The function does not return
Load proxy settings from a dictionary
Args:
src_dict (Dict[str, Any]): [description]
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
Get the log path
Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
Register signal handlers
Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
connection_data (RemoteAccessConnectionData): Remote access connection data
Set environment variables so information about the proxy can
be access by plugins
Show an error to the user and log it
Args:
msg (str): User message to print on the console
Show an info message to the user and log it
Args:
msg (str): User message to print on the console
Show an message to the user and log it
Args:
msg (str): User message to print on the console
Show a warning to the user and log it
Args:
msg (str): User message to print on the console
Signal handler
Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
Get the port used by the local proxy
Returns:
int: Port number
Store the port used by the local proxy for later reference
Args:
value (int): Port number
Exit codes
Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors SPDX-License-Identifier: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Support WSL environments and expose variables to be explosed to WSL pylint: disable=too-few-public-methods Set default log format ignore console log messages Remove default console logging and only use file logging Max 5 log files each 10 MB. Log to Rotating File Retry logging so the user can be prompted for their credentials/TFA code etc. without having to run c8ylp again Note: Don't save password! use first config find config matching name register signals as the proxy will be starting in a background thread to enable the proxy to run as a subcommand Start the proxy in a background thread so the user can Block until the local proxy is ready to accept connections Inject custom env variables for use within the script The subcommand is called after this Shutdown the server once the plugin has been run Only set ready signal once the whole env include env variables has been setup re-raise existing exit pylint: disable=too-many-branches,too-many-statements Block until the local proxy is ready to accept connections store the used port for reference to later Plugins start in a background thread so don't display it as the plugins should do their own thing loop, waiting for server to stop propagate exit code | 4,785 | en | 0.75027 |
import pandas as pd
import numpy as np
from numpy.linalg import inv
def get_ffme_returns():
    """
    Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
    """
    # Raw file holds percent returns; -99.99 marks missing observations.
    raw = pd.read_csv("data/Portfolios_Formed_on_ME_monthly_EW.csv",
                      header=0, index_col=0, na_values=-99.99)
    deciles = raw[['Lo 10', 'Hi 10']].rename(
        columns={'Lo 10': 'SmallCap', 'Hi 10': 'LargeCap'})
    deciles = deciles/100
    deciles.index = pd.to_datetime(deciles.index, format="%Y%m").to_period('M')
    return deciles
def get_fff_returns():
    """
    Load the Fama-French Research Factor Monthly Dataset
    """
    factors = pd.read_csv("data/F-F_Research_Data_Factors_m.csv",
                          header=0, index_col=0, na_values=-99.99)
    factors = factors/100  # file stores percentages
    factors.index = pd.to_datetime(factors.index, format="%Y%m").to_period('M')
    return factors
def get_hfi_returns():
    """
    Load and format the EDHEC Hedge Fund Index Returns
    """
    # percent -> decimal, then collapse the timestamps to monthly periods
    returns = pd.read_csv("data/edhec-hedgefundindices.csv",
                          header=0, index_col=0, parse_dates=True)/100
    returns.index = returns.index.to_period('M')
    return returns
def get_ind_file(filetype, weighting="vw", n_inds=30):
    """
    Load and format the Ken French Industry Portfolios files.

    filetype: one of "returns", "nfirms", "size"
    weighting: one of "ew", "vw" (only used for the returns file)
    n_inds: number of industries, 30 or 49
    Raises ValueError for an unknown filetype.
    """
    # Fix: the original compared strings with `is`, which relies on CPython
    # string interning (and is a SyntaxWarning on 3.8+); use `==` instead.
    if filetype == "returns":
        name = f"{weighting}_rets"
        divisor = 100  # returns files are stored in percent
    elif filetype == "nfirms":
        name = "nfirms"
        divisor = 1
    elif filetype == "size":
        name = "size"
        divisor = 1
    else:
        raise ValueError("filetype must be one of: returns, nfirms, size")
    ind = pd.read_csv(f"data/ind{n_inds}_m_{name}.csv", header=0, index_col=0, na_values=-99.99)/divisor
    ind.index = pd.to_datetime(ind.index, format="%Y%m").to_period('M')
    ind.columns = ind.columns.str.strip()
    return ind
def get_ind_returns(weighting="vw", n_inds=30):
    """
    Ken French industry portfolio monthly returns (thin wrapper over get_ind_file).
    """
    return get_ind_file("returns", weighting=weighting, n_inds=n_inds)
def get_ind_nfirms(n_inds=30):
    """
    Ken French industry portfolios: average number of firms per industry.
    """
    return get_ind_file("nfirms", n_inds=n_inds)
def get_ind_size(n_inds=30):
    """
    Ken French industry portfolios: average firm size (market cap) per industry.
    """
    return get_ind_file("size", n_inds=n_inds)
def get_ind_market_caps(n_inds=30, weights=False):
    """
    Load the industry portfolio data and derive the market caps.
    If weights=True, return each industry's share of total market cap instead.
    """
    # market cap = number of firms x average firm size
    mktcap = get_ind_nfirms(n_inds=n_inds) * get_ind_size(n_inds=n_inds)
    if not weights:
        return mktcap
    return mktcap.divide(mktcap.sum(axis=1), axis="rows")
def get_total_market_index_returns(n_inds=30):
    """
    Load the industry portfolio data and derive the returns of a
    capweighted total market index.
    """
    # Fix: weight each industry's return by its *share* of total market cap
    # (weights=True). The original multiplied returns by raw dollar market
    # caps, which does not yield a return series.
    ind_capweight = get_ind_market_caps(n_inds=n_inds, weights=True)
    ind_return = get_ind_returns(weighting="vw", n_inds=n_inds)
    total_market_return = (ind_capweight * ind_return).sum(axis="columns")
    return total_market_return
def skewness(r):
    """
    Alternative to scipy.stats.skew()
    Computes the skewness of the supplied Series or DataFrame
    Returns a float or a Series
    """
    clean = r[(r!=0) & (r.notnull())]
    centered = clean - clean.mean()
    # population standard deviation (ddof=0) to match the moment definition
    sigma = clean.std(ddof=0)
    third_moment = (centered**3).mean()
    return third_moment/sigma**3
def kurtosis(r):
    """
    Alternative to scipy.stats.kurtosis()
    Computes the (raw, non-excess) kurtosis of the supplied Series or DataFrame
    Returns a float or a Series
    """
    clean = r[(r!=0) & (r.notnull())]
    centered = clean - clean.mean()
    # population standard deviation (ddof=0) to match the moment definition
    sigma = clean.std(ddof=0)
    fourth_moment = (centered**4).mean()
    return fourth_moment/sigma**4
def compound(r):
    """
    returns the result of compounding the set of returns in r
    """
    # sum of log gross returns, then back to a simple return
    log_growth = np.log1p(r).sum()
    return np.expm1(log_growth)
def annualize_rets(r):
    """
    Annualizes a set of returns by compounding over the observed calendar
    span (first to last valid observation of each column).
    Works on a Series (scalar result) or DataFrame (per-column Series);
    the index must be date-like.
    """
    r_valid = r[(r!=0) & (r.notnull())]
    date_beg = r_valid.agg(lambda x: x.first_valid_index())
    date_end = r_valid.agg(lambda x: x.last_valid_index())
    # DataFrame input -> date_end-date_beg is a Series of Timedeltas (needs
    # the .dt accessor); Series input -> a scalar Timedelta (plain .days).
    # Fix: catch only the accessor failure instead of a bare except that
    # could hide genuine errors (e.g. a non-date index).
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except (AttributeError, TypeError):
        years_fraction = (date_end-date_beg).days/365.2425
    compounded_growth = (1+r_valid).prod()
    return compounded_growth**(1/years_fraction)-1
def annualize_vol(r):
    """
    Annualizes the vol of a set of returns, inferring the periods per year
    from the calendar span of the index (first to last valid observation).
    Works on a Series or a DataFrame with a date-like index.
    """
    r_valid = r[(r!=0) & (r.notnull())]
    total_num_periods = r_valid.count()
    date_beg = r_valid.agg(lambda x: x.first_valid_index())
    date_end = r_valid.agg(lambda x: x.last_valid_index())
    # DataFrame input needs the .dt accessor; Series input yields a scalar
    # Timedelta. Fix: narrow the bare except to the accessor failure only.
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except (AttributeError, TypeError):
        years_fraction = (date_end-date_beg).days/365.2425
    periods_per_year = total_num_periods/years_fraction
    return r_valid.std()*((periods_per_year)**0.5)
def sharpe_ratio(r, riskfree_rate):
    """
    Computes the annualized sharpe ratio of a set of returns, given an
    *annual* riskfree rate. The periodicity is inferred from the calendar
    span of the index, so the input must carry date-like indices.
    """
    r_valid = r[(r!=0) & (r.notnull())]
    total_num_periods = r_valid.count()
    date_beg = r_valid.agg(lambda x: x.first_valid_index())
    date_end = r_valid.agg(lambda x: x.last_valid_index())
    # DataFrame input needs the .dt accessor; Series input yields a scalar
    # Timedelta. Fix: narrow the bare except to the accessor failure only.
    try:
        years_fraction = (date_end-date_beg).dt.days/365.2425
    except (AttributeError, TypeError):
        years_fraction = (date_end-date_beg).days/365.2425
    periods_per_year = total_num_periods/years_fraction
    # convert the annual riskfree rate to a per-period rate
    rf_per_period = (1+riskfree_rate)**(1/periods_per_year)-1
    excess_ret = r - rf_per_period
    ann_ex_ret = annualize_rets(excess_ret)
    ann_vol = annualize_vol(r)
    return ann_ex_ret/ann_vol
import scipy.stats
def is_normal(r, level=0.01):
    """
    Applies the Jarque-Bera test to determine if a Series is normal or not
    Test is applied at the 1% level by default
    Returns True if the hypothesis of normality is accepted, False otherwise
    """
    if isinstance(r, pd.DataFrame):
        # apply column-wise and return a Series of booleans
        return r.aggregate(is_normal)
    _, p_value = scipy.stats.jarque_bera(r)
    return p_value > level
def drawdown(return_series: pd.Series):
    """Takes a time series of asset returns.
    returns a DataFrame with columns for
    the wealth index,
    the previous peaks, and
    the percentage drawdown
    """
    # wealth of $1000 compounded through the return series
    wealth = 1000*(1+return_series).cumprod()
    peaks = wealth.cummax()
    return pd.DataFrame({
        "Wealth": wealth,
        "Previous Peak": peaks,
        "Drawdown": (wealth - peaks)/peaks,
    })
def semideviation(r):
    """
    Returns the semideviation aka negative semideviation of r
    r must be a Series or a DataFrame, else raises a TypeError
    """
    if isinstance(r, pd.DataFrame):
        return r.aggregate(semideviation)
    if isinstance(r, pd.Series):
        # population std (ddof=0) of the strictly negative observations
        return r[r < 0].std(ddof=0)
    raise TypeError("Expected r to be a Series or DataFrame")
def var_historic(r, level=5):
    """
    Returns the historic Value at Risk at a specified level
    i.e. returns the number such that "level" percent of the returns
    fall below that number, and the (100-level) percent are above
    """
    r = r[(r!=0) & (r.notnull())]
    if isinstance(r, pd.Series):
        # VaR is reported as a positive number, hence the sign flip
        return -np.percentile(r, level)
    if isinstance(r, pd.DataFrame):
        return r.aggregate(var_historic, level=level)
    raise TypeError("Expected r to be a Series or DataFrame")
def cvar_historic(r, level=5):
    """
    Computes the Conditional VaR of Series or DataFrame
    """
    r = r[(r!=0) & (r.notnull())]
    if isinstance(r, pd.DataFrame):
        return r.aggregate(cvar_historic, level=level)
    if isinstance(r, pd.Series):
        # average of the returns at or beyond the historic VaR threshold
        in_tail = r <= -var_historic(r, level=level)
        return -r[in_tail].mean()
    raise TypeError("Expected r to be a Series or DataFrame")
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
    """
    Returns the Parametric Gaussian VaR of a Series or DataFrame
    If "modified" is True, then the modified VaR is returned,
    using the Cornish-Fisher modification
    """
    r = r[(r!=0) & (r.notnull())]
    # z-score of the requested tail under a Gaussian assumption
    z = norm.ppf(level/100)
    if modified:
        # Cornish-Fisher: adjust z for observed skewness (s) and kurtosis (k)
        s = skewness(r)
        k = kurtosis(r)
        z = (z
             + (z**2 - 1)*s/6
             + (z**3 - 3*z)*(k-3)/24
             - (2*z**3 - 5*z)*(s**2)/36)
    return -(r.mean() + z*r.std(ddof=0))
def portfolio_return(weights, returns):
    """
    Computes the return on a portfolio from constituent returns and weights
    weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
    """
    # inner product of weights and returns
    return weights.T.dot(returns)
def portfolio_vol(weights, covmat):
    """
    Computes the vol of a portfolio from a covariance matrix and constituent weights
    weights are a numpy array or N x 1 matrix and covmat is an N x N matrix
    """
    variance = weights.T @ covmat @ weights
    return variance**0.5
def plot_ef2(n_points, er, cov):
    """
    Plots the 2-asset efficient frontier.

    n_points: number of weight combinations to evaluate
    er: expected returns (length-2 vector)
    cov: 2 x 2 covariance matrix
    Raises ValueError if er or cov are not 2-asset sized.
    """
    # Fix: the original tested er.shape[0] twice and never validated cov
    if er.shape[0] != 2 or cov.shape[0] != 2:
        raise ValueError("plot_ef2 can only plot 2-asset frontiers")
    weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
    rets = [portfolio_return(w, er) for w in weights]
    vols = [portfolio_vol(w, cov) for w in weights]
    ef = pd.DataFrame({
        "Returns": rets,
        "Volatility": vols
    })
    return ef.plot.line(x="Volatility", y="Returns", style=".-")
from scipy.optimize import minimize
def minimize_vol(target_return, er, cov):
    """
    Returns the optimal weights that achieve the target return
    given a set of expected returns and a covariance matrix
    """
    n_assets = er.shape[0]
    guess = np.repeat(1/n_assets, n_assets)
    bounds = ((0.0, 1.0),) * n_assets  # long-only: each weight in [0, 1]
    # equality constraints: fully invested, and hit the target return
    fully_invested = {'type': 'eq',
                      'fun': lambda weights: np.sum(weights) - 1
                      }
    hits_target = {'type': 'eq',
                   'args': (er,),
                   'fun': lambda weights, er: target_return - portfolio_return(weights, er)
                   }
    result = minimize(portfolio_vol, guess,
                      args=(cov,), method='SLSQP',
                      options={'disp': False},
                      constraints=(fully_invested, hits_target),
                      bounds=bounds)
    return result.x
def tracking_error(r_a, r_b):
    """
    Returns the Tracking Error between the two return series
    """
    # root of the sum of squared deviations
    deviations = r_a - r_b
    return np.sqrt((deviations**2).sum())
def msr(riskfree_rate, er, cov):
    """
    Returns the weights of the portfolio that gives you the maximum sharpe ratio
    given the riskfree rate and expected returns and a covariance matrix
    """
    n_assets = er.shape[0]
    guess = np.repeat(1/n_assets, n_assets)
    bounds = ((0.0, 1.0),) * n_assets  # long-only weights
    fully_invested = {'type': 'eq',
                      'fun': lambda weights: np.sum(weights) - 1
                      }
    def neg_sharpe(weights, riskfree_rate, er, cov):
        """
        Negative of the sharpe ratio of the given portfolio
        (minimizing this maximizes the sharpe ratio).
        """
        excess = portfolio_return(weights, er) - riskfree_rate
        return -excess/portfolio_vol(weights, cov)
    result = minimize(neg_sharpe, guess,
                      args=(riskfree_rate, er, cov), method='SLSQP',
                      options={'disp': False},
                      constraints=(fully_invested,),
                      bounds=bounds)
    return result.x
def gmv(cov):
    """
    Returns the weights of the Global Minimum Volatility portfolio
    given a covariance matrix
    """
    n_assets = cov.shape[0]
    # with identical expected returns, maximizing sharpe minimizes vol
    return msr(0, np.repeat(1, n_assets), cov)
def optimal_weights(n_points, er, cov):
    """
    Returns a list of weights that represent a grid of n_points on the efficient frontier
    """
    # evenly spaced target returns between the lowest and highest asset return
    target_returns = np.linspace(er.min(), er.max(), n_points)
    return [minimize_vol(target, er, cov) for target in target_returns]
def plot_ef(n_points, er, cov, style='.-', legend=False, show_cml=False, riskfree_rate=0, show_ew=False, show_gmv=False):
    """
    Plots the multi-asset efficient frontier, optionally overlaying the
    capital market line (CML), the equal-weight (EW) portfolio and the
    global minimum volatility (GMV) portfolio.
    """
    weights = optimal_weights(n_points, er, cov)
    frontier = pd.DataFrame({
        "Returns": [portfolio_return(w, er) for w in weights],
        "Volatility": [portfolio_vol(w, cov) for w in weights],
    })
    ax = frontier.plot.line(x="Volatility", y="Returns", style=style, legend=legend)
    if show_cml:
        ax.set_xlim(left = 0)
        # line from the riskfree asset through the max-sharpe portfolio
        w_msr = msr(riskfree_rate, er, cov)
        r_msr = portfolio_return(w_msr, er)
        vol_msr = portfolio_vol(w_msr, cov)
        ax.plot([0, vol_msr], [riskfree_rate, r_msr],
                color='green', marker='o', linestyle='dashed', linewidth=2, markersize=10)
    if show_ew:
        n = er.shape[0]
        w_ew = np.repeat(1/n, n)
        # mark the naive equal-weight portfolio
        ax.plot([portfolio_vol(w_ew, cov)], [portfolio_return(w_ew, er)],
                color='goldenrod', marker='o', markersize=10)
    if show_gmv:
        w_gmv = gmv(cov)
        # mark the global minimum volatility portfolio
        ax.plot([portfolio_vol(w_gmv, cov)], [portfolio_return(w_gmv, er)],
                color='midnightblue', marker='o', markersize=10)
    return ax
def run_cppi(risky_r, safe_r=None, m=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
    """
    Run a backtest of the CPPI strategy, given a set of returns for the risky asset.

    risky_r: Series or DataFrame of risky-asset returns
    safe_r: returns of the safe asset; defaults to riskfree_rate/12 per period
    m: CPPI multiplier applied to the cushion
    start: initial account value
    floor: floor as a fraction of start (superseded when drawdown is given)
    drawdown: if set, maintain a trailing max-drawdown floor instead
    Returns a dictionary containing: Asset Value History, Risk Budget History,
    Risky Weight History, and the backtest inputs.
    """
    # set up the CPPI parameters
    dates = risky_r.index
    n_steps = len(dates)
    account_value = start
    floor_value = start*floor
    peak = account_value
    if isinstance(risky_r, pd.Series):
        risky_r = pd.DataFrame(risky_r, columns=["R"])

    if safe_r is None:
        safe_r = pd.DataFrame().reindex_like(risky_r)
        safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number
    # set up some DataFrames for saving intermediate values
    account_history = pd.DataFrame().reindex_like(risky_r)
    risky_w_history = pd.DataFrame().reindex_like(risky_r)
    cushion_history = pd.DataFrame().reindex_like(risky_r)
    floorval_history = pd.DataFrame().reindex_like(risky_r)
    peak_history = pd.DataFrame().reindex_like(risky_r)

    for step in range(n_steps):
        if drawdown is not None:
            # trailing floor: a fixed fraction below the running peak
            peak = np.maximum(peak, account_value)
            floor_value = peak*(1-drawdown)
        cushion = (account_value - floor_value)/account_value
        risky_w = m*cushion
        risky_w = np.minimum(risky_w, 1)  # no leverage
        risky_w = np.maximum(risky_w, 0)  # no shorting
        safe_w = 1-risky_w
        risky_alloc = account_value*risky_w
        safe_alloc = account_value*safe_w
        # recompute the new account value at the end of this step
        account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])
        # save the histories for analysis and plotting
        cushion_history.iloc[step] = cushion
        risky_w_history.iloc[step] = risky_w
        account_history.iloc[step] = account_value
        floorval_history.iloc[step] = floor_value
        peak_history.iloc[step] = peak
    risky_wealth = start*(1+risky_r).cumprod()
    # Fix: the original dict listed "floor" twice (the scalar parameter and
    # the floor-value history); the later entry silently won, so the scalar
    # was never returned. Keep the effective behavior ("floor" -> history)
    # and drop the shadowed duplicate.
    backtest_result = {
        "Wealth": account_history,
        "Risky Wealth": risky_wealth,
        "Risk Budget": cushion_history,
        "Risky Allocation": risky_w_history,
        "m": m,
        "start": start,
        "risky_r":risky_r,
        "safe_r": safe_r,
        "drawdown": drawdown,
        "peak": peak_history,
        "floor": floorval_history
    }
    return backtest_result
def summary_stats(r, riskfree_rate=0.03):
    """
    Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
    """
    max_dd = r.aggregate(lambda col: drawdown(col).Drawdown.min())
    return pd.DataFrame({
        "Annualized Return": annualize_rets(r),
        "Annualized Vol": annualize_vol(r),
        "Skewness": r.aggregate(skewness),
        "Kurtosis": r.aggregate(kurtosis),
        "Cornish-Fisher VaR (5%)": r.aggregate(var_gaussian, modified=True),
        "Historic CVaR (5%)": r.aggregate(cvar_historic),
        "Sharpe Ratio": sharpe_ratio(r, riskfree_rate=riskfree_rate),
        "Max Drawdown": max_dd,
    })
def gbm(n_years = 10, n_scenarios=1000, mu=0.07, sigma=0.15, steps_per_year=12, s_0=100.0, prices=True):
    """
    Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
    :param n_years: The number of years to generate data for
    :param n_scenarios: The number of scenarios/trajectories
    :param mu: Annualized Drift, e.g. Market Return
    :param sigma: Annualized Volatility
    :param steps_per_year: granularity of the simulation
    :param s_0: initial value
    :param prices: if True return price paths, else per-step returns
    :return: a DataFrame (or array) of n_scenarios columns and n_years*steps_per_year+1 rows
    """
    dt = 1/steps_per_year
    n_steps = int(n_years*steps_per_year) + 1
    # draw gross per-step returns; using (1+mu)**dt as the mean growth
    # avoids the discretization error of the mu*dt+1 approximation
    rets_plus_1 = np.random.normal(loc=(1+mu)**dt, scale=(sigma*np.sqrt(dt)), size=(n_steps, n_scenarios))
    rets_plus_1[0] = 1  # first row is the starting point: no return yet
    if prices:
        return s_0*pd.DataFrame(rets_plus_1).cumprod()
    return rets_plus_1 - 1
import statsmodels.api as sm
def regress(dependent_variable, explanatory_variables, alpha=True):
    """
    Runs a linear regression to decompose the dependent variable into the explanatory variables
    returns an object of type statsmodel's RegressionResults on which you can call
       .summary() to print a full summary
       .params for the coefficients
       .tvalues and .pvalues for the significance levels
       .rsquared_adj and .rsquared for quality of fit
    """
    if alpha:
        # work on a copy so the caller's DataFrame is not mutated
        explanatory_variables = explanatory_variables.copy()
        explanatory_variables["Alpha"] = 1
    return sm.OLS(dependent_variable, explanatory_variables).fit()
def portfolio_tracking_error(weights, ref_r, bb_r):
    """
    returns the tracking error between the reference returns
    and a portfolio of building block returns held with given weights
    """
    portfolio_r = (weights*bb_r).sum(axis=1)
    # tracking error: root of summed squared deviations (inlined)
    return np.sqrt(((ref_r - portfolio_r)**2).sum())
def style_analysis(dependent_variable, explanatory_variables):
    """
    Returns the optimal weights that minimizes the Tracking error between
    a portfolio of the explanatory variables and the dependent variable
    """
    n_bb = explanatory_variables.shape[1]
    guess = np.repeat(1/n_bb, n_bb)
    bounds = ((0.0, 1.0),) * n_bb  # long-only weights
    fully_invested = {'type': 'eq',
                      'fun': lambda weights: np.sum(weights) - 1
                      }
    solution = minimize(portfolio_tracking_error, guess,
                        args=(dependent_variable, explanatory_variables,), method='SLSQP',
                        options={'disp': False},
                        constraints=(fully_invested,),
                        bounds=bounds)
    return pd.Series(solution.x, index=explanatory_variables.columns)
def ff_analysis(r, factors):
    """
    Returns the loadings of r on the Fama French Factors
    which can be read in using get_fff_returns()
    the index of r must be a (not necessarily proper) subset of the index of factors
    r is either a Series or a DataFrame
    """
    if isinstance(r, pd.DataFrame):
        # recurse column by column
        return pd.DataFrame({col: ff_analysis(r[col], factors) for col in r.columns})
    if isinstance(r, pd.Series):
        return regress(r, factors.loc[r.index]).params
    raise TypeError("r must be a Series or a DataFrame")
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
    """
    Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
    If supplied a set of capweights and a capweight tether, it is applied and reweighted
    """
    ew = pd.Series(1/len(r.columns), index=r.columns)
    if cap_weights is not None:
        cw = cap_weights.loc[r.index[0]] # cap weights at the window start
        if microcap_threshold is not None and microcap_threshold > 0:
            # zero out microcaps, then renormalize
            ew[cw < microcap_threshold] = 0
            ew = ew/ew.sum()
        if max_cw_mult is not None and max_cw_mult > 0:
            # tether each weight to a multiple of its cap weight
            ew = np.minimum(ew, cw*max_cw_mult)
            ew = ew/ew.sum() # reweight
    return ew
def weight_cw(r, cap_weights, **kwargs):
    """
    Returns the weights of the CW portfolio based on the time series of capweights
    """
    # NOTE(review): this picks the *second* date of the window (index[1]),
    # whereas weight_ew uses index[0] — looks inconsistent; confirm intent.
    cw = cap_weights.loc[r.index[1]]
    return cw/cw.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, verbose=False, **kwargs):
    """
    Backtests a given weighting scheme, given some parameters:
    r : asset returns to use to build the portfolio
    estimation_window: the window to use to estimate parameters
    weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
    """
    n_periods = r.shape[0]
    # one estimation window per rebalancing date
    windows = [(beg, beg+estimation_window) for beg in range(n_periods-estimation_window)]
    weight_rows = [weighting(r.iloc[beg:end], **kwargs) for beg, end in windows]
    weights = pd.DataFrame(weight_rows,
                           index=r.iloc[estimation_window:].index,
                           columns=r.columns)
    # min_count=1 keeps all-NA rows as NA instead of collapsing them to 0
    returns = (weights * r).sum(axis="columns", min_count=1)
    return returns
def sample_cov(r, **kwargs):
    """
    Returns the sample covariance matrix of the supplied returns
    (extra keyword arguments are accepted for estimator interchangeability
    and ignored).
    """
    return r.cov()
def weight_gmv(r, cov_estimator=sample_cov, **kwargs):
    """
    Produces the weights of the GMV portfolio given a covariance matrix of the returns
    """
    # estimate the covariance, then solve for the minimum-vol weights
    return gmv(cov_estimator(r, **kwargs))
def cc_cov(r, **kwargs):
    """
    Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
    """
    corr = r.corr()
    n = corr.shape[0]
    # mean off-diagonal correlation (the diagonal of 1s is excluded)
    rho_bar = (corr.values.sum()-n)/(n*(n-1))
    const_corr = np.full_like(corr, rho_bar)
    np.fill_diagonal(const_corr, 1.)
    sd = r.std()
    return pd.DataFrame(const_corr * np.outer(sd, sd), index=r.columns, columns=r.columns)
def shrinkage_cov(r, delta=0.5, **kwargs):
    """
    Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
    delta=1 -> pure constant-correlation prior; delta=0 -> pure sample covariance
    """
    prior = cc_cov(r, **kwargs)
    sample = sample_cov(r, **kwargs)
    return delta*prior + (1-delta)*sample
def risk_contribution(w, cov):
    """
    Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
    """
    total_var = portfolio_vol(w, cov)**2
    # marginal contribution of each constituent, scaled by its weight,
    # as a fraction of total portfolio variance
    marginal = cov@w
    return np.multiply(marginal, w.T)/total_var
def target_risk_contributions(target_risk, cov):
    """
    Returns the weights of the portfolio that gives you the weights such
    that the contributions to portfolio risk are as close as possible to
    the target_risk, given the covariance matrix
    """
    n_assets = cov.shape[0]
    guess = np.repeat(1/n_assets, n_assets)
    bounds = ((0.0, 1.0),) * n_assets  # long-only weights
    fully_invested = {'type': 'eq',
                      'fun': lambda weights: np.sum(weights) - 1
                      }
    def msd_risk(weights, target_risk, cov):
        """
        Mean Squared Difference between the realized risk contributions
        of `weights` and the target risk contributions.
        """
        contribs = risk_contribution(weights, cov)
        return ((contribs-target_risk)**2).sum()
    result = minimize(msd_risk, guess,
                      args=(target_risk, cov), method='SLSQP',
                      options={'disp': False},
                      constraints=(fully_invested,),
                      bounds=bounds)
    return result.x
def equal_risk_contributions(cov):
    """
    Returns the weights of the portfolio that equalizes the contributions
    of the constituents based on the given covariance matrix
    """
    n_assets = cov.shape[0]
    # target: every asset contributes 1/n of total portfolio risk
    return target_risk_contributions(target_risk=np.repeat(1/n_assets, n_assets), cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
    """
    Produces the weights of the ERC portfolio given a covariance matrix of the returns
    """
    # estimate the covariance, then equalize the risk contributions
    return equal_risk_contributions(cov_estimator(r, **kwargs))
def implied_returns(delta, sigma, w):
    """
    Obtain the implied expected returns by reverse engineering the weights
    Inputs:
    delta: Risk Aversion Coefficient (scalar)
    sigma: Variance-Covariance Matrix (N x N) as DataFrame
    w: Portfolio weights (N x 1) as Series
    Returns an N x 1 vector of Returns as Series
    """
    implied = delta * sigma.dot(w).squeeze()  # squeeze: 1-column frame -> Series
    implied.name = 'Implied Returns'
    return implied
# Assumes that Omega is proportional to the variance of the prior
def proportional_prior(sigma, tau, p):
    """
    Returns the He-Litterman simplified Omega
    Inputs:
    sigma: N x N Covariance Matrix as DataFrame
    tau: a scalar
    p: a K x N DataFrame linking Q and Assets
    returns a K x K DataFrame, a Matrix representing Prior Uncertainties
    """
    full_omega = p.dot(tau * sigma).dot(p.T)
    # keep only the diagonal: the views are treated as independent
    return pd.DataFrame(np.diag(np.diag(full_omega.values)),
                        index=p.index, columns=p.index)
def bl(w_prior, sigma_prior, p, q,
       omega=None,
       delta=2.5, tau=.02):
    """
    Computes the posterior expected returns (and their uncertainty) based
    on the original Black-Litterman reference model.

    w_prior: N x 1 vector of prior (e.g. cap) weights, a Series
    sigma_prior: N x N covariance matrix, a DataFrame
    p: K x N matrix linking the views Q to the assets, a DataFrame
    q: K x 1 vector of views, a Series
    omega: K x K view-uncertainty matrix as a DataFrame, or None;
        if None it is assumed proportional to the variance of the prior
        (He-Litterman simplification)
    delta: risk-aversion coefficient (scalar)
    tau: uncertainty scaling factor on the prior (scalar)
    Returns the tuple (mu_bl, sigma_bl).
    """
    if omega is None:
        omega = proportional_prior(sigma_prior, tau, p)
    # How many assets do we have?
    N = w_prior.shape[0]
    # And how many views?
    K = q.shape[0]
    # First, reverse-engineer the weights to get pi (the implied returns)
    pi = implied_returns(delta, sigma_prior,  w_prior)
    # Adjust (scale) Sigma by the uncertainty scaling factor
    sigma_prior_scaled = tau * sigma_prior
    # posterior estimate of the mean, use the "Master Formula"
    # we use the versions that do not require
    # Omega to be inverted (see previous section)
    # this is easier to read if we use '@' for matrixmult instead of .dot()
    #     mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi)
    mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi).values))
    # posterior estimate of uncertainty of mu.bl
    # sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled
    sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
    return (mu_bl, sigma_bl)
# for convenience and readability, define the inverse of a dataframe
def inverse(d):
    """
    Invert the dataframe by inverting the underlying matrix
    """
    # index/columns are swapped: inv(A) maps A's row space back
    return pd.DataFrame(inv(d.values), index=d.columns, columns=d.index)
def weight_msr(sigma, mu, scale=True):
    """
    Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
    by using the Markowitz Optimization Procedure.

    Mu is the vector of Excess expected Returns.
    Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series.
    This implements page 188 Equation 5.2.28 of
    "The econometrics of financial markets" Campbell, Lo and Mackinlay.
    """
    weights = inverse(sigma).dot(mu)
    if scale:
        # Normalise so the weights sum to one.
        # fix: this assumes all w is +ve
        weights = weights / sum(weights)
    return weights
| kit.py | 30,248 | Annualizes a set of returns
We should infer the periods per year
but that is currently left as an exercise
to the reader :-)
Annualizes the vol of a set of returns
We should infer the periods per year
but that is currently left as an exercise
to the reader :-)
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
# Computes the posterior expected returns based on
# the original black litterman reference model
#
# W.prior must be an N x 1 vector of weights, a Series
# Sigma.prior is an N x N covariance matrix, a DataFrame
# P must be a K x N matrix linking Q and the Assets, a DataFrame
# Q must be an K x 1 vector of views, a Series
# Omega must be a K x K matrix a DataFrame, or None
# if Omega is None, we assume it is
# proportional to variance of the prior
# delta and tau are scalars
Estimates a covariance matrix by using the Elton/Gruber Constant Correlation model
returns the result of compounding the set of returns in r
Computes the Conditional VaR of Series or DataFrame
Takes a time series of asset returns.
returns a DataFrame with columns for
the wealth index,
the previous peaks, and
the percentage drawdown
Returns the weights of the portfolio that equalizes the contributions
of the constituents based on the given covariance matrix
Returns the loadings of r on the Fama French Factors
which can be read in using get_fff_returns()
the index of r must be a (not necessarily proper) subset of the index of factors
r is either a Series or a DataFrame
Evolution of Geometric Brownian Motion trajectories, such as for Stock Prices through Monte Carlo
:param n_years: The number of years to generate data for
:param n_paths: The number of scenarios/trajectories
:param mu: Annualized Drift, e.g. Market Return
:param sigma: Annualized Volatility
:param steps_per_year: granularity of the simulation
:param s_0: initial value
:return: a numpy array of n_paths columns and n_years*steps_per_year rows
Load the Fama-French Research Factor Monthly Dataset
Load the Fama-French Dataset for the returns of the Top and Bottom Deciles by MarketCap
Load and format the EDHEC Hedge Fund Index Returns
Load and format the Ken French Industry Portfolios files
Variant is a tuple of (weighting, size) where:
weighting is one of "ew", "vw"
number of inds is 30 or 49
Load the industry portfolio data and derive the market caps
Load and format the Ken French 30 Industry Portfolios Average number of Firms
Load and format the Ken French Industry Portfolios Monthly Returns
Load and format the Ken French 30 Industry Portfolios Average size (market cap)
Load the 30 industry portfolio data and derive the returns of a capweighted total market index
Returns the weights of the Global Minimum Volatility portfolio
given a covariance matrix
Obtain the implied expected returns by reverse engineering the weights
Inputs:
delta: Risk Aversion Coefficient (scalar)
sigma: Variance-Covariance Matrix (N x N) as DataFrame
w: Portfolio weights (N x 1) as Series
Returns an N x 1 vector of Returns as Series
Invert the dataframe by inverting the underlying matrix
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame
Returns a float or a Series
Returns the optimal weights that achieve the target return
given a set of expected returns and a covariance matrix
Returns the Mean Squared Difference in risk contributions
between weights and target_risk
Returns the weights of the portfolio that gives you the maximum sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
Returns the negative of the sharpe ratio
of the given portfolio
Returns a list of weights that represent a grid of n_points on the efficient frontier
Plots the multi-asset efficient frontier
Plots the 2-asset efficient frontier
Computes the return on a portfolio from constituent returns and weights
weights are a numpy array or Nx1 matrix and returns are a numpy array or Nx1 matrix
returns the tracking error between the reference returns
and a portfolio of building block returns held with given weights
Computes the vol of a portfolio from a covariance matrix and constituent weights
weights are a numpy array or N x 1 matrix and covmat is an N x N matrix
Returns the He-Litterman simplified Omega
Inputs:
sigma: N x N Covariance Matrix as DataFrame
tau: a scalar
p: a K x N DataFrame linking Q and Assets
returns a P x P DataFrame, a Matrix representing Prior Uncertainties
Runs a linear regression to decompose the dependent variable into the explanatory variables
returns an object of type statsmodel's RegressionResults on which you can call
.summary() to print a full summary
.params for the coefficients
.tvalues and .pvalues for the significance levels
.rsquared_adj and .rsquared for quality of fit
Compute the contributions to risk of the constituents of a portfolio, given a set of portfolio weights and a covariance matrix
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
Returns the sample covariance of the supplied returns
Returns the semideviation aka negative semideviation of r
r must be a Series or a DataFrame, else raises a TypeError
Computes the annualized sharpe ratio of a set of returns
Covariance estimator that shrinks between the Sample Covariance and the Constant Correlation Estimators
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame
Returns a float or a Series
Returns the optimal weights that minimizes the Tracking error between
a portfolio of the explanatory variables and the dependent variable
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
Returns the weights of the portfolio that gives you the weights such
that the contributions to portfolio risk are as close as possible to
the target_risk, given the covariance matrix
Returns the Tracking Error between the two return series
Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
fall below that number, and the (100-level) percent are above
Returns the weights of the CW portfolio based on the time series of capweights
Produces the weights of the ERC portfolio given a covariance matrix of the returns
Returns the weights of the EW portfolio based on the asset returns "r" as a DataFrame
If supplied a set of capweights and a capweight tether, it is applied and reweighted
Produces the weights of the GMV portfolio given a covariance matrix of the returns
Optimal (Tangent/Max Sharpe Ratio) Portfolio weights
by using the Markowitz Optimization Procedure
Mu is the vector of Excess expected Returns
Sigma must be an N x N matrix as a DataFrame and Mu a column vector as a Series
This implements page 188 Equation 5.2.28 of
"The econometrics of financial markets" Campbell, Lo and Mackinlay.
else use the population standard deviation, so set dof=0 use the population standard deviation, so set dof=0 convert the annual riskfree rate to per period compute the Z score assuming it was Gaussian modify the Z score based on observed skewness and kurtosis an N-tuple of 2-tuples! construct the constraints an N-tuple of 2-tuples! construct the constraints get MSR add CML add EW add EW set up the CPPI parameters fast way to set all values to a number set up some DataFrames for saving intermediate values recompute the new account value at the end of this step save the histories for analysis and plotting Derive per-step Model Parameters from User Specifications the standard way ... rets_plus_1 = np.random.normal(loc=mu*dt+1, scale=sigma*np.sqrt(dt), size=(n_steps, n_scenarios)) without discretization error ... an N-tuple of 2-tuples! construct the constraints starting cap weight exclude microcapslimit weight to a multiple of capweightreweight return windows convert List of weights to DataFramemincount is to generate NAs if all inputs are NAs this is a symmetric matrix with diagonals all 1 - so the mean correlation is ... Marginal contribution of each constituent an N-tuple of 2-tuples! construct the constraints to get a series from a 1-column dataframe Assumes that Omega is proportional to the variance of the prior Make a diag matrix from the diag elements of Omega Force w.prior and Q to be column vectors How many assets do we have? And how many views? 
First, reverse-engineer the weights to get pi Adjust (scale) Sigma by the uncertainty scaling factor posterior estimate of the mean, use the "Master Formula" we use the versions that do not require Omega to be inverted (see previous section) this is easier to read if we use '@' for matrixmult instead of .dot() mu_bl = pi + sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ (q - p @ pi) posterior estimate of uncertainty of mu.bl sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled @ p.T @ inv(p @ sigma_prior_scaled @ p.T + omega) @ p @ sigma_prior_scaled for convenience and readability, define the inverse of a dataframe fix: this assumes all w is +ve | 9,703 | en | 0.778277 |
"""
_SubscriptionList_
Module with data structures to handle PhEDEx subscriptions
in bulk.
"""
import logging
from WMCore.WMException import WMException
# Priority values accepted by the PhEDEx data service for a subscription.
PhEDEx_VALID_SUBSCRIPTION_PRIORITIES = ['low', 'normal', 'high', 'reserved']
class PhEDExSubscriptionException(WMException):
    """
    _PhEDExSubscriptionException_

    Exception class for the phedex subscription.
    Raised when a subscription is constructed with invalid options.
    """
    pass
class PhEDExSubscription(object):
    """
    _PhEDExSubscription_

    Data structure which contains PHEDEx fields for
    PhEDEx subscription data service
    """

    def __init__(self, datasetPathList, nodeList, group, level = 'dataset',
                 priority = 'normal', move = 'n', static = 'n', custodial = 'n',
                 request_only = 'y', blocks = None, subscriptionId = -1, comments=""):
        """
        Initialize PhEDEx subscription with default value

        :param datasetPathList: dataset path(s); a single string is wrapped in a list
        :param nodeList: destination node(s); a single string is wrapped in a list
        :param group: PhEDEx group the subscription belongs to
        :param level: 'dataset' or 'block'
        :param priority: one of PhEDEx_VALID_SUBSCRIPTION_PRIORITIES
        :param move: 'y'/'n' flag
        :param static: 'y'/'n' flag
        :param custodial: 'y'/'n' flag
        :param request_only: 'y'/'n' flag
        :param blocks: block structure, required when level == 'block'
        :param subscriptionId: internal accounting id
        :param comments: free-text comment attached to the subscription
        :raises PhEDExSubscriptionException: if any option is invalid
        """
        # NOTE(review): `basestring` exists only on Python 2 — confirm the
        # supported interpreter before porting this module to Python 3.
        if isinstance(datasetPathList, basestring):
            datasetPathList = [datasetPathList]
        if isinstance(nodeList, basestring):
            nodeList = [nodeList]
        self.datasetPaths = set(datasetPathList)
        self.nodes = set(nodeList)
        # Normalise all string-valued options to lower case.
        self.level = level.lower()
        self.priority = priority.lower()
        self.move = move.lower()
        self.static = static.lower()
        self.group = group
        self.custodial = custodial.lower()
        self.request_only = request_only.lower()
        self.requesterID = None
        self.status = "New"
        self.comments = comments
        # Subscription id for internal accounting
        self.subscriptionIds = set([subscriptionId])
        # Optional blocks for non-dataset subscriptions
        self.blocks = blocks
        try:
            # Validation checks on the subscription
            for option in (self.static, self.custodial, self.request_only, self.move):
                assert option in ('y', 'n')
            assert self.priority in PhEDEx_VALID_SUBSCRIPTION_PRIORITIES
            assert self.level in ('dataset', 'block')
            if self.level == 'block':
                assert self.blocks is not None
        except AssertionError:
            msg = "The subscription is not a valid PhEDEx subscription.\n"
            msg += "Check the options for this subscription: \n"
            msg += "level: %s\n" % self.level
            msg += "priority: %s\n" % self.priority
            msg += "static: %s\n" % self.static
            msg += "move: %s\n" % self.move
            msg += "custodial: %s\n" % self.custodial
            msg += "blocks: %s\n" % str(self.blocks)
            raise PhEDExSubscriptionException(msg)

    def __str__(self):
        """
        Write out useful information for this object
        :return:
        """
        res = {'datasetPaths': self.datasetPaths, 'nodes': self.nodes,
               'priority': self.priority, 'move': self.move,
               'group': self.group, 'custodial': self.custodial,
               'request_only': self.request_only, 'blocks': self.blocks}
        return str(res)

    def isEqualOptions(self, subscription):
        """Return True if all subscription options (not paths/nodes) match."""
        return (self.level == subscription.level
                and self.priority == subscription.priority
                and self.request_only == subscription.request_only
                and self.custodial == subscription.custodial
                and self.group == subscription.group
                and self.move == subscription.move
                and self.static == subscription.static)

    def isEqualDatasetPaths(self, subscription):
        """Return True if options AND dataset paths match."""
        return (self.datasetPaths == subscription.datasetPaths
                and self.isEqualOptions(subscription))

    def isEqualNode(self, subscription):
        """Return True if options AND node sets match."""
        return (self.nodes == subscription.nodes
                and self.isEqualOptions(subscription))

    def addDatasetPaths(self, subscription):
        """Merge another subscription's dataset paths and ids into this one."""
        # Refuse to mutate a subscription already submitted to PhEDEx.
        if self.requesterID != None:
            msg = """ PhEDEx subscription is already made with id: %s\n
                      Create a new subscription
                  """ % (self.requesterID)
            raise Exception(msg)
        self.datasetPaths = self.datasetPaths.union(subscription.datasetPaths)
        self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)

    def addNodes(self, subscription):
        """Merge another subscription's nodes and ids into this one."""
        # Refuse to mutate a subscription already submitted to PhEDEx.
        if self.requesterID != None:
            msg = """ PhEDEx subscription is already made with id: %s\n
                      Create a new subscription
                  """ % (self.requesterID)
            raise Exception(msg)
        self.nodes = self.nodes.union(subscription.nodes)
        self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)

    def getDatasetPaths(self):
        """Return the dataset paths as a list."""
        return list(self.datasetPaths)

    def getSubscriptionIds(self):
        """Return the internal accounting ids as a list."""
        return list(self.subscriptionIds)

    def getDatasetsAndBlocks(self):
        """
        _getDatasetsAndBlocks_

        Get the block structure
        with datasets and blocks
        """
        return self.blocks

    def getNodes(self):
        """Return the destination nodes as a list."""
        return list(self.nodes)

    def getRequesterID(self):
        """Return the PhEDEx requester id, or None if not yet submitted."""
        return self.requesterID

    def setRequesterID(self, requesterId):
        """Set the requester id; may only be done once per subscription."""
        if self.requesterID == None:
            self.requesterID = requesterId
        else:
            msg = """ PhEDEx subscription is already made with id: %s\n
                      Create a new subscription
                  """ % (self.requesterID)
            raise Exception(msg)

    def matchesExistingTransferRequest(self, phedexDataSvc):
        """
        _matchesExistingTransferRequest_

        Check the given phedex data service to verify if an unapproved
        transfer request equal to this subscription is already in the system.

        Only valid for single-node, single-dataset, dataset-level
        subscriptions; raises PhEDExSubscriptionException otherwise.
        """
        if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
            msg = "matchesExistingTransferRequest can only run in single node/dataset subscriptions"
            raise PhEDExSubscriptionException(msg)
        if self.level != 'dataset':
            msg = "matchesExistingTransferRequest is only supported by dataset subscriptions"
            raise PhEDExSubscriptionException(msg)
        node = next(iter(self.nodes))
        dataset = next(iter(self.datasetPaths))
        # Get the unapproved requests involving the node and dataset in this subscription
        existingRequests = phedexDataSvc.getRequestList(dataset = dataset,
                                                        node = node,
                                                        decision = 'pending')['phedex']['request']
        for request in existingRequests:
            # Get the detailed information in the request
            requestId = request['id']
            requestInfo = phedexDataSvc.getTransferRequests(request = requestId)['phedex']['request']
            if not requestInfo:
                logging.error("Transfer request %s doesn't exist in PhEDEx", requestId)
                continue # Strange, but let it go.
            requestInfo = requestInfo[0] # It's a singleton
            # Make sure that the node is in the destinations
            destinations = requestInfo['destinations']['node']
            for nodeInfo in destinations:
                if nodeInfo['name'] == node:
                    break
            else:
                # for/else: no destination matched this node, skip the request
                continue
            # Create a subscription with this info
            phedexRequest = PhEDExSubscription(self.datasetPaths, self.nodes,
                                               self.group, self.level, requestInfo['priority'],
                                               requestInfo['move'], requestInfo['static'],
                                               requestInfo['custodial'], self.request_only)
            if self.isEqualOptions(phedexRequest):
                return True
        return False

    def matchesExistingSubscription(self, phedexDataSvc):
        """
        _matchesExistingSubscription_

        Check the given phedex data service to verify if a PhEDEx subscription
        equal to this subscription is already in the system.

        Only valid for single-node, single-dataset, dataset-level
        subscriptions; raises PhEDExSubscriptionException otherwise.
        """
        if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
            msg = "matchesExistingSubscription can only run in single node/dataset subscriptions"
            raise PhEDExSubscriptionException(msg)
        if self.level != 'dataset':
            msg = "matchesExistingSubscription is only supported by dataset subscriptions"
            raise PhEDExSubscriptionException(msg)
        node = next(iter(self.nodes))
        dataset = next(iter(self.datasetPaths))
        # Check if the dataset has a subscription the given node
        existingSubscription = phedexDataSvc.subscriptions(dataset = dataset,
                                                           node = node)['phedex']['dataset']
        if len(existingSubscription) < 1:
            # No subscriptions
            return False
        datasetInfo = existingSubscription[0]
        for subscriptionInfo in datasetInfo['subscription']:
            # Check that the node in the subscription matches the current node
            if node != subscriptionInfo['node']:
                continue
            # Create a subscription with the info
            phedexSub = PhEDExSubscription(self.datasetPaths, self.nodes,
                                           self.group, subscriptionInfo['level'],
                                           subscriptionInfo['priority'], subscriptionInfo['move'],
                                           self.static, subscriptionInfo['custodial'],
                                           self.request_only)
            if self.isEqualOptions(phedexSub):
                return True
        return False
class SubscriptionList(object):
    """
    _SubscriptionList_

    Collection of PhEDEx subscriptions, organized so that the total
    number of distinct subscriptions is kept as small as possible.
    """

    def __init__(self):
        self._subList = []

    def addSubscription(self, subObj):
        """
        _addSubscription_

        Add a new subscription to the subscription policy.
        If an existing subscription has identical options and nodes,
        merge the new dataset paths into it instead of appending.
        """
        for existing in self._subList:
            if existing.isEqualOptions(subObj) and existing.isEqualNode(subObj):
                existing.addDatasetPaths(subObj)
                return
        self._subList.append(subObj)

    def compact(self):
        """
        _compact_

        Compact the subscription list by aggregating subscriptions whose
        options and dataset paths match, merging their node lists.
        """
        # Phase 1: group matching subscriptions, tracking grouped indexes
        # so the list is neither copied nor modified while scanning.
        groups = []
        grouped = set()
        for i, anchor in enumerate(self._subList):
            if i in grouped:
                continue
            group = [anchor]
            for j, candidate in enumerate(self._subList[i + 1:], i + 1):
                if j in grouped:
                    continue
                if anchor.isEqualOptions(candidate) and \
                   anchor.isEqualDatasetPaths(candidate):
                    group.append(candidate)
                    grouped.add(j)
            groups.append(group)
        # Phase 2: fold every group into its first member.
        compacted = []
        for group in groups:
            base = group[0]
            for extra in group[1:]:
                base.addNodes(extra)
            compacted.append(base)
        self._subList = compacted

    def getSubscriptionList(self):
        """Return the current list of subscriptions."""
        return self._subList
| src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py | 11,724 | _PhEDExSubscription_
Data structure which contains PHEDEx fields for
PhEDEx subscription data service
_PhEDExSubscriptionException_
Exception class for the phedex subscription
_SubscriptionList_
Class represents collection of subscription.
This organizes the subscriptions in a way to minimize their number.
Initialize PhEDEx subscription with default value
Write out useful information for this object
:return:
_addSubscription_
Add a new subscription to the subscription policy.
If the same subscription key exist just add the node list
_compact_
Compact the subscription list by aggregating the subscriptions where the nodes
share a list of dataset paths.
_getDatasetsAndBlocks_
Get the block structure
with datasets and blocks
_matchesExistingSubscription_
Check the given phedex data service to verify if a PhEDEx subscription
equal to this subscription is already in the system.
_matchesExistingTransferRequest_
Check the given phedex data service to verify if an unapproved
transfer request equal to this subscription is already in the system.
_SubscriptionList_
Module with data structures to handle PhEDEx subscriptions
in bulk.
Subscription id for internal accounting Optional blocks for non-dataset subscriptions Validation checks on the subscription Get the unapproved requests involving the node and dataset in this subscription Get the detailed information in the request Strange, but let it go. It's a singleton Make sure that the node is in the destinations Create a subscription with this info Check if the dataset has a subscription the given node No subscriptions Check that the node in the subscription matches the current node Create a subscription with the info Bag the subscriptions, keep indexes of bagged items to avoid modifying the list in place or copying the list Aggregate the subscriptions in the bags | 1,842 | en | 0.73789 |
"""
Functions for signals and positions created within this package.
Copyright 2021 InferStat Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from infertrade.PandasEnum import PandasEnum
from infertrade.algos.community.allocations import scikit_allocation_factory, infertrade_export_allocations
from infertrade.algos.community.signals import normalised_close, scikit_signal_factory, infertrade_export_signals
# A dictionary providing the list of community signals and trading strategies.
# Maps "signal" to the exported signal functions and the allocation key
# (PandasEnum.ALLOCATION.value) to the exported allocation rules.
infertrade_export = {
    "signal": infertrade_export_signals,
    PandasEnum.ALLOCATION.value: infertrade_export_allocations,
}
| infertrade/algos/community/__init__.py | 1,105 | Functions for signals and positions created within this package.
Copyright 2021 InferStat Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
A dictionary providing the list of community signals and trading strategies. | 698 | en | 0.865362 |
import urllib.request,json
from .models import Sources, Articles
from datetime import datetime
# Module-level configuration, populated by configure_request() at startup.
api_key = None
# NOTE(review): NEWS_API_KEY and NEWS_API_BASE_URL are not initialised here;
# they are created by the `global` statement in configure_request().
# NEWS_API_KEY = None
# NEWS_API_BASE_URL = None
ARTICLE = None
def configure_request(app):
    """Copy the news API settings from the app's config into module globals."""
    global api_key, NEWS_API_BASE_URL, NEWS_API_KEY, ARTICLE
    config = app.config
    api_key = config['NEWS_API_KEY']
    ARTICLE = config['ARTICLE']
    NEWS_API_BASE_URL = config['NEWS_API_BASE_URL']
    NEWS_API_KEY = config['NEWS_API_KEY']
def get_source(category):
    '''
    Fetch the news sources for *category* from the API and return them as
    a list of Sources objects (None when the response has no sources).
    '''
    source_url = NEWS_API_BASE_URL.format(category, api_key)
    print(source_url)
    with urllib.request.urlopen(source_url) as response:
        payload = json.loads(response.read())
    sources_result = None
    if payload['sources']:
        sources_result = process_sources(payload['sources'])
    print(sources_result)
    return sources_result
def process_sources(sources_list):
    '''
    Turn raw source dictionaries into Sources objects.

    Args:
        sources_list: A list of dictionaries that contain sources details
    '''
    # NOTE(review): the key 'urltoimage' differs from the 'urlToImage' key
    # used in process_articles — confirm which key this endpoint returns.
    return [
        Sources(
            item.get('author'),
            item.get('title'),
            item.get('urltoimage'),
            item.get('description'),
            item.get('url'),
            item.get('id'),
        )
        for item in sources_list
    ]
def get_articles(id):
    '''
    Fetch the articles for source *id* from the API and return them as a
    list of Articles objects (None when the response has no articles).
    '''
    request_url = ARTICLE.format(id, api_key)
    print(request_url)
    with urllib.request.urlopen(request_url) as response:
        articles_response = json.loads(response.read())
    articles_object = None
    if articles_response['articles']:
        articles_object = process_articles(articles_response['articles'])
    return articles_object
def process_articles(articles_list):
    '''
    Turn raw article dictionaries into Articles instances, skipping any
    article that has no 'urlToImage' value.
    '''
    # NOTE(review): the author is read from the 'name' key — confirm this
    # against the API payload.
    return [
        Articles(
            item.get('name'),
            item.get('title'),
            item.get('description'),
            item.get('url'),
            item.get('urlToImage'),
            item.get('publishedAt'),
        )
        for item in articles_list
        if item.get('urlToImage')
    ]
| app/requests.py | 3,138 | Function that processes the articles and returns a list of articles objects
function that gets the json response to our url request
function that checks the articles and processes them into instances
Function that checks the news results and turn them into objects
Args:
sources_list: A list of dictionaries that contain sources details
Getting api keyGetting the news base url NEWS_API_KEY = None NEWS_API_BASE_URL = None | 428 | en | 0.794258 |
class Const:
    """
    Write-once constant namespace: an attribute may be assigned exactly
    once and never rebound afterwards.
    """

    class ConstError(TypeError):
        """Raised on an attempt to rebind an already-assigned constant."""
        pass

    def __setattr__(self, name, value):
        # Only instance attributes count; class attributes are untouched.
        already_set = name in self.__dict__
        if already_set:
            raise self.ConstError("Can't rebind const (%s)" % name)
        self.__dict__[name] = value
# Layout constants: screen, terrain grid, tile, scoreboard and popup geometry.
LAYOUT = Const()
"""
布局
"""
LAYOUT.SCREEN_WIDTH = 500
LAYOUT.SCREEN_HEIGHT = 600
LAYOUT.SIZE = 4
LAYOUT.TERRAIN_X = 50
LAYOUT.TERRAIN_Y = 20
LAYOUT.TILE_WIDTH = 100
LAYOUT.TILE_HEIGHT = 90
LAYOUT.SCOREBOARD_X = 50
LAYOUT.SCOREBOARD_Y = 400
LAYOUT.POPUP_X = 100
LAYOUT.POPUP_Y = 400
LAYOUT.POPUP_WIDTH = 300
LAYOUT.POPUP_HEIGHT = 200

# Image asset paths.
IMAGE = Const()
"""
图片
"""
IMAGE.TILE = "assets/tile.png"  # floor tile
IMAGE.MIST = "assets/mist.png"  # fog of war
IMAGE.HERO = "assets/hero.png"  # hero
IMAGE.MONSTER = "assets/monster.png"  # monster
IMAGE.PIT = "assets/pit.png"  # pit
IMAGE.GOLD = "assets/gold.png"  # gold
IMAGE.BREEZE = "assets/breeze.png"  # breeze
IMAGE.STRENCH = "assets/strench.png"  # stench

# Event name constants.
EVENT = Const()
"""
事件
"""
EVENT.GAME_OVER = "gameOver"  # game over
EVENT.GAME_CLEAR = "gameClear"  # game cleared
EVENT.MONSTER_DEAD = "monsterDead"  # monster died
EVENT.HERO_WALK = "heroWalk"  # hero walks
EVENT.HERO_ATTACK = "heroAttack"  # hero attacks
EVENT.DANGER = "danger"  # encountered danger

# Encounter type codes.
ENCOUNTER = Const()
"""
遭遇
"""
ENCOUNTER.MONSTER = 21  # monster
ENCOUNTER.PIT = 22  # pit
ENCOUNTER.GOLD = 10  # gold

# Score deltas awarded per action/outcome.
SCORE = Const()
"""
分数
"""
SCORE.WALK = -1  # walking
SCORE.WIN = 1000  # victory
SCORE.LOSE = -1000  # defeat
SCORE.ATTACK = -10  # attacking
地砖 战争迷雾 英雄 怪物 陷阱 黄金 微风 臭气 游戏结束 游戏通关 怪兽死亡 英雄走动 英雄攻击 遭遇危险 怪物 坑洞 黄金 行走 胜利 失败 攻击 | 81 | zh | 0.999097 |
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pedrec.models.constants.skeleton_pedrec import SKELETON_PEDREC, SKELETON_PEDREC_JOINT_COLORS, SKELETON_PEDREC_LIMB_COLORS
from pedrec.visualizers.visualization_helper_3d import draw_origin_3d, draw_grid_3d
def add_skeleton_3d_to_axes(ax: Axes3D, skeleton_3d: np.ndarray, size: float = 2, min_score: float = 0.3):
    """
    Draw a single 3D skeleton (joints and limbs) onto *ax*.

    Joints whose score (column 3) falls below *min_score* are drawn fully
    transparent; a limb is drawn only when both of its endpoint joints
    reach *min_score*. The skeleton's y and z columns are swapped when
    plotting (columns 1 and 2 feed the z and y plot axes respectively).
    """
    # Joint scatter: transparent colour for low-confidence joints.
    joint_colors = [
        [0, 0, 0, 0] if joint[3] < min_score
        else SKELETON_PEDREC_JOINT_COLORS[joint_idx].rgba_float_list
        for joint_idx, joint in enumerate(skeleton_3d)
    ]
    ax.scatter(skeleton_3d[:, 0], skeleton_3d[:, 2], skeleton_3d[:, 1],
               c=joint_colors, s=size)

    # Limb segments between joint pairs defined by SKELETON_PEDREC.
    for limb_idx, joint_pair in enumerate(SKELETON_PEDREC):
        endpoints = joint_pair[0:2]
        if (skeleton_3d[endpoints, 3] >= min_score).all():
            ax.plot(skeleton_3d[endpoints, 0],
                    skeleton_3d[endpoints, 2],
                    skeleton_3d[endpoints, 1],
                    linewidth=size,
                    c=SKELETON_PEDREC_LIMB_COLORS[limb_idx].rgba_float_list)
def get_skeleton_3d_figure(skeleton_3d: np.ndarray):
    """Create a 3D figure with grid, origin markers and the skeleton drawn;
    return the (figure, axes) pair."""
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    draw_grid_3d(axes)
    draw_origin_3d(axes)
    add_skeleton_3d_to_axes(axes, skeleton_3d)
    return figure, axes
def plot_skeleton_3d(skeleton_3d: np.ndarray) -> None:
    """Render *skeleton_3d* in a new 3D figure and display it."""
    # The figure/axes handles are not needed here, so they are discarded
    # (the original bound them to unused locals).
    get_skeleton_3d_figure(skeleton_3d)
    plt.show()
| pedrec/visualizers/skeleton_3d_visualizer.py | 1,441 | Joints score Limbs Preparation | 30 | en | 0.787089 |
from ctypes import (
Structure,
Union,
c_char,
c_double,
c_int,
c_long,
c_short,
c_ubyte,
c_uint,
c_ulong,
c_ushort,
)
from . import timespec
class c_gsfSeaBeamSpecific(Structure):
    # Sensor-specific subrecord (SeaBeam, per the name): a single Eclipse
    # time counter. Field order defines the C layout — keep in sync with
    # the GSF library's struct (confirm against gsf.h).
    _fields_ = [("EclipseTime", c_ushort)]
class c_gsfEM100Specific(Structure):
    # EM100 sensor-specific subrecord: pitch angles plus sonar operating
    # parameters (mode, power, attenuation, tvg, pulse length, counter).
    _fields_ = [
        ("ship_pitch", c_double),
        ("transducer_pitch", c_double),
        ("mode", c_int),
        ("power", c_int),
        ("attenuation", c_int),
        ("tvg", c_int),
        ("pulse_length", c_int),
        ("counter", c_int),
    ]
class c_gsfEM121ASpecific(Structure):
    # EM121A sensor-specific subrecord: ping/beam settings, tx/rx status
    # flags and the surface sound velocity.
    _fields_ = [
        ("ping_number", c_int),
        ("mode", c_int),
        ("valid_beams", c_int),
        ("pulse_length", c_int),
        ("beam_width", c_int),
        ("tx_power", c_int),
        ("tx_status", c_int),
        ("rx_status", c_int),
        ("surface_velocity", c_double),
    ]
class c_gsfSeaBatSpecific(Structure):
    # SeaBat sensor-specific subrecord: ping id, surface sound velocity
    # and sonar mode/range/power/gain settings.
    _fields_ = [
        ("ping_number", c_int),
        ("surface_velocity", c_double),
        ("mode", c_int),
        ("sonar_range", c_int),
        ("transmit_power", c_int),
        ("receive_gain", c_int),
    ]
class c_gsfEM950Specific(Structure):
    # EM950 sensor-specific subrecord: ping id/quality, pitch angles and
    # the surface sound velocity.
    _fields_ = [
        ("ping_number", c_int),
        ("mode", c_int),
        ("ping_quality", c_int),
        ("ship_pitch", c_double),
        ("transducer_pitch", c_double),
        ("surface_velocity", c_double),
    ]
# Fixed-size C array of two doubles, used for port/starboard value pairs.
SEAMAP_DOUBLE_ARRAY_OF_2 = c_double * 2


class c_gsfSeamapSpecific(Structure):
    # SeaMap sensor-specific subrecord: per-side transmitter/gain/pulse
    # values plus depth, altitude and temperature.
    _fields_ = [
        ("portTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
        ("stbdTransmitter", SEAMAP_DOUBLE_ARRAY_OF_2),
        ("portGain", c_double),
        ("stbdGain", c_double),
        ("portPulseLength", c_double),
        ("stbdPulseLength", c_double),
        ("pressureDepth", c_double),
        ("altitude", c_double),
        ("temperature", c_double),
    ]
class c_gsfTypeIIISpecific(Structure):
    # Type III sensor-specific subrecord: beam index range plus
    # navigation mode and ping/mission identifiers.
    _fields_ = [
        ("leftmost_beam", c_ushort),
        ("rightmost_beam", c_ushort),
        ("total_beams", c_ushort),
        ("nav_mode", c_ushort),
        ("ping_number", c_ushort),
        ("mission_number", c_ushort),
    ]
class c_gsfCmpSassSpecific(Structure):
    # Compressed SASS sensor-specific subrecord: two double-valued fields.
    _fields_ = [("lfreq", c_double), ("lntens", c_double)]
class c_gsfSBAmpSpecific(Structure):
    # SB amplitude sensor-specific subrecord: a time-of-day split into
    # h/m/s/hundredths plus a block number and average gate depth.
    _fields_ = [
        ("hour", c_ushort),
        ("minute", c_ushort),
        ("second", c_ushort),
        ("hundredths", c_ushort),
        ("block_number", c_uint),
        ("avg_gate_depth", c_short),
    ]
# Fixed-size 4-byte character array used as a spare/padding field below.
SEA_BAT_CHAR_ARRAY_OF_4 = c_char * 4


class c_gsfSeaBatIISpecific(Structure):
    # SeaBat II sensor-specific subrecord: SeaBat fields plus fore-aft and
    # athwartships beam widths and a 4-byte spare.
    _fields_ = [
        ("ping_number", c_int),
        ("surface_velocity", c_double),
        ("mode", c_int),
        ("sonar_range", c_int),
        ("transmit_power", c_int),
        ("receive_gain", c_int),
        ("fore_aft_bw", c_double),
        ("athwart_bw", c_double),
        ("spare", SEA_BAT_CHAR_ARRAY_OF_4),
    ]
class c_gsfSeaBat8101Specific(Structure):
    # SeaBat 8101 sensor-specific subrecord: sonar settings, TVG values,
    # beam widths, range/depth filter bounds and a 4-byte spare.
    _fields_ = [
        ("ping_number", c_int),
        ("surface_velocity", c_double),
        ("mode", c_int),
        ("range", c_int),
        ("power", c_int),
        ("gain", c_int),
        ("pulse_width", c_int),
        ("tvg_spreading", c_int),
        ("tvg_absorption", c_int),
        ("fore_aft_bw", c_double),
        ("athwart_bw", c_double),
        ("range_filt_min", c_double),
        ("range_filt_max", c_double),
        ("depth_filt_min", c_double),
        ("depth_filt_max", c_double),
        ("projector", c_int),
        ("spare", SEA_BAT_CHAR_ARRAY_OF_4),
    ]
# Fixed-size character arrays for the algorithm-order and spare fields below.
SEA_BEAM_ALGORITHM_ORDER = c_char * 5
SEA_BEAM_SPARE = c_char * 2


class c_gsfSeaBeam2112Specific(Structure):
    # SeaBeam 2112 sensor-specific subrecord: mode/velocity settings plus
    # the bottom-detection algorithm order and a 2-byte spare.
    _fields_ = [
        ("mode", c_int),
        ("surface_velocity", c_double),
        ("ssv_source", c_char),
        ("ping_gain", c_int),
        ("pulse_width", c_int),
        ("transmitter_attenuation", c_int),
        ("number_algorithms", c_int),
        ("algorithm_order", SEA_BEAM_ALGORITHM_ORDER),
        ("spare", SEA_BEAM_SPARE),
    ]
class c_gsfElacMkIISpecific(Structure):
    """Sensor-specific subrecord for the Elac MkII sonar."""
    _fields_ = [
        ("mode", c_int),
        ("ping_num", c_int),
        ("sound_vel", c_int),
        ("pulse_length", c_int),
        ("receiver_gain_stbd", c_int),
        ("receiver_gain_port", c_int),
        ("reserved", c_int),
    ]
class c_gsfEM3RunTime(Structure):
    """Run-time parameters embedded (twice) in c_gsfEM3Specific."""
    _fields_ = [
        ("model_number", c_int),
        ("dg_time", timespec.c_timespec),
        ("ping_number", c_int),
        ("serial_number", c_int),
        ("system_status", c_int),
        ("filter_id", c_int),
        ("min_depth", c_double),
        ("max_depth", c_double),
        ("absorption", c_double),
        ("pulse_length", c_double),
        ("transmit_beam_width", c_double),
        ("power_reduction", c_int),
        ("receive_beam_width", c_double),
        ("receive_bandwidth", c_int),
        ("receive_gain", c_int),
        ("cross_over_angle", c_int),
        ("ssv_source", c_int),
        ("swath_width", c_int),
        ("beam_spacing", c_int),
        ("coverage_sector", c_int),
        ("stabilization", c_int),
        ("port_swath_width", c_int),
        ("stbd_swath_width", c_int),
        ("port_coverage_sector", c_int),
        ("stbd_coverage_sector", c_int),
        ("hilo_freq_absorp_ratio", c_int),
        ("spare1", c_int),
    ]
EM3_RUN_TIME_2_ARRAY = c_gsfEM3RunTime * 2
class c_gsfEM3Specific(Structure):
    """Sensor-specific subrecord for the EM120, EM300, EM1002, EM3000,
    EM3002 and EM121A_SIS models (see the union mapping below)."""
    _fields_ = [
        ("model_number", c_int),
        ("ping_number", c_int),
        ("serial_number", c_int),
        ("surface_velocity", c_double),
        ("transducer_depth", c_double),
        ("valid_beams", c_int),
        ("sample_rate", c_int),
        ("depth_difference", c_double),
        ("offset_multiplier", c_int),
        ("run_time", EM3_RUN_TIME_2_ARRAY),
    ]
# 16-byte spare array reused by several EM3-raw/EM-runtime structures.
EM3_RAW_SPARE_BYTES = c_ubyte * 16
class c_gsfEMRunTime(Structure): # 168 bytes
    """Run-time datagram shared by the EM3-raw and EM4 subrecords."""
    _fields_ = [
        ("model_number", c_int),
        ("dg_time", timespec.c_timespec),
        ("ping_counter", c_int),
        ("serial_number", c_int),
        ("operator_station_status", c_ubyte),
        ("processing_unit_status", c_ubyte),
        ("bsp_status", c_ubyte),
        ("head_transceiver_status", c_ubyte),
        ("mode", c_ubyte),
        ("filter_id", c_ubyte),
        ("min_depth", c_double),
        ("max_depth", c_double),
        ("absorption", c_double),
        ("tx_pulse_length", c_double),
        ("tx_beam_width", c_double),
        ("tx_power_re_max", c_double),
        ("rx_beam_width", c_double),
        ("rx_bandwidth", c_double),
        ("rx_fixed_gain", c_double),
        ("tvg_cross_over_angle", c_double),
        ("ssv_source", c_ubyte),
        ("max_port_swath_width", c_int),
        ("beam_spacing", c_ubyte),
        ("max_port_coverage", c_int),
        ("stabilization", c_ubyte),
        ("max_stbd_coverage", c_int),
        ("max_stbd_swath_width", c_int),
        ("durotong_speed", c_double),
        ("hi_low_absorption_ratio", c_double),
        ("tx_along_tilt", c_double),
        ("filter_id_2", c_ubyte),
        ("spare", EM3_RAW_SPARE_BYTES),
    ]
class c_gsfEMPUStatus(Structure): # 42 bytes
    """Processing-unit status shared by the EM3-raw and EM4 subrecords."""
    _fields_ = [
        ("pu_cpu_load", c_double),
        ("sensor_status", c_ushort),
        ("achieved_port_coverage", c_int),
        ("achieved_stbd_coverage", c_int),
        ("yaw_stabilization", c_double),
        ("spare", EM3_RAW_SPARE_BYTES),
    ]
class c_gsfEM3RawTxSector(Structure): # 72 bytes
    """Per-sector transmit parameters for the EM3-raw subrecord."""
    _fields_ = [
        ("tilt_angle", c_double),
        ("focus_range", c_double),
        ("signal_length", c_double),
        ("transmit_delay", c_double),
        ("center_frequency", c_double),
        ("waveform_id", c_int),
        ("sector_number", c_int),
        ("signal_bandwidth", c_double),
        ("spare", EM3_RAW_SPARE_BYTES),
    ]
GSF_MAX_EM3_SECTORS = 20
EM3_RAW_SECTORS = c_gsfEM3RawTxSector * GSF_MAX_EM3_SECTORS # 1440 bytes
class c_gsfEM3RawSpecific(Structure): # 1792 bytes (1746 + 23 * 2)
    """EM3-family subrecord carrying raw range and beam angle data."""
    _fields_ = [
        ("model_number", c_int),
        ("ping_counter", c_int),
        ("serial_number", c_int),
        ("surface_velocity", c_double),
        ("transducer_depth", c_double),
        ("valid_detections", c_int),
        ("sampling_frequency", c_double),
        ("vehicle_depth", c_double),
        ("depth_difference", c_double),
        ("offset_multiplier", c_int),
        ("spare_1", EM3_RAW_SPARE_BYTES),
        ("transmit_sectors", c_int), # 80 bytes
        ("sector", EM3_RAW_SECTORS), # 1520 bytes
        ("spare_2", EM3_RAW_SPARE_BYTES), # 1536 bytes
        ("run_time", c_gsfEMRunTime), # 1704 bytes
        ("pu_status", c_gsfEMPUStatus), # 1746 bytes
    ]
RESON8100_SPARE_BYTES = c_char * 2
class c_gsfReson8100Specific(Structure):
    """Sensor-specific subrecord for Reson 8100-series sonars."""
    _fields_ = [
        ("latency", c_int),
        ("ping_number", c_int),
        ("sonar_id", c_int),
        ("sonar_model", c_int),
        ("frequency", c_int),
        ("surface_velocity", c_double),
        ("sample_rate", c_int),
        ("ping_rate", c_int),
        ("mode", c_int),
        ("range", c_int),
        ("power", c_int),
        ("gain", c_int),
        ("tvg_spreading", c_int),
        ("tvg_absorption", c_int),
        ("fore_aft_bw", c_double),
        ("athwart_bw", c_double),
        ("projector_type", c_int),
        ("projector_angle", c_int),
        ("range_filt_min", c_double),
        ("range_filt_max", c_double),
        ("depth_filt_min", c_double),
        ("depth_filt_max", c_double),
        ("filters_active", c_int),
        ("temperature", c_int),
        ("beam_spacing", c_double),
        ("spare", RESON8100_SPARE_BYTES),
    ]
RESON7100_RESERVED_1 = c_ubyte * 16
RESON7100_RESERVED_2 = c_char * 15
RESON7100_RESERVED_3 = c_char * 8
class c_gsfReson7100Specific(Structure):
    """Sensor-specific subrecord for Reson 7100-series sonars."""
    _fields_ = [
        ("protocol_version", c_uint),
        ("device_id", c_uint),
        ("reserved_1", RESON7100_RESERVED_1),
        ("major_serial_number", c_uint),
        ("minor_serial_number", c_uint),
        ("ping_number", c_uint),
        ("multi_ping_seq", c_uint),
        ("frequency", c_double),
        ("sample_rate", c_double),
        ("receiver_bandwdth", c_double),
        ("tx_pulse_width", c_double),
        ("tx_pulse_type_id", c_uint),
        ("tx_pulse_envlp_id", c_uint),
        ("tx_pulse_envlp_param", c_double),
        ("tx_pulse_reserved", c_uint),
        ("max_ping_rate", c_double),
        ("ping_period", c_double),
        ("range", c_double),
        ("power", c_double),
        ("gain", c_double),
        ("control_flags", c_uint),
        ("projector_id", c_uint),
        ("projector_steer_angl_vert", c_double),
        ("projector_steer_angl_horz", c_double),
        ("projector_beam_wdth_vert", c_double),
        ("projector_beam_wdth_horz", c_double),
        ("projector_beam_focal_pt", c_double),
        ("projector_beam_weighting_window_type", c_uint),
        ("projector_beam_weighting_window_param", c_uint),
        ("transmit_flags", c_uint),
        ("hydrophone_id", c_uint),
        ("receiving_beam_weighting_window_type", c_uint),
        ("receiving_beam_weighting_window_param", c_uint),
        ("receive_flags", c_uint),
        ("receive_beam_width", c_double),
        ("range_filt_min", c_double),
        ("range_filt_max", c_double),
        ("depth_filt_min", c_double),
        ("depth_filt_max", c_double),
        ("absorption", c_double),
        ("sound_velocity", c_double),
        ("spreading", c_double),
        ("raw_data_from_7027", c_ubyte),
        ("reserved_2", RESON7100_RESERVED_2),
        ("sv_source", c_ubyte),
        ("layer_comp_flag", c_ubyte),
        ("reserved_3", RESON7100_RESERVED_3),
    ]
RESONTSERIES_RESERVED_1 = c_ubyte * 10
RESONTSERIES_RESERVED_2 = c_ubyte * 3
RESONTSERIES_RESERVED_3 = c_ubyte * 32
RESONTSERIES_RESERVED_7027 = c_ubyte * 420
RESONTSERIES_DEVICE_DESCRIPTION = c_char * 60
class c_gsfResonTSeriesSpecific(Structure):
    """Sensor-specific subrecord for Reson T-series (T50/T20) sonars."""
    _fields_ = [
        ("protocol_version", c_uint),
        ("device_id", c_uint),
        ("number_devices", c_uint),
        ("system_enumerator", c_ushort),
        ("reserved_1", RESONTSERIES_RESERVED_1),
        ("major_serial_number", c_uint),
        ("minor_serial_number", c_uint),
        ("ping_number", c_uint),
        ("multi_ping_seq", c_uint),
        ("frequency", c_double),
        ("sample_rate", c_double),
        ("receiver_bandwdth", c_double),
        ("tx_pulse_width", c_double),
        ("tx_pulse_type_id", c_uint),
        ("tx_pulse_envlp_id", c_uint),
        ("tx_pulse_envlp_param", c_double),
        ("tx_pulse_mode", c_ushort),
        ("tx_pulse_reserved", c_ushort),
        ("max_ping_rate", c_double),
        ("ping_period", c_double),
        ("range", c_double),
        ("power", c_double),
        ("gain", c_double),
        ("control_flags", c_uint),
        ("projector_id", c_uint),
        ("projector_steer_angl_vert", c_double),
        ("projector_steer_angl_horz", c_double),
        ("projector_beam_wdth_vert", c_double),
        ("projector_beam_wdth_horz", c_double),
        ("projector_beam_focal_pt", c_double),
        ("projector_beam_weighting_window_type", c_uint),
        ("projector_beam_weighting_window_param", c_double),
        ("transmit_flags", c_uint),
        ("hydrophone_id", c_uint),
        ("receiving_beam_weighting_window_type", c_uint),
        ("receiving_beam_weighting_window_param", c_double),
        ("receive_flags", c_uint),
        ("receive_beam_width", c_double),
        ("range_filt_min", c_double),
        ("range_filt_max", c_double),
        ("depth_filt_min", c_double),
        ("depth_filt_max", c_double),
        ("absorption", c_double),
        ("sound_velocity", c_double),
        ("sv_source", c_ubyte),
        ("spreading", c_double),
        ("beam_spacing_mode", c_ushort),
        ("sonar_source_mode", c_ushort),
        ("coverage_mode", c_ubyte),
        ("coverage_angle", c_double),
        ("horizontal_receiver_steering_angle", c_double),
        ("reserved_2", RESONTSERIES_RESERVED_2),
        ("uncertainty_type", c_uint),
        ("transmitter_steering_angle", c_double),
        ("applied_roll", c_double),
        ("detection_algorithm", c_ushort),
        ("detection_flags", c_uint),
        ("device_description", RESONTSERIES_DEVICE_DESCRIPTION),
        ("reserved_7027", RESONTSERIES_RESERVED_7027),
        ("reserved_3", RESONTSERIES_RESERVED_3),
    ]
EM4_SPARE_BYTES = c_ubyte * 16
class c_gsfEM4TxSector(Structure):
    """Per-sector transmit parameters for the EM4 subrecord."""
    _fields_ = [
        ("tilt_angle", c_double),
        ("focus_range", c_double),
        ("signal_length", c_double),
        ("transmit_delay", c_double),
        ("center_frequency", c_double),
        ("mean_absorption", c_double),
        ("waveform_id", c_int),
        ("sector_number", c_int),
        ("signal_bandwidth", c_double),
        ("spare", EM4_SPARE_BYTES),
    ]
EM4_SECTORS = c_gsfEM4TxSector * 9
class c_gsfEM4Specific(Structure):
    """Sensor-specific subrecord for the EM710, EM302, EM122 and EM2040
    models (EM4 family; see the union mapping below)."""
    _fields_ = [
        ("model_number", c_int),
        ("ping_counter", c_int),
        ("serial_number", c_int),
        ("surface_velocity", c_double),
        ("transducer_depth", c_double),
        ("valid_detections", c_int),
        ("sampling_frequency", c_double),
        ("doppler_corr_scale", c_uint),
        ("vehicle_depth", c_double),
        ("spare_1", EM4_SPARE_BYTES),
        ("transmit_sectors", c_int),
        ("sector", EM4_SECTORS),
        ("spare_2", EM4_SPARE_BYTES),
        ("run_time", c_gsfEMRunTime),
        ("pu_status", c_gsfEMPUStatus),
    ]
GEOSWATH_SPARE_BYTES = c_char * 32
class c_gsfGeoSwathPlusSpecific(Structure):
    """Sensor-specific subrecord for the GeoSwath+ interferometer."""
    _fields_ = [
        ("data_source", c_int),
        ("side", c_int),
        ("model_number", c_int),
        ("frequency", c_double),
        ("echosounder_type", c_int),
        ("ping_number", c_long),
        ("num_nav_samples", c_int),
        ("num_attitude_samples", c_int),
        ("num_heading_samples", c_int),
        ("num_miniSVS_samples", c_int),
        ("num_echosounder_samples", c_int),
        ("num_raa_samples", c_int),
        ("mean_sv", c_double),
        ("surface_velocity", c_double),
        ("valid_beams", c_int),
        ("sample_rate", c_double),
        ("pulse_length", c_double),
        ("ping_length", c_int),
        ("transmit_power", c_int),
        ("sidescan_gain_channel", c_int),
        ("stabilization", c_int),
        ("gps_quality", c_int),
        ("range_uncertainty", c_double),
        ("angle_uncertainty", c_double),
        ("spare", GEOSWATH_SPARE_BYTES),
    ]
KLEIN5410_SPARE_BYTES = c_char * 32
class c_gsfKlein5410BssSpecific(Structure):
    """Sensor-specific subrecord for the Klein 5410 bathy sidescan."""
    _fields_ = [
        ("data_source", c_int),
        ("side", c_int),
        ("model_number", c_int),
        ("acoustic_frequency", c_double),
        ("sampling_frequency", c_double),
        ("ping_number", c_uint),
        ("num_samples", c_uint),
        ("num_raa_samples", c_uint),
        ("error_flags", c_uint),
        ("range", c_uint),
        ("fish_depth", c_double),
        ("fish_altitude", c_double),
        ("sound_speed", c_double),
        ("tx_waveform", c_int),
        ("altimeter", c_int),
        ("raw_data_config", c_uint),
        ("spare", KLEIN5410_SPARE_BYTES),
    ]
DELTAT_FILE_TYPE = c_char * 4
DELTAT_SPARE = c_char * 32
class c_gsfDeltaTSpecific(Structure):
    """Sensor-specific subrecord for the Delta T sonar."""
    _fields_ = [
        ("decode_file_type", DELTAT_FILE_TYPE),
        ("version", c_char),
        ("ping_byte_size", c_int),
        ("interrogation_time", timespec.c_timespec),
        ("samples_per_beam", c_int),
        ("sector_size", c_double),
        ("start_angle", c_double),
        ("angle_increment", c_double),
        ("acoustic_range", c_int),
        ("acoustic_frequency", c_int),
        ("sound_velocity", c_double),
        ("range_resolution", c_double),
        ("profile_tilt_angle", c_double),
        ("repetition_rate", c_double),
        ("ping_number", c_ulong),
        ("intensity_flag", c_ubyte),
        ("ping_latency", c_double),
        ("data_latency", c_double),
        ("sample_rate_flag", c_ubyte),
        ("option_flags", c_ubyte),
        ("num_pings_avg", c_int),
        ("center_ping_time_offset", c_double),
        ("user_defined_byte", c_ubyte),
        ("altitude", c_double),
        ("external_sensor_flags", c_char),
        ("pulse_length", c_double),
        ("fore_aft_beamwidth", c_double),
        ("athwartships_beamwidth", c_double),
        ("spare", DELTAT_SPARE),
    ]
EM12_SPARE = c_char * 32
class c_gsfEM12Specific(Structure):
    """Sensor-specific subrecord for the EM12 sonar."""
    _fields_ = [
        ("ping_number", c_int),
        ("resolution", c_int),
        ("ping_quality", c_int),
        ("sound_velocity", c_double),
        ("mode", c_int),
        ("spare", EM12_SPARE),
    ]
R2SONIC_MODELNO = c_ubyte * 12
R2SONIC_SERIALNO = c_ubyte * 12
R2SONIC_INFO = c_double * 12
R2SONIC_SPARE = c_ubyte * 32
class c_gsfR2SonicSpecific(Structure):
    """Sensor-specific subrecord for R2Sonic sonars."""
    _fields_ = [
        ("model_number", R2SONIC_MODELNO),
        ("serial_number", R2SONIC_SERIALNO),
        ("dg_time", timespec.c_timespec),
        ("ping_number", c_uint),
        ("ping_period", c_double),
        ("sound_speed", c_double),
        ("frequency", c_double),
        ("tx_power", c_double),
        ("tx_pulse_width", c_double),
        ("tx_beamwidth_vert", c_double),
        ("tx_beamwidth_horiz", c_double),
        ("tx_steering_vert", c_double),
        ("tx_steering_horiz", c_double),
        ("tx_misc_info", c_uint),
        ("rx_bandwidth", c_double),
        ("rx_sample_rate", c_double),
        ("rx_range", c_double),
        ("rx_gain", c_double),
        ("rx_spreading", c_double),
        ("rx_absorption", c_double),
        ("rx_mount_tilt", c_double),
        ("rx_misc_info", c_uint),
        ("reserved", c_ushort),
        ("num_beams", c_ushort),
        ("A0_more_info", R2SONIC_INFO),
        ("A2_more_info", R2SONIC_INFO),
        ("G0_depth_gate_min", c_double),
        ("G0_depth_gate_max", c_double),
        ("G0_depth_gate_slope", c_double),
        ("spare", R2SONIC_SPARE),
    ]
SBECHOTRAC_SPARE = c_char * 4
class c_gsfSBEchotracSpecific(Structure):
    """Single-beam subrecord shared by the Echotrac, Bathy2000 and PDD
    members of the sensor-specific union below."""
    _fields_ = [
        ("navigation_error", c_int),
        ("mpp_source", c_ushort),
        ("tide_source", c_ushort),
        ("dynamic_draft", c_double),
        ("spare", SBECHOTRAC_SPARE),
    ]
SBMGD77_SPARE = c_char * 4
class c_gsfSBMGD77Specific(Structure):
    """Single-beam subrecord for MGD77 data."""
    _fields_ = [
        ("time_zone_corr", c_ushort),
        ("position_type_code", c_ushort),
        ("correction_code", c_ushort),
        ("bathy_type_code", c_ushort),
        ("quality_code", c_ushort),
        ("travel_time", c_double),
        ("spare", SBMGD77_SPARE),
    ]
# NOTE(review): SBBDB_SPARE (4 bytes) is defined but unused below; the
# struct's "spare" field is a single c_char — confirm against the GSF
# C header that this matches t_gsfSBBDBSpecific.
SBBDB_SPARE = c_char * 4
class c_gsfSBBDBSpecific(Structure):
    """Single-beam subrecord for BDB data."""
    _fields_ = [
        ("doc_no", c_int),
        ("eval", c_char),
        ("classification", c_char),
        ("track_adj_flag", c_char),
        ("source_flag", c_char),
        ("pt_or_track_ln", c_char),
        ("datum_flag", c_char),
        ("spare", c_char),
    ]
SBNOSHDB_SPARE = c_char * 4
class c_gsfSBNOSHDBSpecific(Structure):
    """Single-beam subrecord for NOS HDB data."""
    _fields_ = [
        ("type_code", c_ushort),
        ("carto_code", c_ushort),
        ("spare", SBNOSHDB_SPARE),
    ]
SBNAVISOUND_SPARE = c_char * 8
class c_gsfSBNavisoundSpecific(Structure):
    """Single-beam subrecord for Navisound data."""
    _fields_ = [
        ("pulse_length", c_double),
        ("spare", SBNAVISOUND_SPARE),
    ]
KMALL_TX_SECTOR_SPARE_BYTES = c_ubyte * 20
class c_gsfKMALLTxSector(Structure):
    """Per-sector transmit parameters for the KMALL subrecord."""
    _fields_ = [
        ("txSectorNumb", c_int),
        ("txArrNumber", c_int),
        ("txSubArray", c_int),
        ("sectorTransmitDelay_sec", c_double),
        ("tiltAngleReTx_deg", c_double),
        ("txNominalSourceLevel_dB", c_double),
        ("txFocusRange_m", c_double),
        ("centreFreq_Hz", c_double),
        ("signalBandWidth_Hz", c_double),
        ("totalSignalLength_sec", c_double),
        ("pulseShading", c_int),
        ("signalWaveForm", c_int),
        ("spare1", KMALL_TX_SECTOR_SPARE_BYTES)
    ]
KMALL_EXTRA_DET_SPARE_BYTES = c_ubyte * 32
class c_gsfKMALLExtraDetClass(Structure):
    """Extra-detection class information for the KMALL subrecord."""
    _fields_ = [
        ("numExtraDetInClass", c_int),
        ("alarmFlag", c_int),
        ("spare", KMALL_EXTRA_DET_SPARE_BYTES)
    ]
# Sensor specific data structures for the Kongsberg 2040 / SIS 5.0
KMALL_SPARE_BYTES_1 = c_ubyte * 8
KMALL_SPARE_BYTES_2 = c_ubyte * 16
KMALL_SPARE_BYTES_3 = c_ubyte * 32
KMALL_SPARE_BYTES_4 = c_ubyte * 32
KMALL_SPARE_BYTES_5 = c_ubyte * 32
KMALL_SECTOR = c_gsfKMALLTxSector * 9
KMALL_EXTRA_DET_CLASS_INFO = c_gsfKMALLExtraDetClass * 11
class c_gsfKMALLSpecific(Structure):
    """Sensor-specific subrecord for the Kongsberg KMALL datagram format."""
    _fields_ = [
        ("gsfKMALLVersion", c_int),
        ("dgmType", c_int),
        ("dgmVersion", c_int),
        ("systemID", c_int),
        ("echoSounderID", c_int),
        ("spare1", KMALL_SPARE_BYTES_1),
        ("numBytesCmnPart", c_int),
        ("pingCnt", c_int),
        ("rxFansPerPing", c_int),
        ("rxFanIndex", c_int),
        ("swathsPerPing", c_int),
        ("swathAlongPosition", c_int),
        ("txTransducerInd", c_int),
        ("rxTransducerInd", c_int),
        ("numRxTransducers", c_int),
        ("algorithmType", c_int),
        ("spare2", KMALL_SPARE_BYTES_2),
        ("numBytesInfoData", c_int),
        ("pingRate_Hz", c_double),
        ("beamSpacing", c_int),
        ("depthMode", c_int),
        ("subDepthMode", c_int),
        ("distanceBtwSwath", c_int),
        ("detectionMode", c_int),
        ("pulseForm", c_int),
        ("frequencyMode_Hz", c_double),
        ("freqRangeLowLim_Hz", c_double),
        ("freqRangeHighLim_Hz", c_double),
        ("maxTotalTxPulseLength_sec", c_double),
        ("maxEffTxPulseLength_sec", c_double),
        ("maxEffTxBandWidth_Hz", c_double),
        ("absCoeff_dBPerkm", c_double),
        ("portSectorEdge_deg", c_double),
        ("starbSectorEdge_deg", c_double),
        ("portMeanCov_deg", c_double),
        ("starbMeanCov_deg", c_double),
        ("portMeanCov_m", c_double),
        ("starbMeanCov_m", c_double),
        ("modeAndStabilisation", c_int),
        ("runtimeFilter1", c_int),
        ("runtimeFilter2", c_int),
        ("pipeTrackingStatus", c_int),
        ("transmitArraySizeUsed_deg", c_double),
        ("receiveArraySizeUsed_deg", c_double),
        ("transmitPower_dB", c_double),
        ("SLrampUpTimeRemaining", c_int),
        ("yawAngle_deg", c_double),
        ("numTxSectors", c_int),
        ("numBytesPerTxSector", c_int),
        ("headingVessel_deg", c_double),
        ("soundSpeedAtTxDepth_mPerSec", c_double),
        ("txTransducerDepth_m", c_double),
        ("z_waterLevelReRefPoint_m", c_double),
        ("x_kmallToall_m", c_double),
        ("y_kmallToall_m", c_double),
        ("latLongInfo", c_int),
        ("posSensorStatus", c_int),
        ("attitudeSensorStatus", c_int),
        ("latitude_deg", c_double),
        ("longitude_deg", c_double),
        ("ellipsoidHeightReRefPoint_m", c_double),
        ("spare3", KMALL_SPARE_BYTES_3),
        ("sector", KMALL_SECTOR),
        ("numBytesRxInfo", c_int),
        ("numSoundingsMaxMain", c_int),
        ("numSoundingsValidMain", c_int),
        ("numBytesPerSounding", c_int),
        ("WCSampleRate", c_double),
        ("seabedImageSampleRate", c_double),
        ("BSnormal_dB", c_double),
        ("BSoblique_dB", c_double),
        ("extraDetectionAlarmFlag", c_int),
        ("numExtraDetections", c_int),
        ("numExtraDetectionClasses", c_int),
        ("numBytesPerClass", c_int),
        ("spare4", KMALL_SPARE_BYTES_4),
        ("extraDetClassInfo", KMALL_EXTRA_DET_CLASS_INFO),
        ("spare5", KMALL_SPARE_BYTES_5)
    ]
class c_gsfSensorSpecific(Union):
    """Union of every supported sensor-specific subrecord type.

    Several members deliberately alias the same structure (e.g. EM121 and
    EM121A, or the Echotrac/Bathy2000/PDD single-beam records).
    """
    _fields_ = [
        ("gsfSeaBeamSpecific", c_gsfSeaBeamSpecific),
        ("gsfEM100Specific", c_gsfEM100Specific),
        ("gsfEM121ASpecific", c_gsfEM121ASpecific),
        ("gsfEM121Specific", c_gsfEM121ASpecific),
        ("gsfSeaBatSpecific", c_gsfSeaBatSpecific),
        ("gsfEM950Specific", c_gsfEM950Specific),
        ("gsfEM1000Specific", c_gsfEM950Specific),
        ("gsfSeamapSpecific", c_gsfSeamapSpecific),
        ("gsfTypeIIISeaBeamSpecific", c_gsfTypeIIISpecific),
        ("gsfSASSSpecific", c_gsfTypeIIISpecific),
        ("gsfCmpSassSpecific", c_gsfCmpSassSpecific),
        ("gsfSBAmpSpecific", c_gsfSBAmpSpecific),
        ("gsfSeaBatIISpecific", c_gsfSeaBatIISpecific),
        ("gsfSeaBat8101Specific", c_gsfSeaBat8101Specific),
        ("gsfSeaBeam2112Specific", c_gsfSeaBeam2112Specific),
        ("gsfElacMkIISpecific", c_gsfElacMkIISpecific),
        # used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS
        ("gsfEM3Specific", c_gsfEM3Specific),
        # used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS
        # with raw range and beam angle
        ("gsfEM3RawSpecific", c_gsfEM3RawSpecific),
        ("gsfReson8100Specific", c_gsfReson8100Specific),
        ("gsfReson7100Specific", c_gsfReson7100Specific),
        # used for T50 and T20
        ("gsfResonTSeriesSpecific", c_gsfResonTSeriesSpecific),
        # used for EM710, EM302, EM122, and EM2040
        ("gsfEM4Specific", c_gsfEM4Specific),
        # DHG 2006/09/27 Use for GeoSwath+ interferometer
        ("gsfGeoSwathPlusSpecific", c_gsfGeoSwathPlusSpecific),
        # Use for Klein 5410 Bathy Sidescan
        ("gsfKlein5410BssSpecific", c_gsfKlein5410BssSpecific),
        ("gsfDeltaTSpecific", c_gsfDeltaTSpecific),
        ("gsfEM12Specific", c_gsfEM12Specific),
        ("gsfR2SonicSpecific", c_gsfR2SonicSpecific),
        ("gsfKMallSpecific", c_gsfKMALLSpecific),
        ("gsfSBEchotracSpecific", c_gsfSBEchotracSpecific),
        ("gsfSBBathy2000Specific", c_gsfSBEchotracSpecific),
        ("gsfSBMGD77Specific", c_gsfSBMGD77Specific),
        ("gsfSBBDBSpecific", c_gsfSBBDBSpecific),
        ("gsfSBNOSHDBSpecific", c_gsfSBNOSHDBSpecific),
        ("gsfSBPDDSpecific", c_gsfSBEchotracSpecific),
        ("gsfSBNavisoundSpecific", c_gsfSBNavisoundSpecific),
    ]
| gsfpy3_09/gsfSensorSpecific.py | 28,127 | 168 bytes 42 bytes 72 bytes 1440 bytes 1792 bytes (1746 + 23 * 2) 80 bytes 1520 bytes 1536 bytes 1704 bytes 1746 bytes Sensor specific data structures for the Kongsberg 2040 / SIS 5.0 */ used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS used for EM120, EM300, EM1002, EM3000, EM3002, and EM121A_SIS with raw range and beam angle used for T50 and T20 used for EM710, EM302, EM122, and EM2040 DHG 2006/09/27 Use for GeoSwath+ interferometer Use for Klein 5410 Bathy Sidescan | 484 | en | 0.649473 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Aptitude technologie and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestPriceConfigurator(unittest.TestCase):
    """Placeholder test case for the Price Configurator doctype — no tests yet."""
    pass
| shei/shei/doctype/price_configurator/test_price_configurator.py | 228 | -*- coding: utf-8 -*- Copyright (c) 2019, Aptitude technologie and Contributors See license.txt | 95 | en | 0.721956 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from pyramid.httpexceptions import HTTPBadRequest
from warehouse.admin.views import journals as views
from ....common.db.accounts import UserFactory
from ....common.db.packaging import JournalEntryFactory, ProjectFactory
class TestProjectList:
    """Tests for the admin journal listing view (``views.journals_list``)."""

    def test_no_query(self, db_request):
        """Without a query, the newest 25 journal entries are returned."""
        journals = sorted(
            [JournalEntryFactory.create() for _ in range(30)],
            key=lambda j: (j.submitted_date, j.id),
            reverse=True,
        )
        result = views.journals_list(db_request)
        assert result == {"journals": journals[:25], "query": None}

    def test_with_page(self, db_request):
        """The ``page`` GET parameter selects the requested results page."""
        journals = sorted(
            [JournalEntryFactory.create() for _ in range(30)],
            key=lambda j: (j.submitted_date, j.id),
            reverse=True,
        )
        db_request.GET["page"] = "2"
        result = views.journals_list(db_request)
        assert result == {"journals": journals[25:], "query": None}

    def test_with_invalid_page(self):
        """A non-integer ``page`` parameter raises HTTPBadRequest."""
        request = pretend.stub(params={"page": "not an integer"})
        with pytest.raises(HTTPBadRequest):
            views.journals_list(request)

    def test_query_basic(self, db_request):
        """A bare query term filters journal entries by project name."""
        project0 = ProjectFactory.create()
        project1 = ProjectFactory.create()
        journals0 = sorted(
            [
                JournalEntryFactory.create(name=project0.normalized_name)
                for _ in range(30)
            ],
            key=lambda j: (j.submitted_date, j.id),
            reverse=True,
        )
        # Entries for the other project must not show up in the results.
        [JournalEntryFactory.create(name=project1.normalized_name) for _ in range(30)]

        query = f"{project0.name}"
        db_request.GET["q"] = query
        result = views.journals_list(db_request)

        assert result == {"journals": journals0[:25], "query": query}

    def test_query_term_project(self, db_request):
        """A ``project:`` term filters journal entries by project name."""
        project0 = ProjectFactory.create()
        project1 = ProjectFactory.create()
        journals0 = sorted(
            [
                JournalEntryFactory.create(name=project0.normalized_name)
                for _ in range(30)
            ],
            key=lambda j: (j.submitted_date, j.id),
            reverse=True,
        )
        [JournalEntryFactory.create(name=project1.normalized_name) for _ in range(30)]

        query = f"project:{project0.name}"
        db_request.GET["q"] = query
        result = views.journals_list(db_request)

        assert result == {"journals": journals0[:25], "query": query}

    def test_query_term_user(self, db_request):
        """A ``user:`` term filters journal entries by submitting user."""
        user0 = UserFactory.create()
        user1 = UserFactory.create()
        journals0 = sorted(
            [JournalEntryFactory.create(submitted_by=user0) for _ in range(30)],
            key=lambda j: (j.submitted_date, j.id),
            reverse=True,
        )
        [JournalEntryFactory.create(submitted_by=user1) for _ in range(30)]

        query = f"user:{user0.username}"
        db_request.GET["q"] = query
        result = views.journals_list(db_request)

        assert result == {"journals": journals0[:25], "query": query}

    def test_query_term_version(self, db_request):
        """A ``version:`` term selects exactly the matching entry."""
        journals = [JournalEntryFactory.create() for _ in range(10)]

        query = f"version:{journals[0].version}"
        db_request.GET["q"] = query
        result = views.journals_list(db_request)

        assert result == {"journals": [journals[0]], "query": query}

    def test_query_term_ip(self, db_request):
        """An ``ip:`` term filters by submitting address (IPv4 and IPv6)."""
        ipv4 = "10.6.6.6"
        ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
        journals0 = sorted(
            [JournalEntryFactory.create(submitted_from=ipv4) for _ in range(10)],
            key=lambda j: (j.submitted_date, j.id),
            reverse=True,
        )
        journals1 = sorted(
            [JournalEntryFactory.create(submitted_from=ipv6) for _ in range(10)],
            key=lambda j: (j.submitted_date, j.id),
            reverse=True,
        )

        query_ipv4 = f"ip:{ipv4}"
        db_request.GET["q"] = query_ipv4
        result = views.journals_list(db_request)
        assert result == {"journals": journals0, "query": query_ipv4}

        query_ipv6 = f"ip:{ipv6}"
        db_request.GET["q"] = query_ipv6
        result = views.journals_list(db_request)
        assert result == {"journals": journals1, "query": query_ipv6}
| tests/unit/admin/views/test_journals.py | 5,072 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 517 | en | 0.872906 |
# Copyright (c) 2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.proxy import *
from m5.objects.Scp import Scp
from m5.objects.Doorbell import Doorbell
from m5.util.fdthelper import *
from m5.SimObject import SimObject
class ScmiChannel(SimObject):
    """
    Unidirectional channel between the SCMI platform and an agent.

    Each channel owns a shared-memory window (shmem_range) and is
    addressed by a physical mailbox slot (phys_id) plus a virtual slot
    within it (virt_id); the doorbell notifies the SCMI platform.
    """
    type = 'ScmiChannel'
    cxx_header = "dev/arm/css/scmi_platform.hh"
    cxx_class = "SCMI::VirtualChannel"
    shmem_range = Param.AddrRange(
        "Virtual channel's shared memory address range")
    phys_id = Param.Unsigned(4,
        "Physical slot of the channel")
    virt_id = Param.Unsigned(0,
        "Virtual slot of the channel (within the physical)")
    doorbell = Param.Doorbell(
        "This is the doorbell used to notify the SCMI platform")
    def __init__(self, shmem, *args, **kwargs):
        # NOTE(review): positional *args are accepted but not forwarded to
        # the SimObject constructor — confirm this is intentional.
        super(ScmiChannel, self).__init__(**kwargs)
        def shmemGenerator(state):
            # Device-tree node describing this channel's shared memory.
            shmem_node = FdtNode("scp-shmem@%x" % 0)
            shmem_node.appendCompatible(["arm,scmi-shmem"])
            shmem_node.append(FdtPropertyWords("reg",
                state.addrCells(0) +
                state.sizeCells(0x200)) )
            #shmem_node.appendPhandle(self._parent.unproxy(self).channel)
            shmem_node.appendPhandle("scmi_virt" + str(self.virt_id))
            return shmem_node
        # Register the generator so the node is emitted under the shared
        # memory object when the platform builds its device tree.
        self._shmem = shmem
        self._shmem.addSubnodeGenerator(shmemGenerator)
class ScmiAgentChannel(ScmiChannel):
    """
    This is an Agent to Platform channel (the agent is the initiator)
    """
    type = 'ScmiAgentChannel'
    cxx_header = "dev/arm/css/scmi_platform.hh"
    cxx_class = "SCMI::AgentChannel"
class ScmiPlatformChannel(ScmiChannel):
    """
    This is a Platform to Agent channel (the platform is the initiator)
    """
    type = 'ScmiPlatformChannel'
    cxx_header = "dev/arm/css/scmi_platform.hh"
    cxx_class = "SCMI::PlatformChannel"
class ScmiCommunication(SimObject):
    """
    The SCMI Communication class models a bidirectional
    communication between the SCMI platform and the agent.
    As such it has a ScmiAgentChannel and a ScmiPlatformChannel
    object as members.
    """
    type = 'ScmiCommunication'
    cxx_header = "dev/arm/css/scmi_platform.hh"
    cxx_class = "SCMI::Communication"
    # One channel per direction; both are required parameters.
    agent_channel = Param.ScmiAgentChannel(
        "Agent to Platform channel")
    platform_channel = Param.ScmiPlatformChannel(
        "Platform to Agent channel")
class ScmiPlatform(Scp):
    """SCMI platform model.

    Groups the bidirectional communications and agent names, exposes the
    Base-protocol identification strings, and generates the 'scmi'
    firmware node of the device tree.
    """
    type = 'ScmiPlatform'
    cxx_header = "dev/arm/css/scmi_platform.hh"
    cxx_class = "SCMI::Platform"
    comms = VectorParam.ScmiCommunication([],
        "SCMI Communications")
    agents = VectorParam.String([ "OSPM" ],
        "Vector of SCMI agents (names) in the system")
    sys = Param.System(Parent.any, "System object parameter")
    dma = MasterPort("DMA port")
    # Protocol params
    base_vendor = Param.String("arm",
        "Return string for the Base protocol DISCOVER_VENDOR command")
    base_subvendor = Param.String("gem5",
        "Return string for the Base protocol DISCOVER_SUBVENDOR command")
    base_impl_version = Param.Unsigned(0,
        "Return value for the Base protocol "
        "DISCOVER_IMPLEMENTATION_VERSION command")
    def generateDeviceTree(self, state):
        # Wrap the scmi node in a /firmware container node.
        scmi_node = self.generateScmiNode(state)
        fw_node = FdtNode("firmware")
        fw_node.append(scmi_node)
        yield fw_node
    def generateScmiNode(self, state):
        node = FdtNode("scmi")
        node.appendCompatible(["arm,scmi"])
        mbox_phandle = state.phandle(self._parent.unproxy(self).mailbox)
        # Collect the phandles of every channel's shared-memory node, in
        # agent-channel / platform-channel order per communication.
        shmem_phandles = []
        for comm in self.unproxy(self).comms:
            shmem_phandles.append(state.phandle(
                "scmi_virt" + str(comm.agent_channel.virt_id)))
            shmem_phandles.append(state.phandle(
                "scmi_virt" + str(comm.platform_channel.virt_id)))
        phys_channel = 1 # HP-NonSecure
        node.append(FdtPropertyWords("mboxes", [ mbox_phandle, phys_channel ]))
        node.append(FdtPropertyWords("shmem", shmem_phandles))
        return node
| src/dev/arm/css/Scmi.py | 6,142 | This is a Agent to Platform channel (The agent is the initiator)
Unidirectional channel
The SCMI Communication class models a bidirectional
communication between the SCMI platform and the agent.
As such it has a ScmiAgentChannel and a ScmiPlatformChannel
object as members.
This is a Platform to Agent channel (The platform is the initiator)
Copyright (c) 2020 ARM Limited All rights reserved. The license below extends only to copyright in the software and shall not be construed as granting a license to any other intellectual property including but not limited to intellectual property relating to a hardware implementation of the functionality of the software licensed hereunder. You may use the software subject to the license terms below provided that you ensure that this notice is replicated unmodified and in its entirety in all distributions of the software, modified or unmodified, in source code or in binary form. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer; redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution; neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.shmem_node.appendPhandle(self._parent.unproxy(self).channel) Protocol params HP-NonSecure | 2,433 | en | 0.895042 |
#
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules.oneview import common
LOG = logging.getLogger(__name__)
# Maps ironic's generic boot device names (ironic.common.boot_devices)
# to the names the OneView API expects.
BOOT_DEVICE_MAPPING_TO_OV = {
    boot_devices.DISK: 'HardDisk',
    boot_devices.PXE: 'PXE',
    boot_devices.CDROM: 'CD',
}
# Inverse mapping: OneView boot device names back to ironic's generic names.
BOOT_DEVICE_OV_TO_GENERIC = {
    v: k
    for k, v in BOOT_DEVICE_MAPPING_TO_OV.items()
}
# The oneview_client library is an optional dependency; this is None when
# it is not installed.
oneview_exceptions = importutils.try_import('oneview_client.exceptions')
class OneViewManagement(base.ManagementInterface):
    """Management interface for ironic nodes backed by HPE OneView.

    Translates ironic's generic boot-device operations into OneView API
    calls made through the optional ``oneview_client`` library.
    """

    def get_properties(self):
        """Return the driver_info properties common to OneView interfaces."""
        return common.COMMON_PROPERTIES

    def validate(self, task):
        """Checks required info on 'driver_info' and validates node with OneView

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required info such as server_hardware_uri,
        server_hardware_type, server_profile_template_uri and
        enclosure_group_uri. Also, checks if the server profile of the node is
        applied, if NICs are valid for the server profile of the node, and if
        the server hardware attributes (ram, memory, vcpus count) are
        consistent with OneView.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if parameters set are inconsistent with
                 resources in OneView
        """
        common.verify_node_info(task.node)
        try:
            common.validate_oneview_resources_compatibility(task)
        except exception.OneViewError as oneview_exc:
            # Re-raise as the exception type the conductor expects from
            # validate().
            raise exception.InvalidParameterValue(oneview_exc)

    def get_supported_boot_devices(self, task):
        """Gets a list of the supported boot devices.

        :param task: a task from TaskManager.
        :returns: A list with the supported boot devices defined
                  in :mod:`ironic.common.boot_devices`.
        """
        return sorted(BOOT_DEVICE_MAPPING_TO_OV.keys())

    @task_manager.require_exclusive_lock
    @common.node_has_server_profile
    def set_boot_device(self, task, device, persistent=False):
        """Sets the boot device for a node.

        Sets the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of the supported devices
            listed in :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False.
        :raises: InvalidParameterValue if an invalid boot device is
            specified.
        :raises: OperationNotPermitted if the server has no server profile or
            if the server is already powered on.
        :raises: OneViewError if the communication with OneView fails
        """
        oneview_info = common.get_oneview_info(task.node)
        if device not in self.get_supported_boot_devices(task):
            raise exception.InvalidParameterValue(
                _("Invalid boot device %s specified.") % device)
        LOG.debug("Setting boot device to %(device)s for node %(node)s",
                  {"device": device, "node": task.node.uuid})
        try:
            oneview_client = common.get_oneview_client()
            device_to_oneview = BOOT_DEVICE_MAPPING_TO_OV.get(device)
            oneview_client.set_boot_device(oneview_info, device_to_oneview)
        except oneview_exceptions.OneViewException as oneview_exc:
            msg = (_(
                "Error setting boot device on OneView. Error: %s")
                % oneview_exc
            )
            LOG.error(msg)
            raise exception.OneViewError(error=msg)

    @common.node_has_server_profile
    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Provides the current boot device of the node.

        :param task: a task from TaskManager.
        :returns: a dictionary containing:
            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` [PXE, DISK, CDROM]
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.
        :raises: OperationNotPermitted if no Server Profile is associated with
            the node
        :raises: InvalidParameterValue if the boot device is unknown
        :raises: OneViewError if the communication with OneView fails
        """
        oneview_info = common.get_oneview_info(task.node)
        try:
            oneview_client = common.get_oneview_client()
            boot_order = oneview_client.get_boot_order(oneview_info)
        except oneview_exceptions.OneViewException as oneview_exc:
            msg = (_(
                "Error getting boot device from OneView. Error: %s")
                % oneview_exc
            )
            LOG.error(msg)
            # Pass the message via the 'error' format argument, consistent
            # with set_boot_device() above.
            raise exception.OneViewError(error=msg)
        # OneView reports the full boot order; the first entry is the
        # primary boot device.
        primary_device = boot_order[0]
        if primary_device not in BOOT_DEVICE_OV_TO_GENERIC:
            raise exception.InvalidParameterValue(
                _("Unsupported boot Device %(device)s for Node: %(node)s")
                % {"device": primary_device, "node": task.node.uuid}
            )
        boot_device = {
            'boot_device': BOOT_DEVICE_OV_TO_GENERIC.get(primary_device),
            'persistent': True,
        }
        return boot_device

    def get_sensors_data(self, task):
        """Get sensors data.

        Not implemented by this driver.

        :param task: a TaskManager instance.
        """
        raise NotImplementedError()
| ironic/drivers/modules/oneview/management.py | 6,510 | Get the current boot device for the task's node.
Provides the current boot device of the node.
:param task: a task from TaskManager.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` [PXE, DISK, CDROM]
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
:raises: OperationNotPermitted if no Server Profile is associated with
the node
:raises: InvalidParameterValue if the boot device is unknown
:raises: OneViewError if the communication with OneView fails
Get sensors data.
Not implemented by this driver.
:param task: a TaskManager instance.
Gets a list of the supported boot devices.
:param task: a task from TaskManager.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
Sets the boot device for a node.
Sets the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of the supported devices
listed in :mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is
specified.
:raises: OperationNotPermitted if the server has no server profile or
if the server is already powered on.
:raises: OneViewError if the communication with OneView fails
Checks required info on 'driver_info' and validates node with OneView
Validates whether the 'driver_info' property of the supplied
task's node contains the required info such as server_hardware_uri,
server_hardware_type, server_profile_template_uri and
enclosure_group_uri. Also, checks if the server profile of the node is
applied, if NICs are valid for the server profile of the node, and if
the server hardware attributes (ram, memory, vcpus count) are
consistent with OneView.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if parameters set are inconsistent with
resources in OneView
Copyright 2015 Hewlett Packard Development Company, LP Copyright 2015 Universidade Federal de Campina Grande Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 2,814 | en | 0.749098 |
#################################################
# Retrieve robust classifier from:
# https://github.com/MadryLab/robustness
#################################################
from robustness.datasets import CIFAR, RestrictedImageNet, ImageNet
from robustness.model_utils import make_and_restore_model
def get_robust_classifier(dataset, model_path, parallel=True):
    """Load a pretrained robustness-library ResNet-50 for a dataset.

    :param dataset: one of "cifar10", "RestrictedImageNet" or "ImageNet".
    :param model_path: checkpoint file to restore the weights from.
    :param parallel: whether to wrap the model for multi-GPU use.
    :returns: the restored model, switched to eval mode.
    :raises NotImplementedError: for any other dataset name.
    """
    dataset_builders = {
        "cifar10": lambda: CIFAR(),
        "RestrictedImageNet": lambda: RestrictedImageNet(''),
        "ImageNet": lambda: ImageNet(''),
    }
    if dataset not in dataset_builders:
        raise NotImplementedError("Model for {} is not implemented!".format(dataset))
    model, _ = make_and_restore_model(arch='resnet50',
                                      dataset=dataset_builders[dataset](),
                                      resume_path=model_path,
                                      parallel=parallel)
    model.eval()
    return model
if __name__ == "__main__":
    # Smoke test: classify every image under test_dir with the robust
    # CIFAR-10 model and print class predictions and confidences.
    netC = get_robust_classifier("cifar10", "pretrained/cifar_l2_0_5.pt")
    import torch
    import torchvision
    import numpy as np
    import torchvision.transforms as transforms
    from torch.nn import functional as F
    with torch.no_grad():
        test_dir = "../output_imgs/cifar10_new9_cLoss10.0"
        transform = transforms.Compose([
            # transforms.Scale is deprecated; Resize is the supported
            # equivalent for scaling to the 32x32 CIFAR-10 input size.
            transforms.Resize(32),
            transforms.ToTensor()
        ])
        dataset = torchvision.datasets.ImageFolder(test_dir, transform=transform)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=16, num_workers=16, shuffle=False)
        for item, data in enumerate(data_loader):
            print(data[0].shape)
            output, _ = netC(data[0])
            # Explicit dim=1 (class dimension): calling softmax without a
            # dim argument is deprecated in PyTorch.
            output = F.softmax(output, dim=1).data.cpu().numpy()
            print(output.shape)
            argmax = np.argmax(output, axis=-1)
            print(argmax.squeeze())
            maxp = np.amax(output, axis=-1)
            print(maxp.squeeze())
| classifiers/robust_classifier.py | 2,186 | Retrieve robust classifier from: https://github.com/MadryLab/robustness, transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) | 159 | en | 0.611126 |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.configuration.backend
# Re-export the concrete implementation so it can be imported from this
# auto-generated namespace module.
from ....lo.configuration.backend.multi_layer_stratum import MultiLayerStratum as MultiLayerStratum
__all__ = ['MultiLayerStratum']
| ooobuild/dyn/configuration/backend/multi_layer_stratum.py | 889 | coding: utf-8 Copyright 2022 :Barry-Thomas-Paul: Moss Licensed under the Apache License, Version 2.0 (the "License") you may not use this file except in compliance with the License. You may obtain a copy of the License at http: // www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Service Class this is a auto generated file generated by Cheetah Libre Office Version: 7.3 Namespace: com.sun.star.configuration.backend | 713 | en | 0.829902 |
#!/usr/bin/python3
import sys
import json
import getopt
import os
import jsonschema
import subprocess
# Refuse to run as a non-root user: writing /etc/nginx/nginx.conf needs root.
# Exit codes >= 72 are used throughout this script to distinguish failures.
if os.geteuid() != 0:
    print('You must be a root user')
    sys.exit(72)
# Defaults for the command-line options parsed below.
json_file = ''                        # -j: input JSON (required)
nginx_conf = '/etc/nginx/nginx.conf'  # -c: output config path
schema_file = ''                      # -s: optional JSON schema
test = False                          # -t: run 'nginx -t' after generation
#------Parse command-line options------
def usage():
    """Print the command-line usage summary for this script to stdout."""
    # Fixed typo: the original printed '[-c nginx_ conf]' with a stray space.
    print('Usage: ' + sys.argv[0] + ' -j json_file [-c nginx_conf] [-s schema_file] [-t] [-v] [-h]')
    print(' options:')
    print(' -j json_file   : JSON file (required option)')
    print(' -c nginx_conf  : Nginx config file (default: /etc/nginx/nginx.conf)')
    print(' -s schema_file : JSON schema file')
    print(" -t             : Test Nginx config file by command '/usr/sbin/nginx -t -c <nginx.conf>'")
    print(' -v             : Version')
    print(' -h             : Show this help page')
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hvtj:c:s:')
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(73)
if len(args) != 0:
print('Incorrect options: ' + ' '.join(args))
usage()
sys.exit(74)
else:
for o, a in opts:
if o == '-h':
usage()
sys.exit()
elif o == '-v':
print('version: 0.0.1')
sys.exit()
elif o == '-t':
test = True
elif o == '-j':
json_file = a
elif o == '-c':
nginx_conf = a
elif o == '-s':
schema_file = a
if json_file == '':
print('JSON file is required')
usage()
sys.exit(75)
#------Get json and schema data------
# Load the input JSON describing the desired nginx configuration.
# NOTE(review): "opent" is a typo in the user-facing messages below; left
# untouched here because changing runtime strings is out of scope for a
# documentation pass.
try:
    fh = open(json_file, 'r')
except IOError:
    print("Could not opent the file '{0}' for reading".format(json_file))
    sys.exit(76)
data=json.load(fh)
fh.close()
# Optionally validate the data against a JSON schema (-s option).
if schema_file != '':
    try:
        fh = open(schema_file, 'r')
    except IOError:
        print("Could not opent the file '{0}' for reading".format(schema_file))
        sys.exit(77)
    schema=json.load(fh)
    fh.close()
    try:
        jsonschema.validate(data, schema)
    except Exception as e:
        print(e)
        sys.exit(78)
#------Nginx functions------
def pcrejit():
    """Return 'on' when the installed nginx was built with --with-pcre-jit.

    Any failure (nginx missing, command error, undecodable output) is
    treated as 'off' so config generation can proceed regardless.
    """
    try:
        build_info = subprocess.check_output(
            '/usr/sbin/nginx -V', stderr=subprocess.STDOUT, shell=True
        ).decode()
        return 'on' if '--with-pcre-jit' in build_info else 'off'
    except Exception:
        return 'off'
def test_conf():
    """If -t was given, run 'nginx -t' on the generated config and print the result.

    Uses the module-level ``test`` flag and ``nginx_conf`` path; failures
    from the nginx binary are printed rather than raised.
    """
    if not test:
        return
    try:
        result = subprocess.check_output(
            '/usr/sbin/nginx -t -c ' + nginx_conf,
            stderr=subprocess.STDOUT,
            shell=True,
        )
        print(result.decode())
    except Exception as e:
        print(e)
#------Test 'location /'------
# Collect every location URI across all servers; a 'location /' entry is
# mandatory for the generated config to be useful.
location_root_test = []
for server in data.get('http').get('server'):
    for location in server.get('location'):
        location_root_test.append(location.get('URI'))
if '/' not in location_root_test:
    print("There is not 'location /' in JSON file")
    sys.exit(79)
#------Make Nginx config file------
# Emit the nginx config. Values taken from the JSON are passed through
# json.dumps so strings come out quoted and numbers unquoted.
try:
    fh = open(nginx_conf, 'w')
except IOError:
    print("Could not open the file '{0}' for writing".format(nginx_conf))
    sys.exit(78)
# --- top-level (main) context ---
fh.write( 'user ' + json.dumps(data.get('user')) + ';\n' )
fh.write( 'worker_processes ' + json.dumps(data.get('worker_processes')) + ';\n' )
fh.write( 'error_log ' + json.dumps(data.get('error_log').get('file')) + ' '
        + json.dumps(data.get('error_log').get('level')) + ';\n' )
fh.write( 'pid ' + json.dumps(data.get('pid')) + ';\n' )
fh.write( 'pcre_jit ' + pcrejit() + ';\n' )
fh.write( 'events { worker_connections ' + json.dumps(data.get('events').get('worker_connections')) + '; }\n' )
# --- http context ---
fh.write( 'http {\n')
fh.write( ' include ' + json.dumps(data.get('http').get('include')) + ';\n' )
fh.write( ' default_type ' + json.dumps(data.get('http').get('default_type')) + ';\n' )
fh.write( ' log_format ' + json.dumps(data.get('http').get('log_format').get('name')) + " "
        + json.dumps(data.get('http').get('log_format').get('string')) + ";\n" )
fh.write( ' access_log ' + json.dumps(data.get('http').get('access_log').get('file')) + ' '
        + json.dumps(data.get('http').get('access_log').get('name')) + ';\n' )
# --- one server block per entry in http.server ---
for server in data.get('http').get('server'):
    fh.write(' server {\n')
    fh.write(' listen ' + json.dumps(server.get('listen')) + ';\n')
    fh.write(' server_name ' + json.dumps(server.get('server_name')) + ';\n')
    # noindex 'location = /robots.txt'
    for extra in server.get('extra', []):
        if extra == 'noindex':
            fh.write(' location = /robots.txt {\n')
            fh.write(' default_type "text/plain";\n')
            fh.write(' return 200 "User-agent: *\\nDisallow: /";\n')
            fh.write(' }\n')
    # location blocks: 'proxy_set_header' and 'return' get special
    # formatting; any other key is emitted as a plain directive.
    for location in server.get('location'):
        fh.write(' location ' + location.get('modifier') + ' '
                + location.get('URI') + ' {\n')
        for configuration in location.get('configuration'):
            if configuration == 'proxy_set_header':
                for proxy_set_header in location.get('configuration').get(configuration):
                    fh.write(' proxy_set_header ' + proxy_set_header.get('field') + ' '
                            + json.dumps(proxy_set_header.get('value')) + ';\n')
            elif configuration == 'return':
                fh.write(' return ' + location.get('configuration').get(configuration).get('code') + ' '
                        + json.dumps(location.get('configuration').get(configuration).get('text')) + ';\n')
            else:
                fh.write(' ' + configuration + ' ' + json.dumps(location.get('configuration').get(configuration)) + ';\n')
        fh.write( ' }\n' )
    fh.write( ' }\n' )
# --- upstream blocks ---
for upstream in data.get('http').get('upstream'):
    fh.write(' upstream ' + json.dumps(upstream.get('name')) + ' {\n')
    for server in upstream.get('server'):
        fh.write(' server ' + json.dumps(server.get('address')))
        for parameter in server.get('parameters'):
            fh.write(' ' + json.dumps(parameter))
        fh.write(';\n')
    fh.write( ' }\n' )
fh.write( '}\n')
fh.close()
# Optionally verify the result with 'nginx -t' (-t flag).
test_conf()
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import typing
import warnings
from ...models.customer import CustomerSignin, CustomerSignInResult
from ...models.error import ErrorResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyLoginRequestBuilder:
    """Request builder for ``POST /{projectKey}/login``."""

    _client: "BaseClient"
    _project_key: str

    def __init__(
        self,
        project_key: str,
        client: "BaseClient",
    ):
        self._project_key = project_key
        self._client = client

    def post(
        self,
        body: "CustomerSignin",
        *,
        headers: typing.Dict[str, str] = None,
        options: typing.Dict[str, typing.Any] = None,
    ) -> typing.Optional["CustomerSignInResult"]:
        """Authenticate Customer (Sign In). Retrieves the authenticated
        customer (a customer that matches the given email/password pair).
        If used with an access token for Anonymous Sessions,
        all orders and carts belonging to the anonymousId will be assigned to the newly created customer.
        If a cart is returned as part of the CustomerSignInResult,
        it has been recalculated (It will have up-to-date prices, taxes and discounts,
        and invalid line items have been removed.).

        :returns: the deserialized sign-in result, or None when the
            endpoint responds 404 (or with an unhandled status code).
        :raises: the client's mapped exception for 4xx/5xx error responses.
        """
        headers = {} if headers is None else headers
        response = self._client._post(
            endpoint=f"/{self._project_key}/login",
            params={},
            json=body.serialize(),
            headers={"Content-Type": "application/json", **headers},
            options=options,
        )
        if response.status_code in (201, 200):
            return CustomerSignInResult.deserialize(response.json())
        if response.status_code in (400, 401, 403, 500, 503):
            obj = ErrorResponse.deserialize(response.json())
            raise self._client._create_exception(obj, response)
        if response.status_code == 404:
            return None
        # NOTE: a bare 'status_code == 200' branch previously followed here;
        # it was unreachable because 200 is handled by the success branch
        # above, so it has been removed.
        warnings.warn("Unhandled status code %d" % response.status_code)
        return None
customer (a customer that matches the given email/password pair).
If used with an access token for Anonymous Sessions,
all orders and carts belonging to the anonymousId will be assigned to the newly created customer.
If a cart is is returned as part of the CustomerSignInResult,
it has been recalculated (It will have up-to-date prices, taxes and discounts,
and invalid line items have been removed.).
This file is automatically generated by the rmf-codegen project. The Python code generator is maintained by Lab Digital. If you want to contribute to this project then please do not edit this file directly but send a pull request to the Lab Digital fork of rmf-codegen at https://github.com/labd/rmf-codegen | 772 | en | 0.881798 |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from unittest import mock
import httpretty
import requests
from requests.adapters import BaseAdapter
from requests.models import Response
import opentelemetry.instrumentation.requests
from opentelemetry import context, trace
# FIXME: fix the importing of this private attribute when the location of the _SUPPRESS_HTTP_INSTRUMENTATION_KEY is defined.
from opentelemetry.context import _SUPPRESS_HTTP_INSTRUMENTATION_KEY
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
from opentelemetry.propagate import get_global_textmap, set_global_textmap
from opentelemetry.sdk import resources
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.mock_textmap import MockTextMapPropagator
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace import StatusCode
from opentelemetry.util.http import get_excluded_urls
class TransportMock:
    """Minimal stand-in for a raw transport object on a Response.

    Its ``read`` accepts anything and returns None, so code that drains
    the underlying stream is satisfied without doing any I/O.
    """

    def read(self, *args, **kwargs):
        """Accept any arguments and return None."""
        return None
class MyAdapter(BaseAdapter):
    """requests transport adapter that always returns one canned Response."""

    def __init__(self, response):
        super().__init__()
        # The Response handed back for every send(), no matter the request.
        self._canned_response = response

    def send(self, *args, **kwargs):  # pylint:disable=signature-differs
        return self._canned_response

    def close(self):
        # Nothing to release.
        pass
class InvalidResponseObjectException(Exception):
    """Exception carrying a ``response`` attribute that is not a Response.

    Used below (as a mock side_effect) to exercise the instrumentation's
    handling of exceptions with a malformed ``response`` attribute.
    """

    def __init__(self):
        super().__init__()
        # Deliberately not a requests.Response instance.
        self.response = {}
class RequestsIntegrationTestBase(abc.ABC):
    """Shared test cases for the ``requests`` instrumentation.

    Concrete subclasses implement :meth:`perform_request` to exercise a
    particular requests API (module-level functions vs. prepared requests).
    """
    # pylint: disable=no-member
    # pylint: disable=too-many-public-methods
    URL = "http://httpbin.org/status/200"  # stubbed via httpretty in setUp()
    # pylint: disable=invalid-name
    def setUp(self):
        """Instrument requests and stub self.URL to answer "Hello!"."""
        super().setUp()
        # Provide the excluded-URLs environment variable for the env tests.
        self.env_patch = mock.patch.dict(
            "os.environ",
            {
                "OTEL_PYTHON_REQUESTS_EXCLUDED_URLS": "http://localhost/env_excluded_arg/123,env_excluded_noarg"
            },
        )
        self.env_patch.start()
        # Replace the module-level parsed exclusion list so the patched
        # environment variable actually takes effect.
        self.exclude_patch = mock.patch(
            "opentelemetry.instrumentation.requests._excluded_urls_from_env",
            get_excluded_urls("REQUESTS"),
        )
        self.exclude_patch.start()
        # NOTE(review): exclude_patch is never stopped in tearDown (only
        # env_patch is) -- verify whether that is intentional.
        RequestsInstrumentor().instrument()
        httpretty.enable()
        httpretty.register_uri(httpretty.GET, self.URL, body="Hello!")
    # pylint: disable=invalid-name
    def tearDown(self):
        """Undo the instrumentation and HTTP stubbing started in setUp()."""
        super().tearDown()
        self.env_patch.stop()
        RequestsInstrumentor().uninstrument()
        httpretty.disable()
    def assert_span(self, exporter=None, num_spans=1):
        """Assert exactly *num_spans* finished spans were exported.

        Returns None, a single span, or the span list depending on
        *num_spans*.
        """
        if exporter is None:
            exporter = self.memory_exporter
        span_list = exporter.get_finished_spans()
        self.assertEqual(num_spans, len(span_list))
        if num_spans == 0:
            return None
        if num_spans == 1:
            return span_list[0]
        return span_list
    @staticmethod
    @abc.abstractmethod
    def perform_request(url: str, session: requests.Session = None):
        """Issue a GET to *url*, optionally via *session*; return the Response."""
        pass
    def test_basic(self):
        """A successful GET yields a CLIENT span with method/url/status set."""
        result = self.perform_request(self.URL)
        self.assertEqual(result.text, "Hello!")
        span = self.assert_span()
        self.assertIs(span.kind, trace.SpanKind.CLIENT)
        self.assertEqual(span.name, "HTTP GET")
        self.assertEqual(
            span.attributes,
            {
                SpanAttributes.HTTP_METHOD: "GET",
                SpanAttributes.HTTP_URL: self.URL,
                SpanAttributes.HTTP_STATUS_CODE: 200,
            },
        )
        self.assertIs(span.status.status_code, trace.StatusCode.UNSET)
        self.assertEqualSpanInstrumentationInfo(
            span, opentelemetry.instrumentation.requests
        )
    def test_name_callback(self):
        """A name_callback passed to instrument() overrides the span name."""
        def name_callback(method, url):
            return "GET" + url
        RequestsInstrumentor().uninstrument()
        RequestsInstrumentor().instrument(name_callback=name_callback)
        result = self.perform_request(self.URL)
        self.assertEqual(result.text, "Hello!")
        span = self.assert_span()
        self.assertEqual(span.name, "GET" + self.URL)
    def test_excluded_urls_explicit(self):
        """URLs matching excluded_urls passed to instrument() get no span."""
        url_404 = "http://httpbin.org/status/404"
        httpretty.register_uri(
            httpretty.GET,
            url_404,
            status=404,
        )
        RequestsInstrumentor().uninstrument()
        RequestsInstrumentor().instrument(excluded_urls=".*/404")
        self.perform_request(self.URL)
        self.perform_request(url_404)
        # Only the non-excluded request should have produced a span.
        self.assert_span(num_spans=1)
    def test_excluded_urls_from_env(self):
        """URLs matching the excluded-URLs environment variable get no span."""
        url = "http://localhost/env_excluded_arg/123"
        httpretty.register_uri(
            httpretty.GET,
            url,
            status=200,
        )
        RequestsInstrumentor().uninstrument()
        RequestsInstrumentor().instrument()
        self.perform_request(self.URL)
        self.perform_request(url)
        self.assert_span(num_spans=1)
    def test_name_callback_default(self):
        """A name_callback returning a non-string leaves the default name."""
        def name_callback(method, url):
            return 123
        RequestsInstrumentor().uninstrument()
        RequestsInstrumentor().instrument(name_callback=name_callback)
        result = self.perform_request(self.URL)
        self.assertEqual(result.text, "Hello!")
        span = self.assert_span()
        self.assertEqual(span.name, "HTTP GET")
    def test_not_foundbasic(self):
        """A 404 response records the status code and an ERROR span status."""
        url_404 = "http://httpbin.org/status/404"
        httpretty.register_uri(
            httpretty.GET,
            url_404,
            status=404,
        )
        result = self.perform_request(url_404)
        self.assertEqual(result.status_code, 404)
        span = self.assert_span()
        self.assertEqual(
            span.attributes.get(SpanAttributes.HTTP_STATUS_CODE), 404
        )
        self.assertIs(
            span.status.status_code,
            trace.StatusCode.ERROR,
        )
    def test_uninstrument(self):
        """After uninstrument(), requests produce no spans."""
        RequestsInstrumentor().uninstrument()
        result = self.perform_request(self.URL)
        self.assertEqual(result.text, "Hello!")
        self.assert_span(num_spans=0)
        # instrument again to avoid annoying warning message
        RequestsInstrumentor().instrument()
    def test_uninstrument_session(self):
        """uninstrument_session() affects only the given session."""
        session1 = requests.Session()
        RequestsInstrumentor().uninstrument_session(session1)
        result = self.perform_request(self.URL, session1)
        self.assertEqual(result.text, "Hello!")
        self.assert_span(num_spans=0)
        # Test that other sessions as well as global requests is still
        # instrumented
        session2 = requests.Session()
        result = self.perform_request(self.URL, session2)
        self.assertEqual(result.text, "Hello!")
        self.assert_span()
        self.memory_exporter.clear()
        result = self.perform_request(self.URL)
        self.assertEqual(result.text, "Hello!")
        self.assert_span()
    def test_suppress_instrumentation(self):
        """No span is produced while the suppress-instrumentation key is set."""
        token = context.attach(
            context.set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)
        )
        try:
            result = self.perform_request(self.URL)
            self.assertEqual(result.text, "Hello!")
        finally:
            context.detach(token)
        self.assert_span(num_spans=0)
    def test_suppress_http_instrumentation(self):
        """No span is produced while the HTTP-specific suppress key is set."""
        token = context.attach(
            context.set_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY, True)
        )
        try:
            result = self.perform_request(self.URL)
            self.assertEqual(result.text, "Hello!")
        finally:
            context.detach(token)
        self.assert_span(num_spans=0)
    def test_not_recording(self):
        """With a no-op tracer provider no span data is recorded or exported."""
        with mock.patch("opentelemetry.trace.INVALID_SPAN") as mock_span:
            RequestsInstrumentor().uninstrument()
            RequestsInstrumentor().instrument(
                tracer_provider=trace.NoOpTracerProvider()
            )
            mock_span.is_recording.return_value = False
            result = self.perform_request(self.URL)
            self.assertEqual(result.text, "Hello!")
            self.assert_span(None, 0)
            self.assertFalse(mock_span.is_recording())
            self.assertTrue(mock_span.is_recording.called)
            self.assertFalse(mock_span.set_attribute.called)
            self.assertFalse(mock_span.set_status.called)
    def test_distributed_context(self):
        """Trace context is injected into outgoing request headers."""
        previous_propagator = get_global_textmap()
        try:
            set_global_textmap(MockTextMapPropagator())
            result = self.perform_request(self.URL)
            self.assertEqual(result.text, "Hello!")
            span = self.assert_span()
            headers = dict(httpretty.last_request().headers)
            self.assertIn(MockTextMapPropagator.TRACE_ID_KEY, headers)
            self.assertEqual(
                str(span.get_span_context().trace_id),
                headers[MockTextMapPropagator.TRACE_ID_KEY],
            )
            self.assertIn(MockTextMapPropagator.SPAN_ID_KEY, headers)
            self.assertEqual(
                str(span.get_span_context().span_id),
                headers[MockTextMapPropagator.SPAN_ID_KEY],
            )
        finally:
            set_global_textmap(previous_propagator)
    def test_span_callback(self):
        """A span_callback can add attributes from the finished Response."""
        RequestsInstrumentor().uninstrument()
        def span_callback(span, result: requests.Response):
            span.set_attribute(
                "http.response.body", result.content.decode("utf-8")
            )
        RequestsInstrumentor().instrument(
            tracer_provider=self.tracer_provider,
            span_callback=span_callback,
        )
        result = self.perform_request(self.URL)
        self.assertEqual(result.text, "Hello!")
        span = self.assert_span()
        self.assertEqual(
            span.attributes,
            {
                SpanAttributes.HTTP_METHOD: "GET",
                SpanAttributes.HTTP_URL: self.URL,
                SpanAttributes.HTTP_STATUS_CODE: 200,
                "http.response.body": "Hello!",
            },
        )
    def test_custom_tracer_provider(self):
        """Spans are created against an explicitly provided tracer provider."""
        resource = resources.Resource.create({})
        result = self.create_tracer_provider(resource=resource)
        tracer_provider, exporter = result
        RequestsInstrumentor().uninstrument()
        RequestsInstrumentor().instrument(tracer_provider=tracer_provider)
        result = self.perform_request(self.URL)
        self.assertEqual(result.text, "Hello!")
        span = self.assert_span(exporter=exporter)
        self.assertIs(span.resource, resource)
    @mock.patch(
        "requests.adapters.HTTPAdapter.send",
        side_effect=requests.RequestException,
    )
    def test_requests_exception_without_response(self, *_, **__):
        """A transport exception with no response yields an ERROR span."""
        with self.assertRaises(requests.RequestException):
            self.perform_request(self.URL)
        span = self.assert_span()
        self.assertEqual(
            span.attributes,
            {
                SpanAttributes.HTTP_METHOD: "GET",
                SpanAttributes.HTTP_URL: self.URL,
            },
        )
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
    # Class-body temporary, captured by the decorator of the exception-path
    # test below at class-definition time; rebound again further down.
    mocked_response = requests.Response()
    mocked_response.status_code = 500
    mocked_response.reason = "Internal Server Error"
    @mock.patch(
        "requests.adapters.HTTPAdapter.send",
        side_effect=InvalidResponseObjectException,
    )
    def test_requests_exception_without_proper_response_type(self, *_, **__):
        """An exception carrying a non-Response .response still yields a span."""
        with self.assertRaises(InvalidResponseObjectException):
            self.perform_request(self.URL)
        span = self.assert_span()
        self.assertEqual(
            span.attributes,
            {
                SpanAttributes.HTTP_METHOD: "GET",
                SpanAttributes.HTTP_URL: self.URL,
            },
        )
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
    # Rebound: this is the instance actually referenced by the decorator of
    # test_requests_exception_with_response below.
    mocked_response = requests.Response()
    mocked_response.status_code = 500
    mocked_response.reason = "Internal Server Error"
    @mock.patch(
        "requests.adapters.HTTPAdapter.send",
        side_effect=requests.RequestException(response=mocked_response),
    )
    def test_requests_exception_with_response(self, *_, **__):
        """An exception carrying a Response records its status code too."""
        with self.assertRaises(requests.RequestException):
            self.perform_request(self.URL)
        span = self.assert_span()
        self.assertEqual(
            span.attributes,
            {
                SpanAttributes.HTTP_METHOD: "GET",
                SpanAttributes.HTTP_URL: self.URL,
                SpanAttributes.HTTP_STATUS_CODE: 500,
            },
        )
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
    @mock.patch("requests.adapters.HTTPAdapter.send", side_effect=Exception)
    def test_requests_basic_exception(self, *_, **__):
        """Any exception from the transport marks the span as ERROR."""
        with self.assertRaises(Exception):
            self.perform_request(self.URL)
        span = self.assert_span()
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
    @mock.patch(
        "requests.adapters.HTTPAdapter.send", side_effect=requests.Timeout
    )
    def test_requests_timeout_exception(self, *_, **__):
        """A timeout from the transport marks the span as ERROR."""
        with self.assertRaises(Exception):
            self.perform_request(self.URL)
        span = self.assert_span()
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
    def test_adapter_with_custom_response(self):
        """A custom adapter's canned Response is reflected in span attributes."""
        response = Response()
        response.status_code = 210
        response.reason = "hello adapter"
        response.raw = TransportMock()
        session = requests.Session()
        session.mount(self.URL, MyAdapter(response))
        self.perform_request(self.URL, session)
        span = self.assert_span()
        self.assertEqual(
            span.attributes,
            {
                "http.method": "GET",
                "http.url": self.URL,
                "http.status_code": 210,
            },
        )
class TestRequestsIntegration(RequestsIntegrationTestBase, TestBase):
    """Runs the shared integration tests via requests' functional API."""
    @staticmethod
    def perform_request(url: str, session: requests.Session = None):
        """GET via requests.get, or via the given session when provided."""
        if session is None:
            return requests.get(url)
        return session.get(url)
    def test_invalid_url(self):
        """A malformed URL raises ValueError but still records an ERROR span."""
        url = "http://[::1/nope"
        with self.assertRaises(ValueError):
            requests.post(url)
        span = self.assert_span()
        self.assertEqual(span.name, "HTTP POST")
        self.assertEqual(
            span.attributes,
            {SpanAttributes.HTTP_METHOD: "POST", SpanAttributes.HTTP_URL: url},
        )
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
    def test_credential_removal(self):
        """user:password credentials are stripped from the recorded URL."""
        new_url = "http://username:password@httpbin.org/status/200"
        self.perform_request(new_url)
        span = self.assert_span()
        self.assertEqual(span.attributes[SpanAttributes.HTTP_URL], self.URL)
    def test_if_headers_equals_none(self):
        """An explicit headers=None must not break the instrumentation."""
        result = requests.get(self.URL, headers=None)
        self.assertEqual(result.text, "Hello!")
        self.assert_span()
class TestRequestsIntegrationPreparedRequest(
RequestsIntegrationTestBase, TestBase
):
@staticmethod
def perform_request(url: str, session: requests.Session = None):
if session is None:
session = requests.Session()
request = requests.Request("GET", url)
prepared_request = session.prepare_request(request)
return session.send(prepared_request)
class TestRequestsIntergrationMetric(TestBase):
URL = "http://examplehost:8000/status/200"
def setUp(self):
super().setUp()
RequestsInstrumentor().instrument(meter_provider=self.meter_provider)
httpretty.enable()
httpretty.register_uri(httpretty.GET, self.URL, body="Hello!")
def tearDown(self):
super().tearDown()
RequestsInstrumentor().uninstrument()
httpretty.disable()
@staticmethod
def perform_request(url: str) -> requests.Response:
return requests.get(url)
def test_basic_metric_success(self):
self.perform_request(self.URL)
expected_attributes = {
"http.status_code": 200,
"http.host": "examplehost",
"net.peer.port": 8000,
"net.peer.name": "examplehost",
"http.method": "GET",
"http.flavor": "1.1",
"http.scheme": "http",
}
for (
resource_metrics
) in self.memory_metrics_reader.get_metrics_data().resource_metrics:
for scope_metrics in resource_metrics.scope_metrics:
for metric in scope_metrics.metrics:
for data_point in metric.data.data_points:
self.assertDictEqual(
expected_attributes, dict(data_point.attributes)
)
self.assertEqual(data_point.count, 1)
| instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py | 17,528 | Copyright The OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. FIXME: fix the importing of this private attribute when the location of the _SUPPRESS_HTTP_INSTRUMENTATION_KEY is defined. pylint:disable=signature-differs pylint: disable=no-member pylint: disable=too-many-public-methods pylint: disable=invalid-name pylint: disable=invalid-name instrument again to avoid annoying warning message Test that other sessions as well as global requests is still instrumented | 962 | en | 0.8262 |
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class InstanceSpec(object):
def __init__(self, vpcId, subnetId, instanceVersion, instanceName, azId, instanceClass, ipVersion=None, dedicatedMaster=None, coordinating=None, autoSnapshot=None, authConfig=None):
"""
:param vpcId: 私有网络vpcId
:param subnetId: 子网subnetId
:param instanceVersion: es版本,当前支持5.6.9和6.5.4
:param instanceName: es集群名称,不可为空,只支持大小写字母、数字、英文下划线或者中划线,以字母开头且不能超过32位
:param azId: 可用区,各可用区编码请参考:https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions
:param instanceClass: 规格配置,规格代码请参考:https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications
:param ipVersion: (Optional) 是否支持ipv6,支持值为v4&v6,不支持为空
:param dedicatedMaster: (Optional) 是否包含专用主节点,默认false
:param coordinating: (Optional) 是否包含协调节点,默认false
:param autoSnapshot: (Optional) 自动快照设置。
:param authConfig: (Optional) es数据面身份验证设置信息
"""
self.vpcId = vpcId
self.subnetId = subnetId
self.instanceVersion = instanceVersion
self.instanceName = instanceName
self.azId = azId
self.instanceClass = instanceClass
self.ipVersion = ipVersion
self.dedicatedMaster = dedicatedMaster
self.coordinating = coordinating
self.autoSnapshot = autoSnapshot
self.authConfig = authConfig
| jdcloud_sdk/services/es/models/InstanceSpec.py | 2,298 | :param vpcId: 私有网络vpcId
:param subnetId: 子网subnetId
:param instanceVersion: es版本,当前支持5.6.9和6.5.4
:param instanceName: es集群名称,不可为空,只支持大小写字母、数字、英文下划线或者中划线,以字母开头且不能超过32位
:param azId: 可用区,各可用区编码请参考:https://docs.jdcloud.com/cn/jcs-for-elasticsearch/restrictions
:param instanceClass: 规格配置,规格代码请参考:https://docs.jdcloud.com/cn/jcs-for-elasticsearch/specifications
:param ipVersion: (Optional) 是否支持ipv6,支持值为v4&v6,不支持为空
:param dedicatedMaster: (Optional) 是否包含专用主节点,默认false
:param coordinating: (Optional) 是否包含协调节点,默认false
:param autoSnapshot: (Optional) 自动快照设置。
:param authConfig: (Optional) es数据面身份验证设置信息
coding=utf8 Copyright 2018 JDCLOUD.COM Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE: This class is auto generated by the jdcloud code generator program. | 1,239 | en | 0.499441 |
# coding=utf-8
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
import re
import warnings
warnings.filterwarnings('ignore')
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| preliminary_contest/nezha_pretrain/tokenization.py | 12,970 | Runs basic tokenization (punctuation splitting, lower casing, etc.).
Runs end-to-end tokenziation.
Runs WordPiece tokenziation.
Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
Performs invalid character removal and whitespace cleanup on text.
Checks whether CP is the codepoint of a CJK character.
Checks whether `chars` is a control character.
Checks whether `chars` is a punctuation character.
Checks whether `chars` is a whitespace character.
Splits punctuation on a piece of text.
Strips accents from a piece of text.
Adds whitespace around any CJK character.
Converts a sequence of [tokens|ids] using the vocab.
Converts `text` to Unicode (if it's not already), assuming utf-8 input.
Loads a vocabulary file into a dictionary.
Returns text encoded in a way suitable for print or `tf.logging`.
Tokenizes a piece of text.
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer.
Returns:
A list of wordpiece tokens.
Checks whether the casing config is consistent with the checkpoint name.
Runs basic whitespace cleaning and splitting on a piece of text.
Tokenization classes.
coding=utf-8 The casing has to be passed in by the user and there is no explicit check as to whether it matches the checkpoint. The casing information probably should have been stored in the bert_config.json file, but it's not, so we have to heuristically detect it to validate. These functions want `str` for both Python2 and Python3, but in one case it's a Unicode string and in the other it's a byte string. This was added on November 1st, 2018 for the multilingual and Chinese models. This is also applied to the English models now, but it doesn't matter since the English models were not trained on any Chinese data and generally don't have any Chinese data in them (there are Chinese characters in the vocabulary because Wikipedia does have some Chinese words in the English Wikipedia.). This defines a "chinese character" as anything in the CJK Unicode block: https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) Note that the CJK Unicode block is NOT all Japanese and Korean characters, despite its name. The modern Korean Hangul alphabet is a different block, as is Japanese Hiragana and Katakana. Those alphabets are used to write space-separated words, so they are not treated specially and handled like the all of the other languages. \t, \n, and \r are technically contorl characters but we treat them as whitespace since they are generally considered as such. These are technically control characters but we count them as whitespace characters. We treat all non-letter/number ASCII as punctuation. Characters such as "^", "$", and "`" are not in the Unicode Punctuation class but we treat them as punctuation anyways, for consistency. | 3,082 | en | 0.904254 |
import io
from electrum.lnmsg import (read_bigsize_int, write_bigsize_int, FieldEncodingNotMinimal,
UnexpectedEndOfStream, LNSerializer, UnknownMandatoryTLVRecordType,
MalformedMsg, MsgTrailingGarbage, MsgInvalidFieldOrder, encode_msg,
decode_msg, UnexpectedFieldSizeForEncoder, OnionWireSerializer,
UnknownMsgType)
from electrum.lnonion import OnionRoutingFailure
from electrum.util import bfh
from electrum.lnutil import ShortChannelID, LnFeatures
from electrum import constants
from . import TestCaseForTestnet
class TestLNMsg(TestCaseForTestnet):
def test_write_bigsize_int(self):
self.assertEqual(bfh("00"), write_bigsize_int(0))
self.assertEqual(bfh("fc"), write_bigsize_int(252))
self.assertEqual(bfh("fd00fd"), write_bigsize_int(253))
self.assertEqual(bfh("fdffff"), write_bigsize_int(65535))
self.assertEqual(bfh("fe00010000"), write_bigsize_int(65536))
self.assertEqual(bfh("feffffffff"), write_bigsize_int(4294967295))
self.assertEqual(bfh("ff0000000100000000"), write_bigsize_int(4294967296))
self.assertEqual(bfh("ffffffffffffffffff"), write_bigsize_int(18446744073709551615))
def test_read_bigsize_int(self):
self.assertEqual(0, read_bigsize_int(io.BytesIO(bfh("00"))))
self.assertEqual(252, read_bigsize_int(io.BytesIO(bfh("fc"))))
self.assertEqual(253, read_bigsize_int(io.BytesIO(bfh("fd00fd"))))
self.assertEqual(65535, read_bigsize_int(io.BytesIO(bfh("fdffff"))))
self.assertEqual(65536, read_bigsize_int(io.BytesIO(bfh("fe00010000"))))
self.assertEqual(4294967295, read_bigsize_int(io.BytesIO(bfh("feffffffff"))))
self.assertEqual(4294967296, read_bigsize_int(io.BytesIO(bfh("ff0000000100000000"))))
self.assertEqual(18446744073709551615, read_bigsize_int(io.BytesIO(bfh("ffffffffffffffffff"))))
with self.assertRaises(FieldEncodingNotMinimal):
read_bigsize_int(io.BytesIO(bfh("fd00fc")))
with self.assertRaises(FieldEncodingNotMinimal):
read_bigsize_int(io.BytesIO(bfh("fe0000ffff")))
with self.assertRaises(FieldEncodingNotMinimal):
read_bigsize_int(io.BytesIO(bfh("ff00000000ffffffff")))
with self.assertRaises(UnexpectedEndOfStream):
read_bigsize_int(io.BytesIO(bfh("fd00")))
with self.assertRaises(UnexpectedEndOfStream):
read_bigsize_int(io.BytesIO(bfh("feffff")))
with self.assertRaises(UnexpectedEndOfStream):
read_bigsize_int(io.BytesIO(bfh("ffffffffff")))
self.assertEqual(None, read_bigsize_int(io.BytesIO(bfh(""))))
with self.assertRaises(UnexpectedEndOfStream):
read_bigsize_int(io.BytesIO(bfh("fd")))
with self.assertRaises(UnexpectedEndOfStream):
read_bigsize_int(io.BytesIO(bfh("fe")))
with self.assertRaises(UnexpectedEndOfStream):
read_bigsize_int(io.BytesIO(bfh("ff")))
    def test_read_tlv_stream_tests1(self):
        """TLV stream decoding failure cases.

        Test vectors from
        https://github.com/lightningnetwork/lightning-rfc/blob/452a0eb916fedf4c954137b4fd0b61b5002b34ad/01-messaging.md#tlv-decoding-failures
        Exercises truncated streams, non-minimal BigSize encodings, unknown
        even (mandatory) record types, and records whose declared length
        disagrees with their content.
        """
        lnser = LNSerializer()
        # These streams are malformed independent of namespace: check both.
        for tlv_stream_name in ("n1", "n2"):
            with self.subTest(tlv_stream_name=tlv_stream_name):
                # stream ends in the middle of a multi-byte BigSize type/length
                with self.assertRaises(UnexpectedEndOfStream):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(UnexpectedEndOfStream):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd01")), tlv_stream_name=tlv_stream_name)
                # BigSize written with a longer form than needed -> rejected
                with self.assertRaises(FieldEncodingNotMinimal):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd000100")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(UnexpectedEndOfStream):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd0101")), tlv_stream_name=tlv_stream_name)
                # record length field truncated, or value shorter than declared
                with self.assertRaises(UnexpectedEndOfStream):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("0ffd")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(UnexpectedEndOfStream):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("0ffd26")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(UnexpectedEndOfStream):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("0ffd2602")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(FieldEncodingNotMinimal):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("0ffd000100")), tlv_stream_name=tlv_stream_name)
                # declared length 513 but fewer value bytes follow
                with self.assertRaises(UnexpectedEndOfStream):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("0ffd0201000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")), tlv_stream_name="n1")
                # unknown *even* record types are mandatory-to-understand -> error
                with self.assertRaises(UnknownMandatoryTLVRecordType):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("1200")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(UnknownMandatoryTLVRecordType):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd010200")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(UnknownMandatoryTLVRecordType):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("fe0100000200")), tlv_stream_name=tlv_stream_name)
                with self.assertRaises(UnknownMandatoryTLVRecordType):
                    lnser.read_tlv_stream(fd=io.BytesIO(bfh("ff010000000000000200")), tlv_stream_name=tlv_stream_name)
        # n1-specific vectors below: known record types with bad contents.
        # tlv1 declares 9 bytes; more content than its field consumes
        with self.assertRaises(MsgTrailingGarbage):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0109ffffffffffffffffff")), tlv_stream_name="n1")
        # tlv1 amount_msat written with leading zero bytes -> not minimal
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("010100")), tlv_stream_name="n1")
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("01020001")), tlv_stream_name="n1")
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0103000100")), tlv_stream_name="n1")
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("010400010000")), tlv_stream_name="n1")
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("01050001000000")), tlv_stream_name="n1")
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0106000100000000")), tlv_stream_name="n1")
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("010700010000000000")), tlv_stream_name="n1")
        with self.assertRaises(FieldEncodingNotMinimal):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("01080001000000000000")), tlv_stream_name="n1")
        # tlv2 value shorter / longer than its declared length requires
        with self.assertRaises(UnexpectedEndOfStream):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("020701010101010101")), tlv_stream_name="n1")
        with self.assertRaises(MsgTrailingGarbage):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0209010101010101010101")), tlv_stream_name="n1")
        # tlv3 value too short for its node_id + two amount fields
        with self.assertRaises(UnexpectedEndOfStream):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0321023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb")), tlv_stream_name="n1")
        with self.assertRaises(UnexpectedEndOfStream):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0329023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb0000000000000001")), tlv_stream_name="n1")
        with self.assertRaises(UnexpectedEndOfStream):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0330023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb000000000000000100000000000001")), tlv_stream_name="n1")
        # check if ECC point is valid?... skip for now.
        #with self.assertRaises(Exception):
        #    lnser.read_tlv_stream(fd=io.BytesIO(bfh("0331043da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb00000000000000010000000000000002")), tlv_stream_name="n1")
        # tlv3 with one byte of content beyond its fields
        with self.assertRaises(MsgTrailingGarbage):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0332023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb0000000000000001000000000000000001")), tlv_stream_name="n1")
        # tlv4 (type 0x00fe) with a wrong-sized value
        with self.assertRaises(UnexpectedEndOfStream):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd00fe00")), tlv_stream_name="n1")
        with self.assertRaises(UnexpectedEndOfStream):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd00fe0101")), tlv_stream_name="n1")
        with self.assertRaises(MsgTrailingGarbage):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd00fe03010101")), tlv_stream_name="n1")
        # type 0 is unknown in n1, and even -> mandatory -> error
        with self.assertRaises(UnknownMandatoryTLVRecordType):
            lnser.read_tlv_stream(fd=io.BytesIO(bfh("0000")), tlv_stream_name="n1")
    def test_read_tlv_stream_tests2(self):
        """TLV stream decoding success cases.

        Test vectors from
        https://github.com/lightningnetwork/lightning-rfc/blob/452a0eb916fedf4c954137b4fd0b61b5002b34ad/01-messaging.md#tlv-decoding-successes
        """
        lnser = LNSerializer()
        # Unknown *odd* record types are optional: the stream decodes to an
        # empty dict in any namespace.
        for tlv_stream_name in ("n1", "n2"):
            with self.subTest(tlv_stream_name=tlv_stream_name):
                self.assertEqual({}, lnser.read_tlv_stream(fd=io.BytesIO(bfh("")), tlv_stream_name=tlv_stream_name))
                self.assertEqual({}, lnser.read_tlv_stream(fd=io.BytesIO(bfh("2100")), tlv_stream_name=tlv_stream_name))
                self.assertEqual({}, lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd020100")), tlv_stream_name=tlv_stream_name))
                self.assertEqual({}, lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd00fd00")), tlv_stream_name=tlv_stream_name))
                self.assertEqual({}, lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd00ff00")), tlv_stream_name=tlv_stream_name))
                self.assertEqual({}, lnser.read_tlv_stream(fd=io.BytesIO(bfh("fe0200000100")), tlv_stream_name=tlv_stream_name))
                self.assertEqual({}, lnser.read_tlv_stream(fd=io.BytesIO(bfh("ff020000000000000100")), tlv_stream_name=tlv_stream_name))
        # n1.tlv1 amount_msat, minimally encoded at each length 0..8 bytes.
        self.assertEqual({"tlv1": {"amount_msat": 0}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("0100")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 1}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("010101")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 256}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("01020100")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 65536}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("0103010000")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 16777216}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("010401000000")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 4294967296}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("01050100000000")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 1099511627776}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("0106010000000000")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 281474976710656}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("010701000000000000")), tlv_stream_name="n1"))
        self.assertEqual({"tlv1": {"amount_msat": 72057594037927936}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("01080100000000000000")), tlv_stream_name="n1"))
        # n1.tlv2: short_channel_id (8-byte value).
        self.assertEqual({"tlv2": {"scid": ShortChannelID.from_components(0, 0, 550)}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("02080000000000000226")), tlv_stream_name="n1"))
        # n1.tlv3: 33-byte node_id followed by two 8-byte amounts.
        self.assertEqual({"tlv3": {"node_id": bfh("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb"),
                                   "amount_msat_1": 1,
                                   "amount_msat_2": 2}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("0331023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb00000000000000010000000000000002")), tlv_stream_name="n1"))
        # n1.tlv4: type 0x00fe (needs a 3-byte BigSize type encoding).
        self.assertEqual({"tlv4": {"cltv_delta": 550}},
                         lnser.read_tlv_stream(fd=io.BytesIO(bfh("fd00fe020226")), tlv_stream_name="n1"))
def test_read_tlv_stream_tests3(self):
# from https://github.com/lightningnetwork/lightning-rfc/blob/452a0eb916fedf4c954137b4fd0b61b5002b34ad/01-messaging.md#tlv-stream-decoding-failure
lnser = LNSerializer()
with self.assertRaises(MsgInvalidFieldOrder):
lnser.read_tlv_stream(fd=io.BytesIO(bfh("0208000000000000022601012a")), tlv_stream_name="n1")
with self.assertRaises(MsgInvalidFieldOrder):
lnser.read_tlv_stream(fd=io.BytesIO(bfh("0208000000000000023102080000000000000451")), tlv_stream_name="n1")
with self.assertRaises(MsgInvalidFieldOrder):
lnser.read_tlv_stream(fd=io.BytesIO(bfh("1f000f012a")), tlv_stream_name="n1")
with self.assertRaises(MsgInvalidFieldOrder):
lnser.read_tlv_stream(fd=io.BytesIO(bfh("1f001f012a")), tlv_stream_name="n1")
with self.assertRaises(MsgInvalidFieldOrder):
lnser.read_tlv_stream(fd=io.BytesIO(bfh("ffffffffffffffffff000000")), tlv_stream_name="n2")
    def test_encode_decode_msg__missing_mandatory_field_gets_set_to_zeroes(self):
        """encode_msg: an omitted mandatory field gets zero-filled.

        "channel_update" is encoded without passing "signature"; the encoder
        emits zero bytes in its place, and decode_msg round-trips the result
        with 'signature' == bytes(64).
        """
        # encode without "signature" -> 64 zero bytes appear after the msg type
        self.assertEqual(bfh("01020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000000d43100006f00025e6ed0830100009000000000000000c8000001f400000023000000003b9aca00"),
                         encode_msg(
                             "channel_update",
                             short_channel_id=ShortChannelID.from_components(54321, 111, 2),
                             channel_flags=b'\x00',
                             message_flags=b'\x01',
                             cltv_expiry_delta=144,
                             htlc_minimum_msat=200,
                             htlc_maximum_msat=1_000_000_000,
                             fee_base_msat=500,
                             fee_proportional_millionths=35,
                             chain_hash=constants.net.rev_genesis_bytes(),
                             timestamp=1584320643,
                         ))
        # decoding the same bytes yields the zeroed signature explicitly
        self.assertEqual(('channel_update',
                          {'chain_hash': b'CI\x7f\xd7\xf8&\x95q\x08\xf4\xa3\x0f\xd9\xce\xc3\xae\xbay\x97 \x84\xe9\x0e\xad\x01\xea3\t\x00\x00\x00\x00',
                           'channel_flags': b'\x00',
                           'cltv_expiry_delta': 144,
                           'fee_base_msat': 500,
                           'fee_proportional_millionths': 35,
                           'htlc_maximum_msat': 1000000000,
                           'htlc_minimum_msat': 200,
                           'message_flags': b'\x01',
                           'short_channel_id': b'\x00\xd41\x00\x00o\x00\x02',
                           'signature': bytes(64),
                           'timestamp': 1584320643}
                          ),
                         decode_msg(bfh("01020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000000d43100006f00025e6ed0830100009000000000000000c8000001f400000023000000003b9aca00")))
def test_encode_decode_msg__missing_optional_field_will_not_appear_in_decoded_dict(self):
# "channel_update": optional field "htlc_maximum_msat" missing -> does not get put into dict
self.assertEqual(bfh("01020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000000d43100006f00025e6ed0830100009000000000000000c8000001f400000023"),
encode_msg(
"channel_update",
short_channel_id=ShortChannelID.from_components(54321, 111, 2),
channel_flags=b'\x00',
message_flags=b'\x01',
cltv_expiry_delta=144,
htlc_minimum_msat=200,
fee_base_msat=500,
fee_proportional_millionths=35,
chain_hash=constants.net.rev_genesis_bytes(),
timestamp=1584320643,
))
self.assertEqual(('channel_update',
{'chain_hash': b'CI\x7f\xd7\xf8&\x95q\x08\xf4\xa3\x0f\xd9\xce\xc3\xae\xbay\x97 \x84\xe9\x0e\xad\x01\xea3\t\x00\x00\x00\x00',
'channel_flags': b'\x00',
'cltv_expiry_delta': 144,
'fee_base_msat': 500,
'fee_proportional_millionths': 35,
'htlc_minimum_msat': 200,
'message_flags': b'\x01',
'short_channel_id': b'\x00\xd41\x00\x00o\x00\x02',
'signature': bytes(64),
'timestamp': 1584320643}
),
decode_msg(bfh("01020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000000d43100006f00025e6ed0830100009000000000000000c8000001f400000023")))
def test_encode_decode_msg__ints_can_be_passed_as_bytes(self):
self.assertEqual(bfh("01020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000000d43100006f00025e6ed0830100009000000000000000c8000001f400000023000000003b9aca00"),
encode_msg(
"channel_update",
short_channel_id=ShortChannelID.from_components(54321, 111, 2),
channel_flags=b'\x00',
message_flags=b'\x01',
cltv_expiry_delta=int.to_bytes(144, length=2, byteorder="big", signed=False),
htlc_minimum_msat=int.to_bytes(200, length=8, byteorder="big", signed=False),
htlc_maximum_msat=int.to_bytes(1_000_000_000, length=8, byteorder="big", signed=False),
fee_base_msat=int.to_bytes(500, length=4, byteorder="big", signed=False),
fee_proportional_millionths=int.to_bytes(35, length=4, byteorder="big", signed=False),
chain_hash=constants.net.rev_genesis_bytes(),
timestamp=int.to_bytes(1584320643, length=4, byteorder="big", signed=False),
))
self.assertEqual(('channel_update',
{'chain_hash': b'CI\x7f\xd7\xf8&\x95q\x08\xf4\xa3\x0f\xd9\xce\xc3\xae\xbay\x97 \x84\xe9\x0e\xad\x01\xea3\t\x00\x00\x00\x00',
'channel_flags': b'\x00',
'cltv_expiry_delta': 144,
'fee_base_msat': 500,
'fee_proportional_millionths': 35,
'htlc_maximum_msat': 1000000000,
'htlc_minimum_msat': 200,
'message_flags': b'\x01',
'short_channel_id': b'\x00\xd41\x00\x00o\x00\x02',
'signature': bytes(64),
'timestamp': 1584320643}
),
decode_msg(bfh("01020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000000d43100006f00025e6ed0830100009000000000000000c8000001f400000023000000003b9aca00")))
# "htlc_minimum_msat" is passed as bytes but with incorrect length
with self.assertRaises(UnexpectedFieldSizeForEncoder):
encode_msg(
"channel_update",
short_channel_id=ShortChannelID.from_components(54321, 111, 2),
channel_flags=b'\x00',
message_flags=b'\x01',
cltv_expiry_delta=int.to_bytes(144, length=2, byteorder="big", signed=False),
htlc_minimum_msat=int.to_bytes(200, length=4, byteorder="big", signed=False),
htlc_maximum_msat=int.to_bytes(1_000_000_000, length=8, byteorder="big", signed=False),
fee_base_msat=int.to_bytes(500, length=4, byteorder="big", signed=False),
fee_proportional_millionths=int.to_bytes(35, length=4, byteorder="big", signed=False),
chain_hash=constants.net.rev_genesis_bytes(),
timestamp=int.to_bytes(1584320643, length=4, byteorder="big", signed=False),
)
def test_encode_decode_msg__commitment_signed(self):
# "commitment_signed" is interesting because of the "htlc_signature" field,
# which is a concatenation of multiple ("num_htlcs") signatures.
# 5 htlcs
self.assertEqual(bfh("0084010101010101010101010101010101010101010101010101010101010101010106112951d0a6d7fc1dbca3bd1cdbda9acfee7f668b3c0a36bd944f7e2f305b274ba46a61279e15163b2d376c664bb3481d7c5e107a5b268301e39aebbda27d2d00056548bd093a2bd2f4f053f0c6eb2c5f541d55eb8a2ede4d35fe974e5d3cd0eec3138bfd4115f4483c3b14e7988b48811d2da75f29f5e6eee691251fb4fba5a2610ba8fe7007117fe1c9fa1a6b01805c84cfffbb0eba674b64342c7cac567dea50728c1bb1aadc6d23fc2f4145027eafca82d6072cc9ce6529542099f728a0521e4b2044df5d02f7f2cdf84404762b1979528aa689a3e060a2a90ba8ef9a83d24d31ffb0d95c71d9fb9049b24ecf2c949c1486e7eb3ae160d70d54e441dc785dc57f7f3c9901b9537398c66f546cfc1d65e0748895d14699342c407fe119ac17db079b103720124a5ba22d4ba14c12832324dea9cb60c61ee74376ee7dcffdd1836e354aa8838ce3b37854fa91465cc40c73b702915e3580bfebaace805d52373b57ac755ebe4a8fe97e5fc21669bea124b809c79968479148f7174f39b8014542"),
encode_msg(
"commitment_signed",
channel_id=b'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
signature=b"\x06\x11)Q\xd0\xa6\xd7\xfc\x1d\xbc\xa3\xbd\x1c\xdb\xda\x9a\xcf\xee\x7ff\x8b<\n6\xbd\x94O~/0['K\xa4ja'\x9e\x15\x16;-7lfK\xb3H\x1d|^\x10z[&\x83\x01\xe3\x9a\xeb\xbd\xa2}-",
num_htlcs=5,
htlc_signature=bfh("6548bd093a2bd2f4f053f0c6eb2c5f541d55eb8a2ede4d35fe974e5d3cd0eec3138bfd4115f4483c3b14e7988b48811d2da75f29f5e6eee691251fb4fba5a2610ba8fe7007117fe1c9fa1a6b01805c84cfffbb0eba674b64342c7cac567dea50728c1bb1aadc6d23fc2f4145027eafca82d6072cc9ce6529542099f728a0521e4b2044df5d02f7f2cdf84404762b1979528aa689a3e060a2a90ba8ef9a83d24d31ffb0d95c71d9fb9049b24ecf2c949c1486e7eb3ae160d70d54e441dc785dc57f7f3c9901b9537398c66f546cfc1d65e0748895d14699342c407fe119ac17db079b103720124a5ba22d4ba14c12832324dea9cb60c61ee74376ee7dcffdd1836e354aa8838ce3b37854fa91465cc40c73b702915e3580bfebaace805d52373b57ac755ebe4a8fe97e5fc21669bea124b809c79968479148f7174f39b8014542"),
))
self.assertEqual(('commitment_signed',
{'channel_id': b'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
'signature': b"\x06\x11)Q\xd0\xa6\xd7\xfc\x1d\xbc\xa3\xbd\x1c\xdb\xda\x9a\xcf\xee\x7ff\x8b<\n6\xbd\x94O~/0['K\xa4ja'\x9e\x15\x16;-7lfK\xb3H\x1d|^\x10z[&\x83\x01\xe3\x9a\xeb\xbd\xa2}-",
'num_htlcs': 5,
'htlc_signature': bfh("6548bd093a2bd2f4f053f0c6eb2c5f541d55eb8a2ede4d35fe974e5d3cd0eec3138bfd4115f4483c3b14e7988b48811d2da75f29f5e6eee691251fb4fba5a2610ba8fe7007117fe1c9fa1a6b01805c84cfffbb0eba674b64342c7cac567dea50728c1bb1aadc6d23fc2f4145027eafca82d6072cc9ce6529542099f728a0521e4b2044df5d02f7f2cdf84404762b1979528aa689a3e060a2a90ba8ef9a83d24d31ffb0d95c71d9fb9049b24ecf2c949c1486e7eb3ae160d70d54e441dc785dc57f7f3c9901b9537398c66f546cfc1d65e0748895d14699342c407fe119ac17db079b103720124a5ba22d4ba14c12832324dea9cb60c61ee74376ee7dcffdd1836e354aa8838ce3b37854fa91465cc40c73b702915e3580bfebaace805d52373b57ac755ebe4a8fe97e5fc21669bea124b809c79968479148f7174f39b8014542")}
),
decode_msg(bfh("0084010101010101010101010101010101010101010101010101010101010101010106112951d0a6d7fc1dbca3bd1cdbda9acfee7f668b3c0a36bd944f7e2f305b274ba46a61279e15163b2d376c664bb3481d7c5e107a5b268301e39aebbda27d2d00056548bd093a2bd2f4f053f0c6eb2c5f541d55eb8a2ede4d35fe974e5d3cd0eec3138bfd4115f4483c3b14e7988b48811d2da75f29f5e6eee691251fb4fba5a2610ba8fe7007117fe1c9fa1a6b01805c84cfffbb0eba674b64342c7cac567dea50728c1bb1aadc6d23fc2f4145027eafca82d6072cc9ce6529542099f728a0521e4b2044df5d02f7f2cdf84404762b1979528aa689a3e060a2a90ba8ef9a83d24d31ffb0d95c71d9fb9049b24ecf2c949c1486e7eb3ae160d70d54e441dc785dc57f7f3c9901b9537398c66f546cfc1d65e0748895d14699342c407fe119ac17db079b103720124a5ba22d4ba14c12832324dea9cb60c61ee74376ee7dcffdd1836e354aa8838ce3b37854fa91465cc40c73b702915e3580bfebaace805d52373b57ac755ebe4a8fe97e5fc21669bea124b809c79968479148f7174f39b8014542")))
# single htlc
self.assertEqual(bfh("008401010101010101010101010101010101010101010101010101010101010101013b14af0c549dfb1fb287ff57c012371b3932996db5929eda5f251704751fb49d0dc2dcb88e5021575cb572fb71693758543f97d89e9165f913bfb7488d7cc26500012d31103b9f6e71131e4fee86fdfbdeba90e52b43fcfd11e8e53811cd4d59b2575ae6c3c82f85bea144c88cc35e568f1e6bdd0c57337e86de0b5da7cd9994067a"),
encode_msg(
"commitment_signed",
channel_id=b'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
signature=b';\x14\xaf\x0cT\x9d\xfb\x1f\xb2\x87\xffW\xc0\x127\x1b92\x99m\xb5\x92\x9e\xda_%\x17\x04u\x1f\xb4\x9d\r\xc2\xdc\xb8\x8eP!W\\\xb5r\xfbqi7XT?\x97\xd8\x9e\x91e\xf9\x13\xbf\xb7H\x8d|\xc2e',
num_htlcs=1,
htlc_signature=bfh("2d31103b9f6e71131e4fee86fdfbdeba90e52b43fcfd11e8e53811cd4d59b2575ae6c3c82f85bea144c88cc35e568f1e6bdd0c57337e86de0b5da7cd9994067a"),
))
self.assertEqual(('commitment_signed',
{'channel_id': b'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
'signature': b';\x14\xaf\x0cT\x9d\xfb\x1f\xb2\x87\xffW\xc0\x127\x1b92\x99m\xb5\x92\x9e\xda_%\x17\x04u\x1f\xb4\x9d\r\xc2\xdc\xb8\x8eP!W\\\xb5r\xfbqi7XT?\x97\xd8\x9e\x91e\xf9\x13\xbf\xb7H\x8d|\xc2e',
'num_htlcs': 1,
'htlc_signature': bfh("2d31103b9f6e71131e4fee86fdfbdeba90e52b43fcfd11e8e53811cd4d59b2575ae6c3c82f85bea144c88cc35e568f1e6bdd0c57337e86de0b5da7cd9994067a")}
),
decode_msg(bfh("008401010101010101010101010101010101010101010101010101010101010101013b14af0c549dfb1fb287ff57c012371b3932996db5929eda5f251704751fb49d0dc2dcb88e5021575cb572fb71693758543f97d89e9165f913bfb7488d7cc26500012d31103b9f6e71131e4fee86fdfbdeba90e52b43fcfd11e8e53811cd4d59b2575ae6c3c82f85bea144c88cc35e568f1e6bdd0c57337e86de0b5da7cd9994067a")))
# zero htlcs
self.assertEqual(bfh("008401010101010101010101010101010101010101010101010101010101010101014e206ecf904d9237b1c5b4e08513555e9a5932c45b5f68be8764ce998df635ae04f6ce7bbcd3b4fd08e2daab7f9059b287ecab4155367b834682633497173f450000"),
encode_msg(
"commitment_signed",
channel_id=b'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
signature=b'N n\xcf\x90M\x927\xb1\xc5\xb4\xe0\x85\x13U^\x9aY2\xc4[_h\xbe\x87d\xce\x99\x8d\xf65\xae\x04\xf6\xce{\xbc\xd3\xb4\xfd\x08\xe2\xda\xab\x7f\x90Y\xb2\x87\xec\xabAU6{\x83F\x82c4\x97\x17?E',
num_htlcs=0,
htlc_signature=bfh(""),
))
self.assertEqual(('commitment_signed',
{'channel_id': b'\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
'signature': b'N n\xcf\x90M\x927\xb1\xc5\xb4\xe0\x85\x13U^\x9aY2\xc4[_h\xbe\x87d\xce\x99\x8d\xf65\xae\x04\xf6\xce{\xbc\xd3\xb4\xfd\x08\xe2\xda\xab\x7f\x90Y\xb2\x87\xec\xabAU6{\x83F\x82c4\x97\x17?E',
'num_htlcs': 0,
'htlc_signature': bfh("")}
),
decode_msg(bfh("008401010101010101010101010101010101010101010101010101010101010101014e206ecf904d9237b1c5b4e08513555e9a5932c45b5f68be8764ce998df635ae04f6ce7bbcd3b4fd08e2daab7f9059b287ecab4155367b834682633497173f450000")))
def test_encode_decode_msg__init(self):
# "init" is interesting because it has TLVs optionally
self.assertEqual(bfh("00100000000220c2"),
encode_msg(
"init",
gflen=0,
flen=2,
features=(LnFeatures.OPTION_STATIC_REMOTEKEY_OPT |
LnFeatures.GOSSIP_QUERIES_OPT |
LnFeatures.GOSSIP_QUERIES_REQ |
LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT),
))
self.assertEqual(bfh("00100000000220c2"),
encode_msg("init", gflen=0, flen=2, features=bfh("20c2")))
self.assertEqual(bfh("00100000000220c2012043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000"),
encode_msg(
"init",
gflen=0,
flen=2,
features=(LnFeatures.OPTION_STATIC_REMOTEKEY_OPT |
LnFeatures.GOSSIP_QUERIES_OPT |
LnFeatures.GOSSIP_QUERIES_REQ |
LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT),
init_tlvs={
'networks':
{'chains': b'CI\x7f\xd7\xf8&\x95q\x08\xf4\xa3\x0f\xd9\xce\xc3\xae\xbay\x97 \x84\xe9\x0e\xad\x01\xea3\t\x00\x00\x00\x00'}
}
))
self.assertEqual(('init',
{'gflen': 2,
'globalfeatures': b'"\x00',
'flen': 3,
'features': b'\x02\xa2\xa1',
'init_tlvs': {}}
),
decode_msg(bfh("001000022200000302a2a1")))
self.assertEqual(('init',
{'gflen': 2,
'globalfeatures': b'"\x00',
'flen': 3,
'features': b'\x02\xaa\xa2',
'init_tlvs': {
'networks':
{'chains': b'CI\x7f\xd7\xf8&\x95q\x08\xf4\xa3\x0f\xd9\xce\xc3\xae\xbay\x97 \x84\xe9\x0e\xad\x01\xea3\t\x00\x00\x00\x00'}
}}),
decode_msg(bfh("001000022200000302aaa2012043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000")))
def test_decode_onion_error(self):
orf = OnionRoutingFailure.from_bytes(bfh("400f0000000017d2d8b0001d9458"))
self.assertEqual(('incorrect_or_unknown_payment_details', {'htlc_msat': 399694000, 'height': 1938520}),
OnionWireSerializer.decode_msg(orf.to_bytes()))
self.assertEqual({'htlc_msat': 399694000, 'height': 1938520},
orf.decode_data())
orf2 = OnionRoutingFailure(26399, bytes.fromhex("0000000017d2d8b0001d9458"))
with self.assertRaises(UnknownMsgType):
OnionWireSerializer.decode_msg(orf2.to_bytes())
self.assertEqual(None, orf2.decode_data())
| electrum/tests/test_lnmsg.py | 33,626 | from https://github.com/lightningnetwork/lightning-rfc/blob/452a0eb916fedf4c954137b4fd0b61b5002b34ad/01-messaging.mdtlv-decoding-failures check if ECC point is valid?... skip for now.with self.assertRaises(Exception): lnser.read_tlv_stream(fd=io.BytesIO(bfh("0331043da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb00000000000000010000000000000002")), tlv_stream_name="n1") from https://github.com/lightningnetwork/lightning-rfc/blob/452a0eb916fedf4c954137b4fd0b61b5002b34ad/01-messaging.mdtlv-decoding-successes from https://github.com/lightningnetwork/lightning-rfc/blob/452a0eb916fedf4c954137b4fd0b61b5002b34ad/01-messaging.mdtlv-stream-decoding-failure "channel_update": "signature" missing -> gets set to zeroes "channel_update": optional field "htlc_maximum_msat" missing -> does not get put into dict "htlc_minimum_msat" is passed as bytes but with incorrect length "commitment_signed" is interesting because of the "htlc_signature" field, which is a concatenation of multiple ("num_htlcs") signatures. 5 htlcs single htlc zero htlcs "init" is interesting because it has TLVs optionally | 1,111 | en | 0.601119 |
#fake database to get the pygame running
import random
# Canned question/answer pools; get_question()/get_answer() draw from these at random.
questions = ["Question 1?", "Question 2?", "Question 3?", "Question 4?"]
answers = ["Answer 1", "Answer 2", "Answer 3", "Answer 4"]
def get_question():
    """Return one question picked uniformly at random from the canned pool."""
    return random.choice(questions)
def get_answer():
    """Return one answer picked uniformly at random from the canned pool."""
    return random.choice(answers)
#!/usr/bin/env python
"""
An example of image registration via the DTCWT.
This script demonstrates some methods for image registration using the DTCWT.
"""
from __future__ import division, print_function
import itertools
import logging
import os
from matplotlib.pyplot import *
import numpy as np
import dtcwt
from dtcwt.numpy import Transform2d
import dtcwt.sampling
from dtcwt.registration import *
logging.basicConfig(level=logging.INFO)
import datasets
def register_frames(filename):
    """Register two frames of the named dataset via the DTCWT and plot the result.

    Loads the frame pair, estimates the registration (affine parameter vectors)
    between them in the wavelet domain, warps frame 1 onto frame 2 and shows a
    2x2 figure: overlaid frames, the warped overlay, the velocity quiver plot
    and the velocity magnitude.

    NOTE: uses the stateful pyplot API (figure/subplot/imshow/...), so the
    statement order below is significant.
    """
    # Load test images
    logging.info('Loading frames from "{0}"'.format(filename))
    f1, f2 = datasets.regframes(filename)
    # Take the DTCWT of both frames.
    logging.info('Taking DTCWT')
    nlevels = 6
    trans = Transform2d()
    t1 = trans.forward(f1, nlevels=nlevels)
    t2 = trans.forward(f2, nlevels=nlevels)
    # Solve for transform
    logging.info('Finding flow')
    avecs = estimatereg(t1, t2)
    logging.info('Computing warped image')
    warped_f1 = warp(f1, avecs, method='bilinear')
    logging.info('Computing velocity field')
    # Sample the velocity field every `step` pixels for the quiver plot.
    step = 16
    X, Y = np.meshgrid(np.arange(f1.shape[1]), np.arange(f1.shape[0]))
    vxs, vys = velocityfield(avecs, f1.shape, method='nearest')
    # Subtract the median so a global translation does not swamp the plot.
    vxs -= np.median(vxs.flat)
    vys -= np.median(vys.flat)
    figure(figsize=(16,9))
    subplot(221)
    imshow(np.dstack((f1, f2, np.zeros_like(f1))))
    title('Overlaid frames')
    subplot(222)
    imshow(np.dstack((warped_f1, f2, np.zeros_like(f2))))
    title('Frame 1 warped to Frame 2 (image domain)')
    subplot(223)
    # sc scales the arrows; velocities are normalised, hence the shape factors.
    sc = 2
    imshow(np.dstack((f1, f2, np.zeros_like(f2))))
    quiver(X[::step,::step], Y[::step,::step],
           -sc*vxs[::step,::step]*f1.shape[1], -sc*vys[::step,::step]*f1.shape[0],
           color='b', angles='xy', scale_units='xy', scale=1)
    title('Computed velocity field (median subtracted), x{0}'.format(sc))
    subplot(224)
    imshow(np.sqrt(vxs*vxs + vys*vys), interpolation='none', cmap=cm.hot)
    colorbar()
    title('Magnitude of computed velocity (median subtracted)')
    # savefig(os.path.splitext(os.path.basename(filename))[0] + '-registration.png')
# Run the demo on the two bundled frame-pair datasets.
register_frames('traffic')
register_frames('tennis')
| docs/image-registration.py | 2,186 | An example of image registration via the DTCWT.
This script demonstrates some methods for image registration using the DTCWT.
!/usr/bin/env python Load test images Take the DTCWT of both frames. Solve for transform savefig(os.path.splitext(os.path.basename(filename))[0] + '-registration.png') | 295 | en | 0.578089 |
"""Azure Devops Server metric collector."""
from typing import List
import requests
from ..collector import Collector
from ..type import Entities, URL, Value
class AzureDevopsBase(Collector):
    """Base class for Azure DevOps collectors.

    Runs a WIQL query to find the ids of the matching work items and then
    fetches those work items with a second request.
    """

    def api_url(self) -> URL:
        """Return the WIQL query endpoint of the configured server."""
        base_url = super().api_url()
        return URL(f"{base_url}/_apis/wit/wiql?api-version=4.1")

    def get_source_responses(self, api_url: URL) -> List[requests.Response]:
        """Override because we need to do a post request and need to separately get the entities."""
        credentials = self.basic_auth_credentials()
        wiql_response = requests.post(
            api_url, timeout=self.TIMEOUT, auth=credentials,
            json=dict(query=self.parameters.get("wiql", "")))
        work_items = wiql_response.json().get("workItems", [])
        ids = ",".join(str(work_item["id"]) for work_item in work_items)
        if not ids:
            # The WIQL query matched nothing, so there are no work items to fetch.
            return [wiql_response]
        work_items_url = URL(f"{super().api_url()}/_apis/wit/workitems?ids={ids}&api-version=4.1")
        return [wiql_response, requests.get(work_items_url, timeout=self.TIMEOUT, auth=credentials)]

    def parse_source_responses_entities(self, responses: List[requests.Response]) -> Entities:
        """Convert the fetched work items into entities."""
        if len(responses) < 2:
            return []  # We didn't get a response with work items, so assume there are none
        entities = []
        for work_item in responses[1].json()["value"]:
            fields = work_item["fields"]
            entities.append(
                dict(
                    key=str(work_item["id"]), project=fields["System.TeamProject"],
                    title=fields["System.Title"], work_item_type=fields["System.WorkItemType"],
                    state=fields["System.State"],
                    url=work_item["url"]))
        return entities
class AzureDevopsIssues(AzureDevopsBase):
    """Collector to get issues from Azure Devops Server."""

    def parse_source_responses_value(self, responses: List[requests.Response]) -> Value:
        """Count the work items matched by the WIQL query."""
        work_items = responses[0].json()["workItems"]
        return str(len(work_items))
class AzureDevopsReadyUserStoryPoints(AzureDevopsBase):
    """Collector to get ready user story points from Azure Devops Server."""

    def parse_source_responses_value(self, responses: List[requests.Response]) -> Value:
        """Sum the story points over all returned work items."""
        if len(responses) <= 1:
            return "0"  # No work-item response means no story points.
        story_points = (
            work_item["fields"].get("Microsoft.VSTS.Scheduling.StoryPoints", 0)
            for work_item in responses[1].json()["value"])
        return str(round(sum(story_points)))

    def parse_source_responses_entities(self, responses: List[requests.Response]) -> Entities:
        """Extend the base-class entities with their story points."""
        entities = super().parse_source_responses_entities(responses)
        if len(responses) > 1:
            for entity, work_item in zip(entities, responses[1].json()["value"]):
                entity["story_points"] = work_item["fields"].get("Microsoft.VSTS.Scheduling.StoryPoints")
        return entities
| components/collector/src/collectors/azure_devops.py | 2,815 | Base class for Azure DevOps collectors.
Collector to get issues from Azure Devops Server.
Collector to get ready user story points from Azure Devops Server.
Override because we need to do a post request and need to separately get the entities.
Azure Devops Server metric collector.
We didn't get a response with work items, so assume there are none Add story points to the entities: | 384 | en | 0.89627 |
import json
import pytest
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
def url_string(string="/graphql", **url_params):
    """Return the endpoint path with url_params appended as a query string."""
    if not url_params:
        return string
    return "{}?{}".format(string, urlencode(url_params))
def batch_url_string(**url_params):
    """Like url_string(), but for the batch GraphQL endpoint."""
    return url_string("/graphql/batch", **url_params)
# PEP 8 (E731): use defs instead of lambdas assigned to names, so tracebacks
# show a useful function name.
def j(**kwargs):
    """JSON-encode the keyword arguments as a single request-body object."""
    return json.dumps(kwargs)


def jl(**kwargs):
    """JSON-encode the keyword arguments as a one-element list (batch body)."""
    return json.dumps([kwargs])
@pytest.mark.django_db
def test_graphiql_is_enabled(client):
    """A GET with Accept: text/html must serve the GraphiQL explorer page."""
    # Removed an unused local `from django.conf import settings`.
    response = client.get(url_string(), HTTP_ACCEPT="text/html")
    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "text/html"
@pytest.mark.django_db
def test_qfactor_graphiql(client):
    """A query plus Accept: text/html must serve the GraphiQL explorer page."""
    # Fix: HTTP_ACCEPT is a request header and must be passed to client.get();
    # inside url_string() it was sent as a query-string parameter instead
    # (compare test_graphiql_is_enabled above).
    response = client.get(url_string(query="{test}"), HTTP_ACCEPT="text/html")
    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "text/html"
@pytest.mark.django_db
def test_qfactor_json(client):
    """A query with Accept: application/json gets a JSON payload back."""
    # Fix: HTTP_ACCEPT is a request header and must be passed to client.get();
    # inside url_string() it was sent as a query-string parameter instead.
    response = client.get(url_string(query="{test}"), HTTP_ACCEPT="application/json")
    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "application/json"
    assert response.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_allows_get_with_query_param(client):
    """The GraphQL query may be passed as a plain GET parameter."""
    resp = client.get(url_string(query="{test}"))
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_allows_get_with_variable_values(client):
    """GET requests may supply GraphQL variables via the query string."""
    # Fix: HTTP_ACCEPT is a request header and must be passed to client.get();
    # inside url_string() it was sent as a query-string parameter instead.
    response = client.get(
        url_string(
            query="query helloWho($who: String){ test(who: $who) }",
            variables=json.dumps({"who": "Dolly"}),
        ),
        HTTP_ACCEPT="application/json",
    )
    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "application/json"
    assert response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_allows_get_with_operation_name(client):
    """operationName selects which of several named operations is executed."""
    query = """
        query helloYou { test(who: "You"), ...shared }
        query helloWorld { test(who: "World"), ...shared }
        query helloDolly { test(who: "Dolly"), ...shared }
        fragment shared on QueryRoot {
          shared: test(who: "Everyone")
        }
        """
    resp = client.get(url_string(query=query, operationName="helloWorld"))
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
@pytest.mark.django_db
def test_reports_validation_errors(client):
    """Unknown fields are rejected with a 400 and per-field error locations."""
    resp = client.get(url_string(query="{ test, unknownOne, unknownTwo }"))
    assert resp.status_code == 400
    assert resp["Content-Type"].split(";")[0] == "application/json"
    expected = {
        "errors": [
            {
                "message": 'Cannot query field "unknownOne" on type "QueryRoot".',
                "locations": [{"line": 1, "column": 9}],
            },
            {
                "message": 'Cannot query field "unknownTwo" on type "QueryRoot".',
                "locations": [{"line": 1, "column": 21}],
            },
        ]
    }
    assert resp.json() == expected
@pytest.mark.django_db
def test_errors_when_missing_operation_name(client):
    """A multi-operation document without an operationName is a 400 error."""
    query = """
        query TestQuery { test }
        mutation TestMutation { writeTest { test } }
        """
    resp = client.get(url_string(query=query))
    assert resp.status_code == 400
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [
            {
                "message": "Must provide operation name if query contains multiple operations."
            }
        ]
    }
@pytest.mark.django_db
def test_errors_when_sending_a_mutation_via_get(client):
    """Mutations sent over GET are rejected with a 405."""
    query = """
        mutation TestMutation { writeTest { test } }
        """
    resp = client.get(url_string(query=query))
    assert resp.status_code == 405
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [
            {"message": "Can only perform a mutation operation from a POST request."}
        ]
    }
@pytest.mark.django_db
def test_errors_when_selecting_a_mutation_within_a_get(client):
    """Selecting a mutation by operationName over GET is also a 405."""
    query = """
        query TestQuery { test }
        mutation TestMutation { writeTest { test } }
        """
    resp = client.get(url_string(query=query, operationName="TestMutation"))
    assert resp.status_code == 405
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [
            {"message": "Can only perform a mutation operation from a POST request."}
        ]
    }
@pytest.mark.django_db
def test_allows_mutation_to_exist_within_a_get(client):
    """A mutation may appear in the document as long as a query is executed."""
    query = """
        query TestQuery { test }
        mutation TestMutation { writeTest { test } }
        """
    resp = client.get(url_string(query=query, operationName="TestQuery"))
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_allows_post_with_json_encoding(client):
    """POST with a JSON body is accepted."""
    resp = client.post(url_string(), j(query="{test}"), "application/json")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_batch_allows_post_with_json_encoding(client):
    """The batch endpoint accepts a JSON list and echoes ids and statuses."""
    resp = client.post(batch_url_string(), jl(id=1, query="{test}"), "application/json")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # The batch endpoint responds with a list, one entry per batched request.
    assert resp.json() == [{"id": 1, "data": {"test": "Hello World"}, "status": 200}]
@pytest.mark.django_db
def test_batch_fails_if_is_empty(client):
    """An empty batch list is a 400 error."""
    resp = client.post(batch_url_string(), "[]", "application/json")
    assert resp.status_code == 400
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [{"message": "Received an empty list in the batch request."}]
    }
@pytest.mark.django_db
def test_allows_sending_a_mutation_via_post(client):
    """Mutations are allowed over POST."""
    resp = client.post(
        url_string(),
        j(query="mutation TestMutation { writeTest { test } }"),
        "application/json",
    )
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"writeTest": {"test": "Hello World"}}}
@pytest.mark.django_db
def test_allows_post_with_url_encoding(client):
    """POST with a form-urlencoded body is accepted."""
    body = urlencode(dict(query="{test}"))
    resp = client.post(url_string(), body, "application/x-www-form-urlencoded")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello World"}}
@pytest.mark.django_db
def test_supports_post_json_query_with_string_variables(client):
    """Variables may arrive as a JSON-encoded string inside a JSON body."""
    body = j(
        query="query helloWho($who: String){ test(who: $who) }",
        variables=json.dumps({"who": "Dolly"}),
    )
    resp = client.post(url_string(), body, "application/json")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_batch_supports_post_json_query_with_string_variables(client):
    """Batch requests also accept string-encoded variables."""
    body = jl(
        id=1,
        query="query helloWho($who: String){ test(who: $who) }",
        variables=json.dumps({"who": "Dolly"}),
    )
    resp = client.post(batch_url_string(), body, "application/json")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # The batch endpoint responds with a list, one entry per batched request.
    assert resp.json() == [{"id": 1, "data": {"test": "Hello Dolly"}, "status": 200}]
@pytest.mark.django_db
def test_supports_post_json_query_with_json_variables(client):
    """Variables may arrive as a JSON object inside a JSON body."""
    body = j(
        query="query helloWho($who: String){ test(who: $who) }",
        variables={"who": "Dolly"},
    )
    resp = client.post(url_string(), body, "application/json")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_batch_supports_post_json_query_with_json_variables(client):
    """Batch requests also accept variables as a JSON object."""
    body = jl(
        id=1,
        query="query helloWho($who: String){ test(who: $who) }",
        variables={"who": "Dolly"},
    )
    resp = client.post(batch_url_string(), body, "application/json")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # The batch endpoint responds with a list, one entry per batched request.
    assert resp.json() == [{"id": 1, "data": {"test": "Hello Dolly"}, "status": 200}]
@pytest.mark.django_db
def test_supports_post_url_encoded_query_with_string_variables(client):
    """Form-urlencoded bodies may carry string-encoded variables."""
    body = urlencode(
        dict(
            query="query helloWho($who: String){ test(who: $who) }",
            variables=json.dumps({"who": "Dolly"}),
        )
    )
    resp = client.post(url_string(), body, "application/x-www-form-urlencoded")
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_supports_post_json_quey_with_get_variable_values(client):
    """Variables in the URL combine with a JSON-encoded POST body."""
    # NOTE(review): "quey" in the test name looks like a typo for "query";
    # kept as-is so selecting this test by name keeps working.
    resp = client.post(
        url_string(variables=json.dumps({"who": "Dolly"})),
        j(query="query helloWho($who: String){ test(who: $who) }"),
        "application/json",
    )
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_post_url_encoded_query_with_get_variable_values(client):
    """Variables in the URL combine with a form-urlencoded POST body."""
    body = urlencode(dict(query="query helloWho($who: String){ test(who: $who) }"))
    resp = client.post(
        url_string(variables=json.dumps({"who": "Dolly"})),
        body,
        "application/x-www-form-urlencoded",
    )
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_supports_post_raw_text_query_with_get_variable_values(client):
    """Variables in the URL combine with a raw application/graphql body."""
    resp = client.post(
        url_string(variables=json.dumps({"who": "Dolly"})),
        "query helloWho($who: String){ test(who: $who) }",
        "application/graphql",
    )
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.django_db
def test_allows_post_with_operation_name(client):
response = client.post(
url_string(),
j(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
),
"application/json",
)
assert response.status_code == 200
# returns just json as __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_batch_allows_post_with_operation_name(client):
response = client.post(
batch_url_string(),
jl(
id=1,
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
),
"application/json",
)
assert response.status_code == 200
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = [
{
"id": 1,
"data": {"test": "Hello World", "shared": "Hello Everyone"},
"status": 200,
}
]
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_allows_post_with_get_operation_name(client):
response = client.post(
url_string(operationName="helloWorld"),
"""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
"application/graphql",
)
assert response.status_code == 200
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"test": "Hello World", "shared": "Hello Everyone"}}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
# inherited/ ???
"""
@pytest.mark.django_db
@pytest.mark.urls("graphene_django.tests.urls_inherited")
def test_inherited_class_with_attributes_works(client):
inherited_url = "/graphql/inherited/"
# Check schema and pretty attributes work
response = client.post(url_string(inherited_url, query="{test}"))
assert response.status_code == 200
# returns just json as list of __dict__
expected_dict = (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
# directly compare all key,value for __dict__
assert response.json() == expected_dict
# Check graphiql works
response = client.get(url_string(inherited_url), HTTP_ACCEPT="text/html")
assert response.status_code == 200
"""
@pytest.mark.django_db
def test_handles_field_errors_caught_by_graphql(client):
    """A raising resolver yields HTTP 200 with an ``errors`` entry."""
    resp = client.get(url_string(query="{thrower}"))
    # GraphQL catches field errors itself, so the HTTP layer still says 200.
    assert resp.status_code == 200
    assert resp["Content-Type"].split(";")[0] == "application/json"
    expected = {
        "data": None,
        "errors": [
            {
                "locations": [{"column": 2, "line": 1}],
                "path": ["thrower"],
                "message": "Throws!",
            }
        ],
    }
    # Location, path and message must all be reported for the failing field.
    assert resp.json() == expected
@pytest.mark.django_db
def test_handles_syntax_errors_caught_by_graphql(client):
response = client.get(url_string(query="syntaxerror"))
assert response.status_code == 400
# returns just json as list of __dict__
expected_dict = {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": "Syntax Error GraphQL (1:1) "
'Unexpected Name "syntaxerror"\n\n1: syntaxerror\n ^\n',
}
]
}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_errors_caused_by_a_lack_of_query(client):
    """GET without any query string is rejected with 400 and a JSON error."""
    resp = client.get(url_string())
    assert resp.status_code == 400
    assert resp["Content-Type"].split(";")[0] == "application/json"
    # The error message wording is part of the contract.
    assert resp.json() == {"errors": [{"message": "Must provide query string."}]}
@pytest.mark.django_db
def test_handles_not_expected_json_bodies(client):
response = client.post(url_string(), "[]", "application/json")
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {
"errors": [{"message": "The received data is not a valid JSON query."}]
}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_invalid_json_bodies(client):
response = client.post(url_string(), "[oh}", "application/json")
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "POST body sent invalid JSON."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_django_request_error(client, monkeypatch):
    """An IOError raised while reading the request body becomes a 400 error."""
    def mocked_read(*args):
        # Simulate Django failing to read the request stream.
        raise IOError("foo-bar")
    monkeypatch.setattr("django.http.request.HttpRequest.read", mocked_read)
    valid_json = json.dumps(dict(foo="bar"))
    response = client.post(url_string(), valid_json, "application/json")
    assert response.status_code == 400
    # returns just json as list of __dict__
    assert response["Content-Type"].split(";")[0] == "application/json"
    # The IOError's message is surfaced verbatim in the error payload.
    expected_dict = {"errors": [{"message": "foo-bar"}]}
    # directly compare all key,value for __dict__
    assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_plain_post_text(client):
response = client.post(
url_string(variables=json.dumps({"who": "Dolly"})),
"query helloWho($who: String){ test(who: $who) }",
"text/plain",
)
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Must provide query string."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_poorly_formed_variables(client):
response = client.get(
url_string(
query="query helloWho($who: String){ test(who: $who) }", variables="who:You"
)
)
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "Variables are invalid JSON."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_handles_unsupported_http_methods(client):
    """Verbs other than GET/POST get 405 plus an ``Allow`` header."""
    resp = client.put(url_string(query="{test}"))
    assert resp.status_code == 405
    # The Allow header advertises exactly the supported verbs.
    assert resp["Allow"] == "GET, POST"
    assert resp["Content-Type"].split(";")[0] == "application/json"
    assert resp.json() == {
        "errors": [{"message": "GraphQL only supports GET and POST requests."}]
    }
@pytest.mark.django_db
def test_handles_incomplete_json_bodies(client):
response = client.post(url_string(), '{"query":', "application/json")
assert response.status_code == 400
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"errors": [{"message": "POST body sent invalid JSON."}]}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
@pytest.mark.django_db
def test_passes_request_into_context_request(client):
response = client.get(url_string(query="{request}", q="testing"))
assert response.status_code == 200
# returns just json as list of __dict__
assert response["Content-Type"].split(";")[0] == "application/json"
expected_dict = {"data": {"request": "testing"}}
# directly compare all key,value for __dict__
assert response.json() == expected_dict
# pretty() -- comparing as string
@pytest.mark.django_db
@pytest.mark.urls("graphene_django.tests.urls_pretty")
def test_supports_pretty_printing(client):
    """With the pretty-printing URL conf, the raw body is indented JSON."""
    response = client.get(url_string(query="{test}"))
    assert response.status_code == 200
    assert response["Content-Type"].split(";")[0] == "application/json"
    # Compare the exact bytes (adjacent string literals form one string):
    # an indented, newline-separated JSON document.
    assert response.content.decode() == (
        "{\n" '  "data": {\n' '    "test": "Hello World"\n' "  }\n" "}"
    )
@pytest.mark.django_db
def test_supports_pretty_printing_by_request(client):
response = client.get(url_string(query="{test}", pretty="1"))
assert response.status_code == 200
assert response["Content-Type"].split(";")[0] == "application/json"
assert response.content.decode() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
# GraphQL SPEC:
# TODO: more mutations and some subscriptions
# TODO: fragments
# TODO: META __typename
# Additions:
# META AUTH
# Not working? GraphiQL uses CDN assets instead of static/ when DEBUG is on
| graphene_django/tests/test_views.py | 24,067 | returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ directly compare all key,value for __dict__ directly compare all key,value for __dict__ directly compare all key,value for __dict__ directly compare all key,value for __dict__ directly compare all key,value for __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ -- NOTE responce is list of stuff! directly compare all key,value for __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ -- NOTE responce is list of stuff! returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ inherited/ ??? 
returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ returns just json as list of __dict__ directly compare all key,value for __dict__ pretty() -- comparing as string GraphQL SPEC: TODO: more mutations and somesucriptions TODO: fragment TODO: META __typename Additions: META AUTH ?not working? CDN not static/ for DEBUG | 2,797 | en | 0.635859 |
import RPi.GPIO as GPIO
from time import sleep
import sys
# Set warnings off (optional): suppress "channel already in use" warnings.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
# Button and LED pin assignments (BCM numbers).
JOYSTICK_BUTTON = 12
MAIN_SWITCH = 22
LED = 6
class button():
    '''
    A simple Push-Button class: configures one GPIO input pin and mirrors
    its level onto the shared LED output pin via an edge-detect callback.
    '''
    def __init__(self, pin, pud_up_down):
        # pin:         BCM number of the input pin to watch
        # pud_up_down: GPIO.PUD_UP or GPIO.PUD_DOWN, matching the wiring
        print("'def __init__(self," + str(pin)+ "): '")
        GPIO.setup(pin, GPIO.IN, pull_up_down=pud_up_down)
        # NOTE(review): the LED pin is (re)configured once per button
        # instance — harmless, but strictly only needed once.
        GPIO.setup(LED,GPIO.OUT)
        # Fire on both rising and falling edges; 300 ms hardware debounce.
        GPIO.add_event_detect(pin, GPIO.BOTH, callback=self.push_button_callback, bouncetime=300)
#        GPIO.add_event_detect(pin, GPIO.FALLING, callback=self.release_button_callback, bouncetime=300)
    def push_button_callback(self, channel):
        # channel: the BCM pin number that triggered the edge.
        print(channel)
        # Short settle delay before re-sampling the input level.
        sleep(0.1)
        if GPIO.input(channel):
            print("Rising edge detected on " + str(channel) )
            GPIO.output(LED,GPIO.HIGH)  # assumes LED is wired active-high — TODO confirm
        else:
            print("Falling edge detected on " + str(channel) )
            GPIO.output(LED,GPIO.LOW)
def main(args=None):
    """Set up both buttons, then idle until Ctrl-C; clean up GPIO on exit."""
    # args: unused; presumably kept for a ROS2-style entry point — TODO confirm
    main_switch = button(MAIN_SWITCH, GPIO.PUD_DOWN)
    joystick_button = button(JOYSTICK_BUTTON, GPIO.PUD_UP)
    try:
        # All the work happens in the GPIO edge callbacks; just heartbeat here.
        while True:
            print(".")
            sleep(5)
    except KeyboardInterrupt:
        print("LedLightNode **** 💀 Ctrl-C detected...")
    finally:
        print("LedLightNode **** 🪦 Ending... ")
        print( str(sys.exc_info()[1]) ) # Needs `import sys` (done at file top)
        # Release every GPIO channel this process configured.
        GPIO.cleanup()

if __name__ == "__main__":
    main()
Set warnings off (optional)Set Button and LED pins GPIO.add_event_detect(pin, GPIO.FALLING, callback=self.release_button_callback, bouncetime=300) Need ´import sys´ Time to clean up stuff! | 223 | en | 0.447001 |
import pickle
import sys
import platform
import gc
import copy
import warnings
import tempfile
from StringIO import StringIO
from os import path
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, assert_warns, dec
)
from numpy.testing.utils import _assert_valid_refcount, WarningManager
from numpy.compat import asbytes, asunicode, asbytes_nested
if sys.version_info[0] >= 3:
import io
StringIO = io.BytesIO
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]),np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,),dtype=[('x',np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2,9],[7,0],[3,8]]))
f = StringIO()
pickle.dump(a,f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a,b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64],'Int64')
assert_equal(np.typeNA[np.uint64],'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name','label'),np.int32,3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5,5,5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = StringIO()
ca = np.char.array(np.arange(1000,1010),itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
    def test_noncontiguous_fill(self,level=rlevel):
        """Ticket #58: reshaping a non-contiguous view must raise."""
        a = np.zeros((5,3))
        # b is a non-contiguous column slice, so its shape cannot be
        # reassigned in place.
        b = a[:,:2,]
        def rs():
            b.shape = (10,)
        self.assertRaises(AttributeError,rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j,4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3,2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
"""Ticket #72"""
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False,True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError,np.dtype,
{'names':['a'],'formats':['foo']},align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width,16)
self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16)
self.assertRaises(ValueError,np.intp,'0x1',32)
assert_equal(255,np.intp('0xFF',16))
assert_equal(1024,np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10.,dtype='>f8')
b = np.arange(10.,dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa,ya.nonzero())
assert_array_almost_equal(xb,yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0,1)
y = np.random.randn(10,1)
z = np.dot(x, np.transpose(y))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10,dtype='<f8')
assert_array_equal(ref,x)
x = np.arange(10,dtype='>f8')
assert_array_equal(ref,x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert_(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0,1,(4,5,6,7,8))
for i in xrange(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0],10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3,3))
y = np.array([0,0,0])
self.assertRaises(ValueError,np.hstack,(x,y))
def test_squeeze_type(self,level=rlevel):
"""Ticket #133"""
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0,np.add.identity)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0',np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
descr = np.dtype([('i',int),('f',float),('s','|S3')])
x = np.rec.array([(1,1.1,'1.0'),
(2,2.2,'2.0')],dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello',np.unicode_)
b = np.array('world')
a == b
def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create discontiguous Fortran-ordered array
x = np.array(np.random.rand(3,3),order='F')[:,:2]
assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3,1))
x.flat = np.arange(3)
assert_array_almost_equal(x,[[0],[1],[2]])
x.flat = np.arange(3,dtype=float)
assert_array_almost_equal(x,[[0],[1],[2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3,1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3,dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x',int),('y',np.object_),('z','O')])
f = StringIO()
pickle.dump(dt,f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt,dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
"""Ticket #196"""
dt = np.dtype([('x',int),('y',np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1,'object'], dt)
# Correct way
np.array([(1,'object')],dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
a = np.array([1,2,3],dtype=np.int32)
b = a.copy()
r = np.rec.array(a,shape=1,formats=['3i4'],names=['d'])
assert_array_equal(a,b)
assert_equal(a,r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,),5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,))
assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2))
assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2))
assert_equal(np.array([],dtype=object).shape, (0,))
assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0))
assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y),decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x","x ","x "))
for c in x: assert_equal(c,"x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1,2,3,4,5,6,7,8,9,10])
assert_equal(np.lexsort(v),0)
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
import pickle
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='<c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='>c8').real.max(),3.0)
assert_equal(np.arange(4,dtype='<c8').real.max(),3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1,'A',None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3,1),int)
a[[1,2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'),('two', '<i4')])
x = np.array((1,2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
    def test_method_args(self, level=rlevel):
        # Make sure methods and functions have same default axis
        # keyword and arguments.
        # funcs1: names callable both as ndarray methods and np functions;
        # a tuple means (function name, method name) differ.
        funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
                 ('sometrue', 'any'),
                 ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
                 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
                 'round', 'min', 'max', 'argsort', 'sort']
        # funcs2: take a second array argument.
        funcs2 = ['compress', 'take', 'repeat']
        for func in funcs1:
            arr = np.random.rand(8,7)
            arr2 = arr.copy()
            if isinstance(func, tuple):
                func_meth = func[1]
                func = func[0]
            else:
                func_meth = func
            # Method result on arr vs free-function result on the copy.
            res1 = getattr(arr, func_meth)()
            res2 = getattr(np, func)(arr2)
            if res1 is None:
                # In-place method (e.g. sort): compare arr itself to res2.
                assert_(abs(arr-res2).max() < 1e-8, func)
            else:
                assert_(abs(res1-res2).max() < 1e-8, func)
        for func in funcs2:
            arr1 = np.random.rand(8,7)
            arr2 = np.random.rand(8,7)
            res1 = None
            if func == 'compress':
                # compress wants a boolean-ish condition the length of arr1.
                arr1 = arr1.ravel()
                res1 = getattr(arr2, func)(arr1)
            else:
                # take/repeat want integer indices/counts.
                arr2 = (15*arr2).astype(int).ravel()
            if res1 is None:
                res1 = getattr(arr1, func)(arr2)
            res2 = getattr(np, func)(arr1, arr2)
            assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc','cde','fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1,2])[np.array([0])]
assert_equal(x.shape,(1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
dt = [('x',np.int16),('y',np.float64)]
ra = np.array([(1,2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1,2],[3,4],[5,6]])
x2 = np.array(['a','dd','xyz'])
x3 = np.array([1.1,2,3])
np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2,2),object)
x.flat[2] = (1,2,3)
assert_equal(x.flat[2],(1,2,3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1,2,3],dtype=np.float64)
assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2)
assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data,data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert_(np.dtype('i4') == np.dtype(('i4',())))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array,[['X'],['X','X','X']],'|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
dt1=np.dtype(('uint32', 2))
dt2=np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2,3,order='F')
assert_equal(a,[[0,2,4],[1,3,5]])
a = np.array([[1,2],[3,4],[5,6],[7,8]])
b = a[:,1]
assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]])
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4,3)[:,2]
assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1,2,3])
a2 = np.array([[1,2,3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1,2,3],dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
dt0 = np.dtype([('f0','i4'),('f1','i4')])
dt1 = np.dtype([('f0','i8'),('f1','i8')])
for a in [np.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)]),
np.rec.fromarrays([(1,2),(3,4)],"i4,i4"),
np.rec.fromarrays([(1,2),(3,4)])]:
assert_(a.dtype in [dt0,dt1])
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5,1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0),a)
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi','There'])
assert_equal(ca.startswith('H'),[True,False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError,np.convolve,[],[1])
self.assertRaises(ValueError,np.convolve,[1],[])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
r=np.array([(1,(0,1,2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256,(0,256,512))],r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == asbytes('abc'))
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3,4))
a = np.take(x,[0,2],axis=1)
b = np.zeros_like(a)
np.take(x,[0,2],axis=1,out=b)
assert_array_equal(a,b)
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan],dtype=np.float64)
errstate = np.seterr(all='raise')
try:
sstr = np.array_str(s)
finally:
np.seterr(**errstate)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(range(16))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5,dtype=float)
b = np.array(a,dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12',''],['13','']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1,5,25,125.,625]])
y = np.array([[20.],[160.],[640.],[1280.],[1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x,z),np.dot(x,y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484,286])
y = np.zeros([484,286])
x |= y
self.assertRaises(TypeError,rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
import cPickle
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = cPickle.loads(cPickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4','<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0,dtype=dt).dtype,dt)
assert_equal(np.arange(0.5,dtype=dt).dtype,dt)
assert_equal(np.arange(5,dtype=dt).dtype,dt)
def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10,dtype=float)
x = np.array((15,),dtype=float)
def ia(x,s,v): x[(s>0)]=v
self.assertRaises(ValueError,ia,x,s,np.zeros(9,dtype=float))
self.assertRaises(ValueError,ia,x,s,np.zeros(11,dtype=float))
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0],dtype=float)
index = np.array(0,dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0,width=3),'000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12,9,9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a','aa','b'])
y = np.array(['d','e'])
assert_equal(x.searchsorted(y), [3,3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1,0]))
assert_array_equal(x.argsort(kind='q'), np.array([1,0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0,-0.0,0])
assert_equal(str(np.abs(x)),'[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'),np.dtype('>i4')):
x = np.array([-1,0,1],dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0,3,4)).T.reshape(-1,3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1d broadcasted slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1d -> 2d broadcasted slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:,:] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:,0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:,0] = a
arr[:,1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:,0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr3 = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
arr3 = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
arr3 = arr1.take([1,2,3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0],[1],[0],[1],[1]], int)
arr3 = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat(object):
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1,2,3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30,40))
y = pickle.loads(pickle.dumps(x))
# y is now typically not aligned on a 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
f = open(filename, 'rb')
xp = pickle.load(f, encoding='latin1')
f.close()
else:
f = open(filename)
xp = pickle.load(f)
f.close()
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be " \
"safely casted should not return "\
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba')
assert_(dat.info == 'jubba')
dat.resize((4,2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2,3,4],[6,3,4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32,0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2,7).info == 'jubba')
assert_(dat.compress([0,1,1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0],'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32,0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2,4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0,1).info == 'jubba')
assert_(dat.take([2,3,5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1','2','3']))
assert_equal(a,b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1,9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a','S%d'%i),('b','U2')])
x = np.array([(asbytes('a'),u'b')], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
olderr = np.seterr(invalid='ignore')
try:
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
finally:
np.seterr(**olderr)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4,5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0,1],[2,3]])
self.assertRaises(TypeError, x.resize, (2,2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
old_err = np.seterr(all="ignore")
try:
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert_(n_before >= n_after, (n_before, n_after))
finally:
np.seterr(**old_err)
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in xrange(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
self.assertTrue(np.size == sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
a = np.array([u'123', '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', u'123', '12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'1234'])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259"""
z = np.array([-1j], '<c8')
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0,1] == x[0,0])
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
old_err = np.seterr(divide="ignore")
try:
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
finally:
np.seterr(**old_err)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1,2,3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_numeric_handleError(self):
"""Ticket #1405"""
from numpy import numarray
# Just make sure this doesn't throw an exception
numarray.handleError(0, "")
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])))
assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], dtypes[0])
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError, e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1,2,3),(4,5,6),(7,8,9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls,i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1,2,3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4,4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tostring())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
finally:
warn_ctx.__exit__()
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([],['?','?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
# This was throwing an exception because in ctors.c,
# discover_itemsize was calling PyObject_Length without checking
# the return code. This failed to get the length of the number 2,
# and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1,0]), [])
def test_objectarray_setfield(self):
# Setfield directly manipulates the raw array data,
# so is invalid for object arrays.
x = np.array([1,2,3], dtype=object)
assert_raises(RuntimeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1],[s2],[s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1,17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1,2],[3,4]]).T
y = np.array(x.flat)
assert_equal(x, [[1,3],[2,4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_structured_type_to_object(self):
a_rec = np.array([(0,1), (3,2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0,1)
a_obj[1] = (3,2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2,2), dtype=object)
a[...] = [[1,2]]
assert_equal(a, [[1,2], [1,2]])
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
res = np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr in [asunicode, asbytes]:
b = np.array([val, tostr('xx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val])
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')])
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val])
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for Numpy's four byte unicode.
if sys.version_info[0] >= 3:
a = np.array(['abcd'])
else:
a = np.array([u'abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode)[0]
del a
def test_maskna_deallocation(self):
# This caused a segfault when running under python-debug
a = np.array([1]).view(maskna=True)
del a
if __name__ == "__main__":
run_module_suite()
| numpy/core/tests/test_regression.py | 59,333 | Longfloat support is not consistent enough across platforms for this test to be meaningful. def test_longfloat_repr(self,level=rlevel): """Ticket 112""" if np.longfloat(0).itemsize > 8: a = np.exp(np.array([1000],dtype=np.longfloat)) assert_(str(a)[1:9] == str(a[0])[:8]) Create discontiguous Fortran-ordered array Wrong way Correct way Make sure methods and functions have same default axis keyword and arguments This used to trigger the object-type version of the bitwise_or operation, because float64 -> object casting succeeds NB. this is probably CPython-specific -- 0d -> 1d broadcasted slice assignment -- 1d -> 2d broadcasted slice assignment -- 2d copying + flattening -- concatenate, repeat, take, choose refcount of 'd' might hit zero here trying to induce a segfault by doing it again... y is now typically not aligned on a 8-byte boundary This shouldn't cause a segmentation fault: The test data file was generated by scipy.io.savemat. The dtype is float64, but the isbuiltin attribute is 0. compress with out argument segfaulted if cannot cast safely Comparisons fail for NaN, so we can't use random memory for the test. this shouldn't leak a reference to errobj Try to ensure that x->data contains non-zero floats `dot` should just return zero (m,n) matrix Set a size which cannot fit into a 64 bits signed integer Set a size which cannot fit into a 64 bits signed integer This didn't require a fix, but it's worth testing anyway, because it may fail if .dot stops enforcing the arrays to be BEHAVED always native-endian little-endian machine big-endian machine Just make sure this doesn't throw an exception Bug 1436; the following should succeed Check if log1p is behaving on 32 bit intel systems. 
Ticket 1345: the following should not cause a crash Check that the out= argument in var and std has an effect ticket 1563 shouldn't raise an exception ticket 1548 ticket 939 On Python 3, tofile/fromfile used to get (1610) the Python file handle out of sync should not cause a segmentation fault From e-mail thread "__eq__ with str and object" (Keith Goodman) [0] if nonzero() ignores swap Ticket 1695 This was throwing an exception because in ctors.c, discover_itemsize was calling PyObject_Length without checking the return code. This failed to get the length of the number 2, and the exception hung around until something checked PyErr_Occurred() and returned an error. all and any for floats Array scalars are both C and Fortran contiguous Object arrays with references to themselves can cause problems This was causing a to become like the above Setfield directly manipulates the raw array data, so is invalid for object arrays. Check that loads does not clobber interned strings astype records -> object '=' records -> object '=' object -> records Ticket 1870 The inner list should get assigned to the object elements The first dimension should get broadcast Ticket 1917 - ensure that array data doesn't leak 100MB times 1000 would give 100GB of memory usage if it leaks Ticket 2021, should not segfault. Ticket 1990 - Data can be truncated in creation of an array from a mixed sequence of numeric values and strings test also with longer strings Ticket 2081. Python compiled with two byte unicode can lead to truncation if itemsize is not properly adjusted for Numpy's four byte unicode. Ticket 2063 must always choose stable sort for argsort to get consistent results Ticket 1578, the mismatch only showed up when running python-debug for python versions >= 2.7, and then as a core dump and error message. This caused a segfault when running under python-debug | 3,628 | en | 0.853244 |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""
Class describing simulation configuration object
"""
import os
import shutil
import subprocess
import sys
from collections import OrderedDict
import logging as log
from tabulate import tabulate
from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, RunTest, Deploy
from FlowCfg import FlowCfg
from Modes import BuildModes, Modes, Regressions, RunModes, Tests
from testplanner import testplan_utils
from utils import VERBOSE, find_and_substitute_wildcards
def pick_dump_format(fmts):
'''Choose a supported wave dumping format
fmts is a list of formats that the chosen tool supports. Return the first
that we think is possible (e.g. not fsdb if Verdi is not installed).
'''
assert fmts
fmt = fmts[0]
if fmt == 'fsdb' and not shutil.which('verdi'):
return pick_dump_format(fmts[1:])
return fmt
def resolve_dump_format(tool, dump):
'''Decide on the correct dumping format
This is called after reading the config file. tool is the chosen tool,
which will always have been resolved by this point. waves is a boolean
which determines whether waves should be dumped at all (from the --waves
argument). dump is the dumping format chosen on the command line or None.
'''
assert tool is not None
SUPPORTED_DUMP_FMTS = {
'vcs': ['fsdb', 'vpd'],
'xcelium': ['fsdb', 'shm', 'vpd']
}
# Look up which dumping formats the tool supports
fmts = SUPPORTED_DUMP_FMTS.get(tool)
if dump is not None:
# If the user has specified their preferred dumping format, use it. As
# a sanity check, error out if the chosen tool doesn't support the
# format, but only if we know about the tool. If not, we'll just assume
# they know what they're doing.
if fmts is not None and dump not in fmts:
log.error('Chosen tool ({}) does not support wave '
'dumping format {!r}.'
.format(tool, dump))
sys.exit(1)
return dump
# If the user hasn't specified a dumping format, but has asked for waves,
# we need to decide on a format for them. If fmts is None, we don't know
# about this tool. Maybe it's a new simulator, in which case, default to
# VPD and hope for the best.
if not fmts:
return 'vpd'
return pick_dump_format(fmts)
class SimCfg(FlowCfg):
"""Simulation configuration object
A simulation configuration class holds key information required for building a DV
regression framework.
"""
def __init__(self, flow_cfg_file, proj_root, args):
super().__init__(flow_cfg_file, proj_root, args)
# Options set from command line
self.tool = args.tool
self.build_opts = []
self.build_opts.extend(args.build_opts)
self.en_build_modes = args.build_modes.copy()
self.run_opts = []
self.run_opts.extend(args.run_opts)
self.en_run_modes = []
self.en_run_modes.extend(args.run_modes)
self.build_unique = args.build_unique
self.build_only = args.build_only
self.run_only = args.run_only
self.reseed_ovrd = args.reseed
self.reseed_multiplier = args.reseed_multiplier
self.waves = args.waves
self.max_waves = args.max_waves
self.cov = args.cov
self.cov_merge_previous = args.cov_merge_previous
self.profile = args.profile or '(cfg uses profile without --profile)'
self.xprop_off = args.xprop_off
self.no_rerun = args.no_rerun
self.verbosity = "{" + args.verbosity + "}"
self.verbose = args.verbose
self.dry_run = args.dry_run
self.map_full_testplan = args.map_full_testplan
# Disable cov if --build-only is passed.
if self.build_only:
self.cov = False
# Set default sim modes for unpacking
if self.waves is True:
self.en_build_modes.append("waves")
if self.cov is True:
self.en_build_modes.append("cov")
if args.profile is not None:
self.en_build_modes.append("profile")
if self.xprop_off is not True:
self.en_build_modes.append("xprop")
# Options built from cfg_file files
self.project = ""
self.flow = ""
self.flow_makefile = ""
self.build_dir = ""
self.run_dir = ""
self.sw_build_dir = ""
self.pass_patterns = []
self.fail_patterns = []
self.name = ""
self.dut = ""
self.tb = ""
self.testplan = ""
self.fusesoc_core = ""
self.ral_spec = ""
self.build_modes = []
self.run_modes = []
self.regressions = []
# Options from tools - for building and running tests
self.build_cmd = ""
self.flist_gen_cmd = ""
self.flist_gen_opts = []
self.flist_file = ""
self.run_cmd = ""
# Generated data structures
self.links = {}
self.build_list = []
self.run_list = []
self.cov_merge_deploy = None
self.cov_report_deploy = None
self.results_summary = OrderedDict()
# If is_master_cfg is set, then each cfg will have its own cov_deploy.
# Maintain an array of those in cov_deploys.
self.cov_deploys = []
# Parse the cfg_file file tree
self.parse_flow_cfg(flow_cfg_file)
self._post_parse_flow_cfg()
# Choose a dump format now. Note that this has to happen after parsing
# the configuration format because our choice might depend on the
# chosen tool.
self.dump_fmt = (resolve_dump_format(self.tool, args.dump)
if self.waves else 'none')
# If build_unique is set, then add current timestamp to uniquify it
if self.build_unique:
self.build_dir += "_" + self.timestamp
# Process overrides before substituting the wildcards.
self._process_overrides()
# Make substitutions, while ignoring the following wildcards
# TODO: Find a way to set these in sim cfg instead
ignored_wildcards = [
"build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq",
"cov_db_dirs", "sw_test", "sw_test_is_prebuilt", "sw_build_device"
]
self.__dict__ = find_and_substitute_wildcards(self.__dict__,
self.__dict__,
ignored_wildcards,
self.is_master_cfg)
# Set the title for simulation results.
self.results_title = self.name.upper() + " Simulation Results"
# Stuff below only pertains to individual cfg (not master cfg)
# or individual selected cfgs (if select_cfgs is configured via command line)
# TODO: find a better way to support select_cfgs
if not self.is_master_cfg and (not self.select_cfgs or
self.name in self.select_cfgs):
# If self.tool is None at this point, there was no --tool argument on
# the command line, and there is no default tool set in the config
# file. That's ok if this is a master config (where the
# sub-configurations can choose tools themselves), but not otherwise.
if self.tool is None:
log.error('Config file does not specify a default tool, '
'and there was no --tool argument on the command line.')
sys.exit(1)
# Print info:
log.info("[scratch_dir]: [%s]: [%s]", self.name, self.scratch_path)
# Set directories with links for ease of debug / triage.
self.links = {
"D": self.scratch_path + "/" + "dispatched",
"P": self.scratch_path + "/" + "passed",
"F": self.scratch_path + "/" + "failed",
"K": self.scratch_path + "/" + "killed"
}
# Use the default build mode for tests that do not specify it
if not hasattr(self, "build_mode"):
setattr(self, "build_mode", "default")
self._process_exports()
# Create objects from raw dicts - build_modes, sim_modes, run_modes,
# tests and regressions, only if not a master cfg obj
self._create_objects()
# Post init checks
self.__post_init__()
def __post_init__(self):
# Run some post init checks
super().__post_init__()
def kill(self):
'''kill running processes and jobs gracefully
'''
super().kill()
for item in self.cov_deploys:
item.kill()
# Purge the output directories. This operates on self.
def _purge(self):
if self.scratch_path:
try:
log.info("Purging scratch path %s", self.scratch_path)
os.system("/bin/rm -rf " + self.scratch_path)
except IOError:
log.error('Failed to purge scratch directory %s',
self.scratch_path)
def _create_objects(self):
# Create build and run modes objects
self.build_modes = Modes.create_modes(BuildModes, self.build_modes)
self.run_modes = Modes.create_modes(RunModes, self.run_modes)
# Walk through build modes enabled on the CLI and append the opts
for en_build_mode in self.en_build_modes:
build_mode_obj = Modes.find_mode(en_build_mode, self.build_modes)
if build_mode_obj is not None:
self.build_opts.extend(build_mode_obj.build_opts)
self.run_opts.extend(build_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_build_mode)
sys.exit(1)
# Walk through run modes enabled on the CLI and append the opts
for en_run_mode in self.en_run_modes:
run_mode_obj = Modes.find_mode(en_run_mode, self.run_modes)
if run_mode_obj is not None:
self.run_opts.extend(run_mode_obj.run_opts)
else:
log.error(
"Mode \"%s\" enabled on the the command line is not defined",
en_run_mode)
sys.exit(1)
# Create tests from given list of items
tests = Tests.create_tests(getattr(self, "tests"), self)
setattr(self, "tests", tests)
# Regressions
# Parse testplan if provided.
if self.testplan != "":
self.testplan = testplan_utils.parse_testplan(self.testplan)
# Extract tests in each milestone and add them as regression target.
self.regressions.extend(self.testplan.get_milestone_regressions())
# Create regressions
regressions = Regressions.create_regressions(
getattr(self, "regressions"), self, tests)
setattr(self, "regressions", regressions)
def _print_list(self):
for list_item in self.list_items:
log.info("---- List of %s in %s ----", list_item, self.name)
if hasattr(self, list_item):
items = getattr(self, list_item)
for item in items:
log.info(item)
else:
log.error("Item %s does not exist!", list_item)
def _create_build_and_run_list(self):
# Walk through the list of items to run and create the build and run
# objects.
# Allow multiple regressions to run as long as the do not enable
# sim_modes or run_modes
def get_overlapping_tests(tests, run_list_names):
overlapping_tests = []
for test in tests:
if test.name in run_list_names:
overlapping_tests.append(test)
return overlapping_tests
def prune_items(items, marked_items):
pruned_items = []
for item in items:
if item not in marked_items:
pruned_items.append(item)
return pruned_items
# Check if there are items to run
if self.items == []:
log.error(
"No items provided for running this simulation / regression")
sys.exit(1)
items_list = self.items
run_list_names = []
marked_items = []
# Process regressions first
for regression in self.regressions:
if regression.name in items_list:
overlapping_tests = get_overlapping_tests(
regression.tests, run_list_names)
if overlapping_tests != []:
log.error(
"Regression \"%s\" added for run contains tests that overlap with "
"other regressions added. This can result in conflicting "
"build / run_opts to be set causing unexpected results.",
regression.name)
sys.exit(1)
self.run_list.extend(regression.tests)
# Merge regression's build and run opts with its tests and their
# build_modes
regression.merge_regression_opts()
run_list_names.extend(regression.test_names)
marked_items.append(regression.name)
items_list = prune_items(items_list, marked_items)
# Process individual tests
for test in self.tests:
if test.name in items_list:
overlapping_tests = get_overlapping_tests([test],
run_list_names)
if overlapping_tests == []:
self.run_list.append(test)
run_list_names.append(test.name)
marked_items.append(test.name)
items_list = prune_items(items_list, marked_items)
# Merge the global build and run opts
Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts)
# Check if all items have been processed
if items_list != []:
log.error(
"The items %s added for run were not found in \n%s!\n "
"Use the --list switch to see a list of available "
"tests / regressions.", items_list, self.flow_cfg_file)
# Process reseed override and create the build_list
build_list_names = []
for test in self.run_list:
# Override reseed if available.
if self.reseed_ovrd is not None:
test.reseed = self.reseed_ovrd
# Apply reseed multiplier if set on the command line.
test.reseed *= self.reseed_multiplier
# Create the unique set of builds needed.
if test.build_mode.name not in build_list_names:
self.build_list.append(test.build_mode)
build_list_names.append(test.build_mode.name)
def _create_dirs(self):
'''Create initial set of directories
'''
# Invoking system calls has a performance penalty.
# Construct a single command line chained with '&&' to invoke
# the system call only once, rather than multiple times.
create_link_dirs_cmd = ""
for link in self.links.keys():
create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && "
create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && "
create_link_dirs_cmd += " true"
try:
os.system(create_link_dirs_cmd)
except IOError:
log.error("Error running when running the cmd \"%s\"",
create_link_dirs_cmd)
sys.exit(1)
def _create_deploy_objects(self):
'''Create deploy objects from the build and run lists.
'''
# Create the build and run list first
self._create_build_and_run_list()
builds = []
build_map = {}
for build in self.build_list:
item = CompileSim(build, self)
builds.append(item)
build_map[build] = item
runs = []
for test in self.run_list:
for num in range(test.reseed):
item = RunTest(num, test, self)
if self.build_only is False:
build_map[test.build_mode].sub.append(item)
runs.append(item)
self.builds = builds
self.runs = runs
if self.run_only is True:
self.deploy = runs
else:
self.deploy = builds
# Create cov_merge and cov_report objects
if self.cov:
self.cov_merge_deploy = CovMerge(self)
self.cov_report_deploy = CovReport(self)
# Generate reports only if merge was successful; add it as a dependency
# of merge.
self.cov_merge_deploy.sub.append(self.cov_report_deploy)
# Create initial set of directories before kicking off the regression.
self._create_dirs()
def create_deploy_objects(self):
'''Public facing API for _create_deploy_objects().
'''
super().create_deploy_objects()
# Also, create cov_deploys
if self.cov:
for item in self.cfgs:
if item.cov:
self.cov_deploys.append(item.cov_merge_deploy)
# deploy additional commands as needed. We do this separated for coverage
# since that needs to happen at the end.
def deploy_objects(self):
'''This is a public facing API, so we use "self.cfgs" instead of self.
'''
# Invoke the base class method to run the regression.
super().deploy_objects()
# If coverage is enabled, then deploy the coverage tasks.
if self.cov:
Deploy.deploy(self.cov_deploys)
def _cov_analyze(self):
'''Use the last regression coverage data to open up the GUI tool to
analyze the coverage.
'''
cov_analyze_deploy = CovAnalyze(self)
self.deploy = [cov_analyze_deploy]
def cov_analyze(self):
'''Public facing API for analyzing coverage.
'''
for item in self.cfgs:
item._cov_analyze()
def _gen_results(self):
'''
The function is called after the regression has completed. It collates the
status of all run targets and generates a dict. It parses the testplan and
maps the generated result to the testplan entries to generate a final table
(list). It also prints the full list of failures for debug / triage. If cov
is enabled, then the summary coverage report is also generated. The final
result is in markdown format.
'''
# TODO: add support for html
def retrieve_result(name, results):
for item in results:
if name == item["name"]:
return item
return None
def gen_results_sub(items, results, fail_msgs):
'''
Generate the results table from the test runs (builds are ignored).
The table has 3 columns - name, passing and total as a list of dicts.
This is populated for all tests. The number of passing and total is
in reference to the number of iterations or reseeds for that test.
This list of dicts is directly consumed by the Testplan::results_table
method for testplan mapping / annotation.
'''
for item in items:
if item.status == "F":
fail_msgs += item.fail_msg
# Generate results table for runs.
if item.target == "run":
result = retrieve_result(item.name, results)
if result is None:
result = {"name": item.name, "passing": 0, "total": 0}
results.append(result)
if item.status == "P":
result["passing"] += 1
result["total"] += 1
(results, fail_msgs) = gen_results_sub(item.sub, results,
fail_msgs)
return (results, fail_msgs)
regr_results = []
fail_msgs = ""
deployed_items = self.deploy
if self.cov:
deployed_items.append(self.cov_merge_deploy)
(regr_results, fail_msgs) = gen_results_sub(deployed_items,
regr_results, fail_msgs)
# Add title if there are indeed failures
if fail_msgs != "":
fail_msgs = "\n## List of Failures\n" + fail_msgs
self.errors_seen = True
# Generate results table for runs.
results_str = "## " + self.results_title + "\n"
results_str += "### " + self.timestamp_long + "\n"
# Add path to testplan.
if hasattr(self, "testplan_doc_path"):
testplan = "https://" + self.doc_server + '/' + getattr(
self, "testplan_doc_path")
else:
testplan = "https://" + self.doc_server + '/' + self.rel_path
testplan = testplan.replace("/dv", "/doc/dv_plan/#testplan")
results_str += "### [Testplan](" + testplan + ")\n"
results_str += "### Simulator: " + self.tool.upper() + "\n\n"
if regr_results == []:
results_str += "No results to display.\n"
else:
# TODO: check if testplan is not null?
# Map regr results to the testplan entries.
results_str += self.testplan.results_table(
regr_results=regr_results,
map_full_testplan=self.map_full_testplan)
results_str += "\n"
self.results_summary = self.testplan.results_summary
# Append coverage results of coverage was enabled.
if self.cov:
if self.cov_report_deploy.status == "P":
results_str += "\n## Coverage Results\n"
# Link the dashboard page using "cov_report_page" value.
if hasattr(self, "cov_report_page"):
results_str += "\n### [Coverage Dashboard]"
results_str += "({})\n\n".format(
getattr(self, "cov_report_page"))
results_str += self.cov_report_deploy.cov_results
self.results_summary[
"Coverage"] = self.cov_report_deploy.cov_total
else:
self.results_summary["Coverage"] = "--"
# append link of detail result to block name
self.results_summary["Name"] = self._get_results_page_link(
self.results_summary["Name"])
# Append failures for triage
self.results_md = results_str + fail_msgs
results_str += fail_msgs
# Write results to the scratch area
results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
f = open(results_file, 'w')
f.write(self.results_md)
f.close()
# Return only the tables
log.info("[results page]: [%s] [%s]", self.name, results_file)
return results_str
def gen_results_summary(self):
# sim summary result has 5 columns from each SimCfg.results_summary
header = ["Name", "Passing", "Total", "Pass Rate"]
if self.cov:
header.append('Coverage')
table = [header]
colalign = ("center", ) * len(header)
for item in self.cfgs:
row = []
for title in item.results_summary:
row.append(item.results_summary[title])
if row == []:
continue
table.append(row)
self.results_summary_md = "## " + self.results_title + " (Summary)\n"
self.results_summary_md += "### " + self.timestamp_long + "\n"
self.results_summary_md += tabulate(table,
headers="firstrow",
tablefmt="pipe",
colalign=colalign)
print(self.results_summary_md)
return self.results_summary_md
def _publish_results(self):
'''Publish coverage results to the opentitan web server.'''
super()._publish_results()
if self.cov:
results_server_dir_url = self.results_server_dir.replace(
self.results_server_prefix, self.results_server_url_prefix)
log.info("Publishing coverage results to %s",
results_server_dir_url)
cmd = (self.results_server_cmd + " -m cp -R " +
self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir)
try:
cmd_output = subprocess.run(args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
except Exception as e:
log.error("%s: Failed to publish results:\n\"%s\"", e,
str(cmd))
| util/dvsim/SimCfg.py | 25,633 | Simulation configuration object
A simulation configuration class holds key information required for building a DV
regression framework.
Use the last regression coverage data to open up the GUI tool to
analyze the coverage.
Create deploy objects from the build and run lists.
Create initial set of directories
The function is called after the regression has completed. It collates the
status of all run targets and generates a dict. It parses the testplan and
maps the generated result to the testplan entries to generate a final table
(list). It also prints the full list of failures for debug / triage. If cov
is enabled, then the summary coverage report is also generated. The final
result is in markdown format.
Publish coverage results to the opentitan web server.
Public facing API for analyzing coverage.
Public facing API for _create_deploy_objects().
This is a public facing API, so we use "self.cfgs" instead of self.
Generate the results table from the test runs (builds are ignored).
The table has 3 columns - name, passing and total as a list of dicts.
This is populated for all tests. The number of passing and total is
in reference to the number of iterations or reseeds for that test.
This list of dicts is directly consumed by the Testplan::results_table
method for testplan mapping / annotation.
kill running processes and jobs gracefully
Choose a supported wave dumping format
fmts is a list of formats that the chosen tool supports. Return the first
that we think is possible (e.g. not fsdb if Verdi is not installed).
Decide on the correct dumping format
This is called after reading the config file. tool is the chosen tool,
which will always have been resolved by this point. waves is a boolean
which determines whether waves should be dumped at all (from the --waves
argument). dump is the dumping format chosen on the command line or None.
Class describing simulation configuration object
Copyright lowRISC contributors. Licensed under the Apache License, Version 2.0, see LICENSE for details. SPDX-License-Identifier: Apache-2.0 Look up which dumping formats the tool supports If the user has specified their preferred dumping format, use it. As a sanity check, error out if the chosen tool doesn't support the format, but only if we know about the tool. If not, we'll just assume they know what they're doing. If the user hasn't specified a dumping format, but has asked for waves, we need to decide on a format for them. If fmts is None, we don't know about this tool. Maybe it's a new simulator, in which case, default to VPD and hope for the best. Options set from command line Disable cov if --build-only is passed. Set default sim modes for unpacking Options built from cfg_file files Options from tools - for building and running tests Generated data structures If is_master_cfg is set, then each cfg will have its own cov_deploy. Maintain an array of those in cov_deploys. Parse the cfg_file file tree Choose a dump format now. Note that this has to happen after parsing the configuration format because our choice might depend on the chosen tool. If build_unique is set, then add current timestamp to uniquify it Process overrides before substituting the wildcards. Make substitutions, while ignoring the following wildcards TODO: Find a way to set these in sim cfg instead Set the title for simulation results. Stuff below only pertains to individual cfg (not master cfg) or individual selected cfgs (if select_cfgs is configured via command line) TODO: find a better way to support select_cfgs If self.tool is None at this point, there was no --tool argument on the command line, and there is no default tool set in the config file. That's ok if this is a master config (where the sub-configurations can choose tools themselves), but not otherwise. Print info: Set directories with links for ease of debug / triage. 
Use the default build mode for tests that do not specify it Create objects from raw dicts - build_modes, sim_modes, run_modes, tests and regressions, only if not a master cfg obj Post init checks Run some post init checks Purge the output directories. This operates on self. Create build and run modes objects Walk through build modes enabled on the CLI and append the opts Walk through run modes enabled on the CLI and append the opts Create tests from given list of items Regressions Parse testplan if provided. Extract tests in each milestone and add them as regression target. Create regressions Walk through the list of items to run and create the build and run objects. Allow multiple regressions to run as long as the do not enable sim_modes or run_modes Check if there are items to run Process regressions first Merge regression's build and run opts with its tests and their build_modes Process individual tests Merge the global build and run opts Check if all items have been processed Process reseed override and create the build_list Override reseed if available. Apply reseed multiplier if set on the command line. Create the unique set of builds needed. Invoking system calls has a performance penalty. Construct a single command line chained with '&&' to invoke the system call only once, rather than multiple times. Create the build and run list first Create cov_merge and cov_report objects Generate reports only if merge was successful; add it as a dependency of merge. Create initial set of directories before kicking off the regression. Also, create cov_deploys deploy additional commands as needed. We do this separated for coverage since that needs to happen at the end. Invoke the base class method to run the regression. If coverage is enabled, then deploy the coverage tasks. TODO: add support for html Generate results table for runs. Add title if there are indeed failures Generate results table for runs. Add path to testplan. TODO: check if testplan is not null? 
Map regr results to the testplan entries. Append coverage results of coverage was enabled. Link the dashboard page using "cov_report_page" value. append link of detail result to block name Append failures for triage Write results to the scratch area Return only the tables sim summary result has 5 columns from each SimCfg.results_summary | 6,240 | en | 0.865599 |
import platform
from conans.client.graph.graph import CONTEXT_BUILD
from conans.errors import ConanException
class _SystemPackageManagerTool(object):
mode_check = "check"
mode_install = "install"
tool_name = None
install_command = ""
update_command = ""
check_command = ""
def __init__(self, conanfile):
self._conanfile = conanfile
self._active_tool = self._conanfile.conf.get("tools.system.package_manager:tool", default=self.get_default_tool())
self._sudo = self._conanfile.conf.get("tools.system.package_manager:sudo", default=False, check_type=bool)
self._sudo_askpass = self._conanfile.conf.get("tools.system.package_manager:sudo_askpass", default=False, check_type=bool)
self._mode = self._conanfile.conf.get("tools.system.package_manager:mode", default=self.mode_check)
self._arch = self._conanfile.settings_build.get_safe('arch') \
if self._conanfile.context == CONTEXT_BUILD else self._conanfile.settings.get_safe('arch')
self._arch_names = {}
self._arch_separator = ""
def get_default_tool(self):
os_name = platform.system()
if os_name in ["Linux", "FreeBSD"]:
import distro
os_name = distro.id() or os_name
elif os_name == "Windows" and self._conanfile.conf.get("tools.microsoft.bash:subsystem") == "msys2":
os_name = "msys2"
manager_mapping = {"apt-get": ["Linux", "ubuntu", "debian"],
"yum": ["pidora", "scientific", "xenserver", "amazon", "oracle", "amzn",
"almalinux", "rocky"],
"dnf": ["fedora", "rhel", "centos", "mageia"],
"brew": ["Darwin"],
"pacman": ["arch", "manjaro", "msys2"],
"choco": ["Windows"],
"zypper": ["opensuse", "sles"],
"pkg": ["freebsd"],
"pkgutil": ["Solaris"]}
for tool, distros in manager_mapping.items():
if os_name in distros:
return tool
def get_package_name(self, package):
# TODO: should we only add the arch if cross-building?
if self._arch in self._arch_names:
return "{}{}{}".format(package, self._arch_separator,
self._arch_names.get(self._arch))
return package
@property
def sudo_str(self):
sudo = "sudo " if self._sudo else ""
askpass = "-A " if self._sudo and self._sudo_askpass else ""
return "{}{}".format(sudo, askpass)
def run(self, method, *args, **kwargs):
if self._active_tool == self.__class__.tool_name:
return method(*args, **kwargs)
def install(self, *args, **kwargs):
return self.run(self._install, *args, **kwargs)
def update(self, *args, **kwargs):
return self.run(self._update, *args, **kwargs)
def check(self, *args, **kwargs):
return self.run(self._check, *args, **kwargs)
def _install(self, packages, update=False, check=True, **kwargs):
if update:
self.update()
if check:
packages = self.check(packages)
if self._mode == self.mode_check and packages:
raise ConanException("System requirements: '{0}' are missing but can't install "
"because tools.system.package_manager:mode is '{1}'."
"Please update packages manually or set "
"'tools.system.package_manager:mode' "
"to '{2}' in the [conf] section of the profile, "
"or in the command line using "
"'-c tools.system.package_manager:mode={2}'".format(", ".join(packages),
self.mode_check,
self.mode_install))
elif packages:
packages_arch = [self.get_package_name(package) for package in packages]
if packages_arch:
command = self.install_command.format(sudo=self.sudo_str,
tool=self.tool_name,
packages=" ".join(packages_arch),
**kwargs)
return self._conanfile.run(command)
else:
self._conanfile.output.info("System requirements: {} already "
"installed".format(" ".join(packages)))
def _update(self):
if self._mode == self.mode_check:
raise ConanException("Can't update because tools.system.package_manager:mode is '{0}'."
"Please update packages manually or set "
"'tools.system.package_manager:mode' "
"to '{1}' in the [conf] section of the profile, "
"or in the command line using "
"'-c tools.system.package_manager:mode={1}'".format(self.mode_check,
self.mode_install))
command = self.update_command.format(sudo=self.sudo_str, tool=self.tool_name)
return self._conanfile.run(command)
def _check(self, packages):
missing = [pkg for pkg in packages if self.check_package(self.get_package_name(pkg)) != 0]
return missing
def check_package(self, package):
command = self.check_command.format(tool=self.tool_name,
package=package)
return self._conanfile.run(command, ignore_errors=True)
class Apt(_SystemPackageManagerTool):
# TODO: apt? apt-get?
tool_name = "apt-get"
install_command = "{sudo}{tool} install -y {recommends}{packages}"
update_command = "{sudo}{tool} update"
check_command = "dpkg-query -W -f='${{Status}}' {package} | grep -q \"ok installed\""
def __init__(self, conanfile, arch_names=None):
super(Apt, self).__init__(conanfile)
self._arch_names = {"x86_64": "amd64",
"x86": "i386",
"ppc32": "powerpc",
"ppc64le": "ppc64el",
"armv7": "arm",
"armv7hf": "armhf",
"armv8": "arm64",
"s390x": "s390x"} if arch_names is None else arch_names
self._arch_separator = ":"
def install(self, packages, update=False, check=False, recommends=False):
recommends_str = '' if recommends else '--no-install-recommends '
return super(Apt, self).install(packages, update=update, check=check,
recommends=recommends_str)
class Yum(_SystemPackageManagerTool):
tool_name = "yum"
install_command = "{sudo}{tool} install -y {packages}"
update_command = "{sudo}{tool} check-update -y"
check_command = "rpm -q {package}"
def __init__(self, conanfile, arch_names=None):
super(Yum, self).__init__(conanfile)
self._arch_names = {"x86_64": "x86_64",
"x86": "i?86",
"ppc32": "powerpc",
"ppc64le": "ppc64le",
"armv7": "armv7",
"armv7hf": "armv7hl",
"armv8": "aarch64",
"s390x": "s390x"} if arch_names is None else arch_names
self._arch_separator = "."
class Dnf(Yum):
tool_name = "dnf"
class Brew(_SystemPackageManagerTool):
tool_name = "brew"
install_command = "{sudo}{tool} install {packages}"
update_command = "{sudo}{tool} update"
check_command = 'test -n "$({tool} ls --versions {package})"'
class Pkg(_SystemPackageManagerTool):
tool_name = "pkg"
install_command = "{sudo}{tool} install -y {packages}"
update_command = "{sudo}{tool} update"
check_command = "{tool} info {package}"
class PkgUtil(_SystemPackageManagerTool):
tool_name = "pkgutil"
install_command = "{sudo}{tool} --install --yes {packages}"
update_command = "{sudo}{tool} --catalog"
check_command = 'test -n "`{tool} --list {package}`"'
class Chocolatey(_SystemPackageManagerTool):
tool_name = "choco"
install_command = "{tool} --install --yes {packages}"
update_command = "{tool} outdated"
check_command = '{tool} search --local-only --exact {package} | ' \
'findstr /c:"1 packages installed."'
class PacMan(_SystemPackageManagerTool):
tool_name = "pacman"
install_command = "{sudo}{tool} -S --noconfirm {packages}"
update_command = "{sudo}{tool} -Syyu --noconfirm"
check_command = "{tool} -Qi {package}"
def __init__(self, conanfile, arch_names=None):
super(PacMan, self).__init__(conanfile)
self._arch_names = {"x86": "lib32"} if arch_names is None else arch_names
self._arch_separator = "-"
class Zypper(_SystemPackageManagerTool):
tool_name = "zypper"
install_command = "{sudo}{tool} --non-interactive in {packages}"
update_command = "{sudo}{tool} --non-interactive ref"
check_command = "rpm -q {package}"
| conan/tools/system/package_manager.py | 9,539 | TODO: should we only add the arch if cross-building? TODO: apt? apt-get? | 72 | en | 0.548788 |
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
from ironic.dhcp import base
from ironic.drivers.modules import ssh
neutron_opts = [
cfg.StrOpt('url',
default='http://$my_ip:9696',
help='URL for connecting to neutron.'),
cfg.IntOpt('url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds.'),
cfg.IntOpt('retries',
default=3,
help='Client retries in the case of a failed request.'),
cfg.StrOpt('auth_strategy',
default='keystone',
help='Default authentication strategy to use when connecting '
'to neutron. Can be either "keystone" or "noauth". '
'Running neutron in noauth mode (related to but not '
'affected by this setting) is insecure and should only be '
'used for testing.'),
cfg.StrOpt('cleaning_network_uuid',
help='UUID of the network to create Neutron ports on when '
'booting to a ramdisk for cleaning/zapping using Neutron '
'DHCP')
]
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(neutron_opts, group='neutron')
LOG = logging.getLogger(__name__)
def _build_client(token=None):
"""Utility function to create Neutron client."""
params = {
'timeout': CONF.neutron.url_timeout,
'retries': CONF.neutron.retries,
'insecure': CONF.keystone_authtoken.insecure,
'ca_cert': CONF.keystone_authtoken.certfile,
}
if CONF.neutron.auth_strategy not in ['noauth', 'keystone']:
raise exception.ConfigInvalid(_('Neutron auth_strategy should be '
'either "noauth" or "keystone".'))
if CONF.neutron.auth_strategy == 'noauth':
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = 'noauth'
elif (CONF.neutron.auth_strategy == 'keystone' and
token is None):
params['endpoint_url'] = (CONF.neutron.url or
keystone.get_service_url('neutron'))
params['username'] = CONF.keystone_authtoken.admin_user
params['tenant_name'] = CONF.keystone_authtoken.admin_tenant_name
params['password'] = CONF.keystone_authtoken.admin_password
params['auth_url'] = (CONF.keystone_authtoken.auth_uri or '')
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
else:
params['token'] = token
params['endpoint_url'] = CONF.neutron.url
params['auth_strategy'] = None
return clientv20.Client(**params)
class NeutronDHCPApi(base.BaseDHCP):
"""API for communicating to neutron 2.x API."""
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
"""
port_req_body = {'port': {'extra_dhcp_opts': dhcp_options}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update Neutron port %s."), port_id)
raise exception.FailedToUpdateDHCPOptOnPort(port_id=port_id)
def update_port_address(self, port_id, address, token=None):
"""Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
"""
port_req_body = {'port': {'mac_address': address}}
try:
_build_client(token).update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to update MAC address on Neutron "
"port %s."), port_id)
raise exception.FailedToUpdateMacOnPort(port_id=port_id)
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs
If the value is None, will get the list of ports from the Ironic
port objects.
"""
if vifs is None:
vifs = network.get_node_vif_ids(task)
if not vifs:
raise exception.FailedToUpdateDHCPOptOnPort(
_("No VIFs found for node %(node)s when attempting "
"to update DHCP BOOT options.") %
{'node': task.node.uuid})
failures = []
for port_id, port_vif in vifs.items():
try:
self.update_port_dhcp_opts(port_vif, options,
token=task.context.auth_token)
except exception.FailedToUpdateDHCPOptOnPort:
failures.append(port_id)
if failures:
if len(failures) == len(vifs):
raise exception.FailedToUpdateDHCPOptOnPort(_(
"Failed to set DHCP BOOT options for any port on node %s.")
% task.node.uuid)
else:
LOG.warning(_LW("Some errors were encountered when updating "
"the DHCP BOOT options for node %(node)s on "
"the following ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
# TODO(adam_g): Hack to workaround bug 1334447 until we have a
# mechanism for synchronizing events with Neutron. We need to sleep
# only if we are booting VMs, which is implied by SSHPower, to ensure
# they do not boot before Neutron agents have setup sufficient DHCP
# config for netboot.
if isinstance(task.driver.power, ssh.SSHPower):
LOG.debug("Waiting 15 seconds for Neutron.")
time.sleep(15)
def _get_fixed_ip_address(self, port_uuid, client):
"""Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
ip_address = None
try:
neutron_port = client.show_port(port_uuid).get('port')
except neutron_client_exc.NeutronClientException:
LOG.exception(_LE("Failed to Get IP address on Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
fixed_ips = neutron_port.get('fixed_ips')
# NOTE(faizan) At present only the first fixed_ip assigned to this
# neutron port will be used, since nova allocates only one fixed_ip
# for the instance.
if fixed_ips:
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
if netutils.is_valid_ipv4(ip_address):
return ip_address
else:
LOG.error(_LE("Neutron returned invalid IPv4 address %s."),
ip_address)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error(_LE("No IP address assigned to Neutron port %s."),
port_uuid)
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
def _get_port_ip_address(self, task, port_uuid, client):
"""Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
vifs = network.get_node_vif_ids(task)
if not vifs:
LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
" to get port IP address."),
{'node': task.node.uuid})
raise exception.FailedToGetIPAddressOnPort(port_id=port_uuid)
port_vif = vifs[port_uuid]
port_ip_address = self._get_fixed_ip_address(port_vif, client)
return port_ip_address
def get_ip_addresses(self, task):
"""Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
"""
client = _build_client(task.context.auth_token)
failures = []
ip_addresses = []
for port in task.ports:
try:
port_ip_address = self._get_port_ip_address(task, port.uuid,
client)
ip_addresses.append(port_ip_address)
except (exception.FailedToGetIPAddressOnPort,
exception.InvalidIPv4Address):
failures.append(port.uuid)
if failures:
LOG.warn(_LW("Some errors were encountered on node %(node)s"
" while retrieving IP address on the following"
" ports: %(ports)s."),
{'node': task.node.uuid, 'ports': failures})
return ip_addresses
def create_cleaning_ports(self, task):
"""Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
"""
if not CONF.neutron.cleaning_network_uuid:
raise exception.InvalidParameterValue(_('Valid cleaning network '
'UUID not provided'))
neutron_client = _build_client(task.context.auth_token)
body = {
'port': {
'network_id': CONF.neutron.cleaning_network_uuid,
'admin_state_up': True,
}
}
ports = {}
for ironic_port in task.ports:
body['port']['mac_address'] = ironic_port.address
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % task.node.uuid)
LOG.error(msg)
raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
def delete_cleaning_ports(self, task):
"""Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
"""
neutron_client = _build_client(task.context.auth_token)
macs = [p.address for p in task.ports]
params = {
'network_id': CONF.neutron.cleaning_network_uuid
}
try:
ports = neutron_client.list_ports(**params)
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not get cleaning network vif for %(node)s '
'from Neutron, possible network issue. %(exc)s') %
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
# Only delete ports using the node's mac addresses
if neutron_port.get('mac_address') in macs:
try:
neutron_client.delete_port(neutron_port.get('id'))
except neutron_client_exc.ConnectionFailed as e:
msg = (_('Could not remove cleaning ports on network '
'%(net)s from %(node)s, possible network issue. '
'%(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
raise exception.NodeCleaningFailure(msg)
def _rollback_cleaning_ports(self, task):
"""Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
"""
try:
self.delete_cleaning_ports(task)
except Exception:
# Log the error, but let the caller invoke the
# manager.cleaning_error_handler().
LOG.exception(_LE('Failed to rollback cleaning port '
'changes for node %s') % task.node.uuid)
| ironic/dhcp/neutron.py | 16,025 | API for communicating to neutron 2.x API.
Utility function to create Neutron client.
Get a port's fixed ip address.
:param port_uuid: Neutron port id.
:param client: Neutron client instance.
:returns: Neutron port ip address.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
Get ip address of ironic port assigned by neutron.
:param task: a TaskManager instance.
:param port_uuid: ironic Node's port UUID.
:param client: Neutron client instance.
:returns: Neutron port ip address associated with Node's port.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
Attempts to delete any ports created by cleaning
Purposefully will not raise any exceptions so error handling can
continue.
:param task: a TaskManager instance.
Create neutron ports for each port on task.node to boot the ramdisk.
:param task: a TaskManager instance.
:raises: InvalidParameterValue if the cleaning network is None
:returns: a dictionary in the form {port.uuid: neutron_port['id']}
Deletes the neutron port created for booting the ramdisk.
:param task: a TaskManager instance.
Get IP addresses for all ports in `task`.
:param task: a TaskManager instance.
:returns: List of IP addresses associated with task.ports.
Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: a dict of Neutron port dicts to update DHCP options on.
The keys should be Ironic port UUIDs, and the values should be
Neutron port UUIDs
If the value is None, will get the list of ports from the Ironic
port objects.
Update a port's mac address.
:param port_id: Neutron port id.
:param address: new MAC address.
:param token: optional auth token.
:raises: FailedToUpdateMacOnPort
Update a port's attributes.
Update one or more DHCP options on the specified port.
For the relevant API spec, see
http://docs.openstack.org/api/openstack-network/2.0/content/extra-dhc-opt-ext-update.html
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: optional auth token.
:raises: FailedToUpdateDHCPOptOnPort
Copyright 2014 OpenStack Foundation All Rights Reserved Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO(adam_g): Hack to workaround bug 1334447 until we have a mechanism for synchronizing events with Neutron. We need to sleep only if we are booting VMs, which is implied by SSHPower, to ensure they do not boot before Neutron agents have setup sufficient DHCP config for netboot. NOTE(faizan) At present only the first fixed_ip assigned to this neutron port will be used, since nova allocates only one fixed_ip for the instance. Match return value of get_node_vif_ids() Iterate the list of Neutron port dicts, remove the ones we added Only delete ports using the node's mac addresses Log the error, but let the caller invoke the manager.cleaning_error_handler(). | 4,159 | en | 0.656961 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import mock
from oslo_log import versionutils
from six.moves import http_client
from testtools import matchers
from keystone.contrib.endpoint_filter import routers
from keystone.tests import unit
from keystone.tests.unit import test_v3
class EndpointFilterTestCase(test_v3.RestfulTestCase):
    """Base class for the OS-EP-FILTER extension tests.

    Switches the catalog driver to the endpoint-filter SQL backend and
    precomputes the association URL for the default domain project and
    the default endpoint, which most tests operate on.
    """

    def config_overrides(self):
        super(EndpointFilterTestCase, self).config_overrides()
        self.config_fixture.config(group='catalog',
                                   driver='endpoint_filter.sql')

    def setUp(self):
        super(EndpointFilterTestCase, self).setUp()
        # URL linking the default domain project to the default endpoint.
        url_template = ('/OS-EP-FILTER/projects/%(project_id)s'
                        '/endpoints/%(endpoint_id)s')
        self.default_request_url = url_template % {
            'project_id': self.default_domain_project_id,
            'endpoint_id': self.endpoint_id}
class EndpointFilterDeprecateTestCase(test_v3.RestfulTestCase):
    """Check that loading the endpoint filter extension is deprecated."""

    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_exception_happens(self, mock_deprecator):
        # Merely instantiating the router must report the deprecation.
        routers.EndpointFilterExtension(mock.ANY)
        mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
        deprecation_args, _kwargs = mock_deprecator.call_args
        # The warning text must tell operators what to remove.
        self.assertIn("Remove endpoint_filter_extension from",
                      deprecation_args[1])
class EndpointFilterCRUDTestCase(EndpointFilterTestCase):
    """CRUD tests for explicit project <-> endpoint associations.

    Exercises the OS-EP-FILTER association resources: creating,
    checking, listing and removing links between a project and an
    endpoint, plus association cleanup on project/endpoint deletion
    and catalog-cache invalidation.
    """

    def test_create_endpoint_project_association(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Valid endpoint and project id test case.
        """
        self.put(self.default_request_url)

    def test_create_endpoint_project_association_with_invalid_project(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid project id test case.
        """
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': uuid.uuid4().hex,
                     'endpoint_id': self.endpoint_id},
                 expected_status=http_client.NOT_FOUND)

    def test_create_endpoint_project_association_with_invalid_endpoint(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid endpoint id test case.
        """
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.default_domain_project_id,
                     'endpoint_id': uuid.uuid4().hex},
                 expected_status=http_client.NOT_FOUND)

    def test_create_endpoint_project_association_with_unexpected_body(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Unexpected body in request. The body should be ignored.
        """
        self.put(self.default_request_url,
                 body={'project_id': self.default_domain_project_id})

    def test_check_endpoint_project_association(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Valid project and endpoint id test case.
        """
        self.put(self.default_request_url)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': self.default_domain_project_id,
                      'endpoint_id': self.endpoint_id})

    def test_check_endpoint_project_association_with_invalid_project(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': uuid.uuid4().hex,
                      'endpoint_id': self.endpoint_id},
                  expected_status=http_client.NOT_FOUND)

    def test_check_endpoint_project_association_with_invalid_endpoint(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid endpoint id test case.
        """
        self.put(self.default_request_url)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': self.default_domain_project_id,
                      'endpoint_id': uuid.uuid4().hex},
                  expected_status=http_client.NOT_FOUND)

    def test_list_endpoints_associated_with_valid_project(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints

        Valid project and endpoint id test case.
        """
        self.put(self.default_request_url)
        resource_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(resource_url)
        self.assertValidEndpointListResponse(r, self.endpoint,
                                             resource_url=resource_url)

    def test_list_endpoints_associated_with_invalid_project(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints

        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': uuid.uuid4().hex},
            expected_status=http_client.NOT_FOUND)

    def test_list_projects_associated_with_endpoint(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects

        Valid endpoint-project association test case.
        """
        self.put(self.default_request_url)
        resource_url = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {
            'endpoint_id': self.endpoint_id}
        r = self.get(resource_url)
        self.assertValidProjectListResponse(r, self.default_domain_project,
                                            resource_url=resource_url)

    def test_list_projects_with_no_endpoint_project_association(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects

        Valid endpoint id but no endpoint-project associations test case.
        """
        r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                     {'endpoint_id': self.endpoint_id})
        self.assertValidProjectListResponse(r, expected_length=0)

    def test_list_projects_associated_with_invalid_endpoint(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects

        Invalid endpoint id test case.
        """
        self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                 {'endpoint_id': uuid.uuid4().hex},
                 expected_status=http_client.NOT_FOUND)

    def test_remove_endpoint_project_association(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Valid project id and endpoint id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': self.default_domain_project_id,
                        'endpoint_id': self.endpoint_id})

    def test_remove_endpoint_project_association_with_invalid_project(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': uuid.uuid4().hex,
                        'endpoint_id': self.endpoint_id},
                    expected_status=http_client.NOT_FOUND)

    def test_remove_endpoint_project_association_with_invalid_endpoint(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid endpoint id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': self.default_domain_project_id,
                        'endpoint_id': uuid.uuid4().hex},
                    expected_status=http_client.NOT_FOUND)

    def test_endpoint_project_association_cleanup_when_project_deleted(self):
        """Deleting a project removes its endpoint associations."""
        self.put(self.default_request_url)
        association_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                           {'endpoint_id': self.endpoint_id})
        r = self.get(association_url)
        self.assertValidProjectListResponse(r, expected_length=1)
        self.delete('/projects/%(project_id)s' % {
            'project_id': self.default_domain_project_id})
        # The association must be gone along with the project.
        r = self.get(association_url)
        self.assertValidProjectListResponse(r, expected_length=0)

    def test_endpoint_project_association_cleanup_when_endpoint_deleted(self):
        """Deleting an endpoint removes its project associations."""
        self.put(self.default_request_url)
        association_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(association_url)
        self.assertValidEndpointListResponse(r, expected_length=1)
        self.delete('/endpoints/%(endpoint_id)s' % {
            'endpoint_id': self.endpoint_id})
        # The association must be gone along with the endpoint.
        r = self.get(association_url)
        self.assertValidEndpointListResponse(r, expected_length=0)

    @unit.skip_if_cache_disabled('catalog')
    def test_create_endpoint_project_association_invalidates_cache(self):
        """Adding an association via the API invalidates the catalog cache."""
        # NOTE(davechen): create another endpoint which will be added to
        # default project, this should be done at first since
        # `create_endpoint` will also invalidate cache.
        endpoint_id2 = uuid.uuid4().hex
        endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
                                          region_id=self.region_id,
                                          interface='public',
                                          id=endpoint_id2)
        self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
        # create endpoint project association.
        self.put(self.default_request_url)
        # should get back only one endpoint that was just created.
        user_id = uuid.uuid4().hex
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        # there is only one endpoints associated with the default project.
        self.assertEqual(1, len(catalog[0]['endpoints']))
        self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id'])
        # add the second endpoint to default project, bypassing
        # catalog_api API manager.
        self.catalog_api.driver.add_endpoint_to_project(
            endpoint_id2,
            self.default_domain_project_id)
        # but, we can just get back one endpoint from the cache, since the
        # catalog is pulled out from cache and its haven't been invalidated.
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertEqual(1, len(catalog[0]['endpoints']))
        # remove the endpoint2 from the default project, and add it again via
        # catalog_api API manager.
        self.catalog_api.driver.remove_endpoint_from_project(
            endpoint_id2,
            self.default_domain_project_id)
        # add second endpoint to default project, this can be done by calling
        # the catalog_api API manager directly but call the REST API
        # instead for consistency.
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.default_domain_project_id,
                     'endpoint_id': endpoint_id2})
        # should get back two endpoints since the cache has been
        # invalidated when the second endpoint was added to default project.
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertEqual(2, len(catalog[0]['endpoints']))
        ep_id_list = [catalog[0]['endpoints'][0]['id'],
                      catalog[0]['endpoints'][1]['id']]
        self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)

    @unit.skip_if_cache_disabled('catalog')
    def test_remove_endpoint_from_project_invalidates_cache(self):
        """Removing an association via the API invalidates the catalog cache."""
        endpoint_id2 = uuid.uuid4().hex
        endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
                                          region_id=self.region_id,
                                          interface='public',
                                          id=endpoint_id2)
        self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
        # create endpoint project association.
        self.put(self.default_request_url)
        # add second endpoint to default project.
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.default_domain_project_id,
                     'endpoint_id': endpoint_id2})
        # should get back only one endpoint that was just created.
        user_id = uuid.uuid4().hex
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        # there are two endpoints associated with the default project.
        ep_id_list = [catalog[0]['endpoints'][0]['id'],
                      catalog[0]['endpoints'][1]['id']]
        self.assertEqual(2, len(catalog[0]['endpoints']))
        self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
        # remove the endpoint2 from the default project, bypassing
        # catalog_api API manager.
        self.catalog_api.driver.remove_endpoint_from_project(
            endpoint_id2,
            self.default_domain_project_id)
        # but, we can just still get back two endpoints from the cache,
        # since the catalog is pulled out from cache and its haven't
        # been invalidated.
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertEqual(2, len(catalog[0]['endpoints']))
        # add back the endpoint2 to the default project, and remove it by
        # catalog_api API manage.
        self.catalog_api.driver.add_endpoint_to_project(
            endpoint_id2,
            self.default_domain_project_id)
        # remove the endpoint2 from the default project, this can be done
        # by calling the catalog_api API manager directly but call
        # the REST API instead for consistency.
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': self.default_domain_project_id,
                        'endpoint_id': endpoint_id2})
        # should only get back one endpoint since the cache has been
        # invalidated after the endpoint project association was removed.
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertEqual(1, len(catalog[0]['endpoints']))
        self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id'])
class EndpointFilterTokenRequestTestCase(EndpointFilterTestCase):
def test_project_scoped_token_using_endpoint_filter(self):
"""Verify endpoints from project scoped token filtered."""
# create a project to work with
ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
'endpoint_id': self.endpoint_id})
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(project['id'], r.result['token']['project']['id'])
def test_default_scoped_token_using_endpoint_filter(self):
"""Verify endpoints from default scoped token filtered."""
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id})
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(self.project['id'],
r.result['token']['project']['id'])
# Ensure name of the service exists
self.assertIn('name', r.result['token']['catalog'][0])
# region and region_id should be the same in endpoints
endpoint = r.result['token']['catalog'][0]['endpoints'][0]
self.assertIn('region', endpoint)
self.assertIn('region_id', endpoint)
self.assertEqual(endpoint['region'], endpoint['region_id'])
def test_scoped_token_with_no_catalog_using_endpoint_filter(self):
"""Verify endpoint filter does not affect no catalog."""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id})
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False)
self.assertEqual(self.project['id'],
r.result['token']['project']['id'])
    def test_invalid_endpoint_project_association(self):
        """Verify an invalid endpoint-project association is handled."""
        # add first endpoint to default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': self.endpoint_id})
        # create a second temporary endpoint
        endpoint_id2 = uuid.uuid4().hex
        endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
                                          region_id=self.region_id,
                                          interface='public',
                                          id=endpoint_id2)
        self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
        # add second endpoint to default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': endpoint_id2})
        # remove the temporary reference
        # this will create inconsistency in the endpoint filter table
        # which is fixed during the catalog creation for token request
        self.catalog_api.delete_endpoint(endpoint_id2)
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        r = self.post('/auth/tokens', body=auth_data)
        # the stale association must be ignored: only the one surviving
        # endpoint association shows up in the filtered catalog
        self.assertValidProjectScopedTokenResponse(
            r,
            require_catalog=True,
            endpoint_filter=True,
            ep_filter_assoc=1)
        self.assertEqual(self.project['id'],
                         r.result['token']['project']['id'])
    def test_disabled_endpoint(self):
        """Test that a disabled endpoint is handled."""
        # Add an enabled endpoint to the default project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': self.endpoint_id})
        # Add a disabled endpoint to the default project.
        # Create a disabled endpoint that's like the enabled one.
        disabled_endpoint_ref = copy.copy(self.endpoint)
        disabled_endpoint_id = uuid.uuid4().hex
        disabled_endpoint_ref.update({
            'id': disabled_endpoint_id,
            'enabled': False,
            'interface': 'internal'
        })
        self.catalog_api.create_endpoint(disabled_endpoint_id,
                                         disabled_endpoint_ref)
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': disabled_endpoint_id})
        # Authenticate to get token with catalog
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'])
        r = self.post('/auth/tokens', body=auth_data)
        # Only the enabled endpoint may appear in the filtered catalog.
        endpoints = r.result['token']['catalog'][0]['endpoints']
        endpoint_ids = [ep['id'] for ep in endpoints]
        self.assertEqual([self.endpoint_id], endpoint_ids)
def test_multiple_endpoint_project_associations(self):
def _create_an_endpoint():
endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id,
interface='public',
region_id=self.region_id)
r = self.post('/endpoints', body={'endpoint': endpoint_ref})
return r.result['endpoint']['id']
# create three endpoints
endpoint_id1 = _create_an_endpoint()
endpoint_id2 = _create_an_endpoint()
_create_an_endpoint()
# only associate two endpoints with project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint_id1})
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint_id2})
# there should be only two endpoints in token catalog
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=2)
def test_get_auth_catalog_using_endpoint_filter(self):
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id})
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
token_data = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
token_data,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
auth_catalog = self.get('/auth/catalog',
token=token_data.headers['X-Subject-Token'])
self.assertEqual(token_data.result['token']['catalog'],
auth_catalog.result['catalog'])
class JsonHomeTests(EndpointFilterTestCase, test_v3.JsonHomeTestMixin):
    """JSON home document entries for the OS-EP-FILTER relations."""

    # Mapping of relation URI -> expected href / href-template data,
    # consumed by test_v3.JsonHomeTestMixin (presumably checked against
    # the service's JSON home document -- see the mixin for details).
    JSON_HOME_DATA = {
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_projects': {
            'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects',
            'href-vars': {
                'endpoint_id':
                'http://docs.openstack.org/api/openstack-identity/3/param/'
                'endpoint_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_groups': {
            'href': '/OS-EP-FILTER/endpoint_groups',
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}',
            'href-vars': {
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_group_to_project_association': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}/projects/{project_id}',
            'href-vars': {
                'project_id':
                'http://docs.openstack.org/api/openstack-identity/3/param/'
                'project_id',
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/projects_associated_with_endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}/projects',
            'href-vars': {
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoints_in_endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}/endpoints',
            'href-vars': {
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/project_endpoint_groups': {
            'href-template': '/OS-EP-FILTER/projects/{project_id}/'
                             'endpoint_groups',
            'href-vars': {
                'project_id':
                'http://docs.openstack.org/api/openstack-identity/3/param/'
                'project_id',
            },
        },
    }
class EndpointGroupCRUDTestCase(EndpointFilterTestCase):
    """CRUD test cases for OS-EP-FILTER endpoint group resources."""

    # Prototypical create-request body used by most tests; tests that
    # mutate it take a copy.deepcopy first so this constant stays pristine.
    DEFAULT_ENDPOINT_GROUP_BODY = {
        'endpoint_group': {
            'description': 'endpoint group description',
            'filters': {
                'interface': 'admin'
            },
            'name': 'endpoint_group_name'
        }
    }
    # Collection URL for endpoint groups.
    DEFAULT_ENDPOINT_GROUP_URL = '/OS-EP-FILTER/endpoint_groups'
def test_create_endpoint_group(self):
"""POST /OS-EP-FILTER/endpoint_groups
Valid endpoint group test case.
"""
r = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
body=self.DEFAULT_ENDPOINT_GROUP_BODY)
expected_filters = (self.DEFAULT_ENDPOINT_GROUP_BODY
['endpoint_group']['filters'])
expected_name = (self.DEFAULT_ENDPOINT_GROUP_BODY
['endpoint_group']['name'])
self.assertEqual(expected_filters,
r.result['endpoint_group']['filters'])
self.assertEqual(expected_name, r.result['endpoint_group']['name'])
self.assertThat(
r.result['endpoint_group']['links']['self'],
matchers.EndsWith(
'/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': r.result['endpoint_group']['id']}))
def test_create_invalid_endpoint_group(self):
"""POST /OS-EP-FILTER/endpoint_groups
Invalid endpoint group creation test case.
"""
invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'}
self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
body=invalid_body,
expected_status=http_client.BAD_REQUEST)
def test_get_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case.
"""
# create an endpoint group to work with
response = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
body=self.DEFAULT_ENDPOINT_GROUP_BODY)
endpoint_group_id = response.result['endpoint_group']['id']
endpoint_group_filters = response.result['endpoint_group']['filters']
endpoint_group_name = response.result['endpoint_group']['name']
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.get(url)
self.assertEqual(endpoint_group_id,
response.result['endpoint_group']['id'])
self.assertEqual(endpoint_group_filters,
response.result['endpoint_group']['filters'])
self.assertEqual(endpoint_group_name,
response.result['endpoint_group']['name'])
self.assertThat(response.result['endpoint_group']['links']['self'],
matchers.EndsWith(url))
def test_get_invalid_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case.
"""
endpoint_group_id = 'foobar'
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.get(url, expected_status=http_client.NOT_FOUND)
def test_check_endpoint_group(self):
"""HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Valid endpoint_group_id test case.
"""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.head(url, expected_status=http_client.OK)
def test_check_invalid_endpoint_group(self):
"""HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Invalid endpoint_group_id test case.
"""
endpoint_group_id = 'foobar'
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.head(url, expected_status=http_client.NOT_FOUND)
def test_patch_endpoint_group(self):
"""PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group patch test case.
"""
body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
body['endpoint_group']['filters'] = {'region_id': 'UK'}
body['endpoint_group']['name'] = 'patch_test'
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
r = self.patch(url, body=body)
self.assertEqual(endpoint_group_id,
r.result['endpoint_group']['id'])
self.assertEqual(body['endpoint_group']['filters'],
r.result['endpoint_group']['filters'])
self.assertThat(r.result['endpoint_group']['links']['self'],
matchers.EndsWith(url))
def test_patch_nonexistent_endpoint_group(self):
"""PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group patch test case.
"""
body = {
'endpoint_group': {
'name': 'patch_test'
}
}
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': 'ABC'}
self.patch(url, body=body, expected_status=http_client.NOT_FOUND)
    def test_patch_invalid_endpoint_group(self):
        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Invalid endpoint group patch test case.
        """
        body = {
            'endpoint_group': {
                'description': 'endpoint group description',
                # 'region' is not a filterable attribute ('region_id' is),
                # so this patch must be rejected with 400
                'filters': {
                    'region': 'UK'
                },
                'name': 'patch_test'
            }
        }
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.patch(url, body=body, expected_status=http_client.BAD_REQUEST)
        # Perform a GET call to ensure that the content remains
        # the same (as DEFAULT_ENDPOINT_GROUP_BODY) after attempting to update
        # with an invalid filter
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        r = self.get(url)
        del r.result['endpoint_group']['id']
        del r.result['endpoint_group']['links']
        self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result)
def test_delete_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case.
"""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_delete_invalid_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case.
"""
endpoint_group_id = 'foobar'
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
self.delete(url, expected_status=http_client.NOT_FOUND)
def test_add_endpoint_group_to_project(self):
"""Create a valid endpoint group and project association."""
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id,
self.project_id)
def test_add_endpoint_group_to_project_with_invalid_project_id(self):
"""Create an invalid endpoint group and project association."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(
endpoint_group_id, project_id)
self.put(url, expected_status=http_client.NOT_FOUND)
def test_get_endpoint_group_in_project(self):
"""Test retrieving project endpoint group association."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
self.put(url)
response = self.get(url)
self.assertEqual(
endpoint_group_id,
response.result['project_endpoint_group']['endpoint_group_id'])
self.assertEqual(
self.project_id,
response.result['project_endpoint_group']['project_id'])
def test_get_invalid_endpoint_group_in_project(self):
"""Test retrieving project endpoint group association."""
endpoint_group_id = uuid.uuid4().hex
project_id = uuid.uuid4().hex
url = self._get_project_endpoint_group_url(
endpoint_group_id, project_id)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_list_endpoint_groups_in_project(self):
"""GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
self.put(url)
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
{'project_id': self.project_id})
response = self.get(url)
self.assertEqual(
endpoint_group_id,
response.result['endpoint_groups'][0]['id'])
def test_list_endpoint_groups_in_invalid_project(self):
"""Test retrieving from invalid project."""
project_id = uuid.uuid4().hex
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
{'project_id': project_id})
self.get(url, expected_status=http_client.NOT_FOUND)
def test_empty_endpoint_groups_in_project(self):
"""Test when no endpoint groups associated with the project."""
url = ('/OS-EP-FILTER/projects/%(project_id)s/endpoint_groups' %
{'project_id': self.project_id})
response = self.get(url)
self.assertEqual(0, len(response.result['endpoint_groups']))
def test_check_endpoint_group_to_project(self):
"""Test HEAD with a valid endpoint group and project association."""
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
self._create_endpoint_group_project_association(endpoint_group_id,
self.project_id)
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
self.head(url, expected_status=http_client.OK)
    def test_check_endpoint_group_to_project_with_invalid_project_id(self):
        """Test HEAD with an invalid endpoint group and project association."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create an endpoint group to project association
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.project_id)
        self.put(url)
        # send a head request with an invalid project id
        # (a valid association exists, but a random project id must 404)
        project_id = uuid.uuid4().hex
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, project_id)
        self.head(url, expected_status=http_client.NOT_FOUND)
def test_list_endpoint_groups(self):
"""GET /OS-EP-FILTER/endpoint_groups."""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# recover all endpoint groups
url = '/OS-EP-FILTER/endpoint_groups'
r = self.get(url)
self.assertNotEmpty(r.result['endpoint_groups'])
self.assertEqual(endpoint_group_id,
r.result['endpoint_groups'][0].get('id'))
def test_list_projects_associated_with_endpoint_group(self):
"""GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects
Valid endpoint group test case.
"""
# create an endpoint group to work with
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# associate endpoint group with project
self._create_endpoint_group_project_association(endpoint_group_id,
self.project_id)
# recover list of projects associated with endpoint group
url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
'/projects' %
{'endpoint_group_id': endpoint_group_id})
self.get(url)
    def test_list_endpoints_associated_with_endpoint_group(self):
        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints
        Valid endpoint group test case.
        """
        # create a service
        service_ref = unit.new_service_ref()
        response = self.post(
            '/services',
            body={'service': service_ref})
        service_id = response.result['service']['id']
        # create an endpoint on the new service
        endpoint_ref = unit.new_endpoint_ref(service_id=service_id,
                                             interface='public',
                                             region_id=self.region_id)
        response = self.post('/endpoints', body={'endpoint': endpoint_ref})
        endpoint_id = response.result['endpoint']['id']
        # create an endpoint group whose filter matches only that service
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'service_id': service_id}
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, body)
        # create association
        self._create_endpoint_group_project_association(endpoint_group_id,
                                                        self.project_id)
        # recover list of endpoints associated with endpoint group;
        # only the endpoint of the filtered service must be listed
        url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
               '/endpoints' % {'endpoint_group_id': endpoint_group_id})
        r = self.get(url)
        self.assertNotEmpty(r.result['endpoints'])
        self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id'))
    def test_list_endpoints_associated_with_project_endpoint_group(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints
        Valid project, endpoint id, and endpoint group test case.
        """
        # create a temporary service
        service_ref = unit.new_service_ref()
        response = self.post('/services', body={'service': service_ref})
        service_id2 = response.result['service']['id']
        # create additional endpoints
        self._create_endpoint_and_associations(
            self.default_domain_project_id, service_id2)
        self._create_endpoint_and_associations(
            self.default_domain_project_id)
        # create project and endpoint association with default endpoint:
        self.put(self.default_request_url)
        # create an endpoint group that contains a different endpoint
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'service_id': service_id2}
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, body)
        # associate endpoint group with project
        self._create_endpoint_group_project_association(
            endpoint_group_id, self.default_domain_project_id)
        # Now get a list of the filtered endpoints
        # (one from the direct association plus one via the endpoint group)
        endpoints_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(endpoints_url)
        endpoints = self.assertValidEndpointListResponse(r)
        self.assertEqual(2, len(endpoints))
        # Ensure catalog includes the endpoints from endpoint_group project
        # association, this is needed when a project scoped token is issued
        # and "endpoint_filter.sql" backend driver is in place.
        user_id = uuid.uuid4().hex
        catalog_list = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertEqual(2, len(catalog_list))
        # Now remove project endpoint group association
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.default_domain_project_id)
        self.delete(url)
        # Now remove endpoint group
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.delete(url)
        # only the directly associated endpoint must remain
        r = self.get(endpoints_url)
        endpoints = self.assertValidEndpointListResponse(r)
        self.assertEqual(1, len(endpoints))
        catalog_list = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertEqual(1, len(catalog_list))
    def test_endpoint_group_project_cleanup_with_project(self):
        """Deleting a project removes its endpoint group associations."""
        # create endpoint group
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create new project and associate with endpoint_group
        project_ref = unit.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': project_ref})
        project = self.assertValidProjectResponse(r, project_ref)
        url = self._get_project_endpoint_group_url(endpoint_group_id,
                                                   project['id'])
        self.put(url)
        # check that we can recover the project endpoint group association
        self.get(url)
        # Now delete the project and then try and retrieve the project
        # endpoint group association again
        self.delete('/projects/%(project_id)s' % {
            'project_id': project['id']})
        self.get(url, expected_status=http_client.NOT_FOUND)
    def test_endpoint_group_project_cleanup_with_endpoint_group(self):
        """Removing an association deletes it for a still-existing project."""
        # create endpoint group
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create new project and associate with endpoint_group
        project_ref = unit.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': project_ref})
        project = self.assertValidProjectResponse(r, project_ref)
        url = self._get_project_endpoint_group_url(endpoint_group_id,
                                                   project['id'])
        self.put(url)
        # check that we can recover the project endpoint group association
        self.get(url)
        # now remove the project endpoint group association
        self.delete(url)
        self.get(url, expected_status=http_client.NOT_FOUND)
def test_removing_an_endpoint_group_project(self):
# create an endpoint group
endpoint_group_id = self._create_valid_endpoint_group(
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create an endpoint_group project
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.default_domain_project_id)
self.put(url)
# remove the endpoint group project
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND)
    def test_remove_endpoint_group_with_project_association(self):
        """Deleting an endpoint group cascades to its associations."""
        # create an endpoint group
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create an endpoint_group project
        project_endpoint_group_url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.default_domain_project_id)
        self.put(project_endpoint_group_url)
        # remove endpoint group, the associated endpoint_group project will
        # be removed as well.
        endpoint_group_url = ('/OS-EP-FILTER/endpoint_groups/'
                              '%(endpoint_group_id)s'
                              % {'endpoint_group_id': endpoint_group_id})
        self.delete(endpoint_group_url)
        self.get(endpoint_group_url, expected_status=http_client.NOT_FOUND)
        self.get(project_endpoint_group_url,
                 expected_status=http_client.NOT_FOUND)
    @unit.skip_if_cache_disabled('catalog')
    def test_add_endpoint_group_to_project_invalidates_catalog_cache(self):
        """Adding a group via the manager must invalidate the catalog cache."""
        # create another endpoint with 'admin' interface which matches
        # 'filters' definition in endpoint group, then there should be two
        # endpoints returned when retrieving v3 catalog if cache works as
        # expected.
        # this should be done at first since `create_endpoint` will also
        # invalidate cache.
        endpoint_id2 = uuid.uuid4().hex
        endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
                                          region_id=self.region_id,
                                          interface='admin',
                                          id=endpoint_id2)
        self.catalog_api.create_endpoint(endpoint_id2, endpoint2)
        # create a project and endpoint association.
        self.put(self.default_request_url)
        # there is only one endpoint associated with the default project.
        user_id = uuid.uuid4().hex
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1))
        # create an endpoint group.
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # add the endpoint group to default project, bypassing
        # catalog_api API manager.
        self.catalog_api.driver.add_endpoint_group_to_project(
            endpoint_group_id,
            self.default_domain_project_id)
        # can get back only one endpoint from the cache, since the catalog
        # is pulled out from cache.
        invalid_catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertThat(invalid_catalog[0]['endpoints'],
                        matchers.HasLength(1))
        self.assertEqual(catalog, invalid_catalog)
        # remove the endpoint group from default project, and add it again via
        # catalog_api API manager.
        self.catalog_api.driver.remove_endpoint_group_from_project(
            endpoint_group_id,
            self.default_domain_project_id)
        # add the endpoint group to default project.
        self.catalog_api.add_endpoint_group_to_project(
            endpoint_group_id,
            self.default_domain_project_id)
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        # now, it will return 2 endpoints since the cache has been
        # invalidated.
        self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2))
        ep_id_list = [catalog[0]['endpoints'][0]['id'],
                      catalog[0]['endpoints'][1]['id']]
        self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
    @unit.skip_if_cache_disabled('catalog')
    def test_remove_endpoint_group_from_project_invalidates_cache(self):
        """Removing a group via the manager must invalidate the cache."""
        # create another endpoint with 'admin' interface which matches
        # 'filters' definition in endpoint group, then there should be two
        # endpoints returned when retrieving v3 catalog. But only one
        # endpoint will return after the endpoint group's deletion if cache
        # works as expected.
        # this should be done at first since `create_endpoint` will also
        # invalidate cache.
        endpoint_id2 = uuid.uuid4().hex
        endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
                                          region_id=self.region_id,
                                          interface='admin',
                                          id=endpoint_id2)
        self.catalog_api.create_endpoint(endpoint_id2, endpoint2)
        # create project and endpoint association.
        self.put(self.default_request_url)
        # create an endpoint group.
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # add the endpoint group to default project.
        self.catalog_api.add_endpoint_group_to_project(
            endpoint_group_id,
            self.default_domain_project_id)
        # should get back two endpoints, one from endpoint project
        # association, the other one is from endpoint_group project
        # association.
        user_id = uuid.uuid4().hex
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2))
        ep_id_list = [catalog[0]['endpoints'][0]['id'],
                      catalog[0]['endpoints'][1]['id']]
        self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
        # remove endpoint_group project association, bypassing
        # catalog_api API manager.
        self.catalog_api.driver.remove_endpoint_group_from_project(
            endpoint_group_id,
            self.default_domain_project_id)
        # still get back two endpoints, since the catalog is pulled out
        # from cache and the cache haven't been invalidated.
        invalid_catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertThat(invalid_catalog[0]['endpoints'],
                        matchers.HasLength(2))
        self.assertEqual(catalog, invalid_catalog)
        # add back the endpoint_group project association and remove it from
        # manager.
        self.catalog_api.driver.add_endpoint_group_to_project(
            endpoint_group_id,
            self.default_domain_project_id)
        self.catalog_api.remove_endpoint_group_from_project(
            endpoint_group_id,
            self.default_domain_project_id)
        # should only get back one endpoint since the cache has been
        # invalidated after the endpoint_group project association was
        # removed.
        catalog = self.catalog_api.get_v3_catalog(
            user_id,
            self.default_domain_project_id)
        self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1))
        self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id'])
def _create_valid_endpoint_group(self, url, body):
r = self.post(url, body=body)
return r.result['endpoint_group']['id']
def _create_endpoint_group_project_association(self,
endpoint_group_id,
project_id):
url = self._get_project_endpoint_group_url(endpoint_group_id,
project_id)
self.put(url)
def _get_project_endpoint_group_url(self,
endpoint_group_id,
project_id):
return ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
'/projects/%(project_id)s' %
{'endpoint_group_id': endpoint_group_id,
'project_id': project_id})
def _create_endpoint_and_associations(self, project_id, service_id=None):
"""Creates an endpoint associated with service and project."""
if not service_id:
# create a new service
service_ref = unit.new_service_ref()
response = self.post(
'/services', body={'service': service_ref})
service_id = response.result['service']['id']
# create endpoint
endpoint_ref = unit.new_endpoint_ref(service_id=service_id,
interface='public',
region_id=self.region_id)
response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint = response.result['endpoint']
# now add endpoint to project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint['id']})
return endpoint
| keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | 59,084 | Creates an endpoint associated with service and project.
Create a valid endpoint group and project association.
Create an invalid endpoint group and project association.
HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Valid endpoint_group_id test case.
Test HEAD with a valid endpoint group and project association.
Test HEAD with an invalid endpoint group and project association.
HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project and endpoint id test case.
HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
Invalid endpoint_group_id test case.
POST /OS-EP-FILTER/endpoint_groups
Valid endpoint group test case.
PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid endpoint and project id test case.
PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Unexpected body in request. The body should be ignored.
POST /OS-EP-FILTER/endpoint_groups
Invalid endpoint group creation test case.
Verify endpoints from default scoped token filtered.
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case.
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case.
Test that a disabled endpoint is handled.
Test when no endpoint groups associated with the project.
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group test case.
Test retrieving project endpoint group association.
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group test case.
Test retrieving project endpoint group association.
Verify an invalid endpoint-project association is handled.
GET /OS-EP-FILTER/endpoint_groups.
Test retrieving from invalid project.
GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups.
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints
Valid endpoint group test case.
GET /OS-EP-FILTER/projects/{project_id}/endpoints
Invalid project id test case.
GET /OS-EP-FILTER/projects/{project_id}/endpoints
Valid project, endpoint id, and endpoint group test case.
GET /OS-EP-FILTER/projects/{project_id}/endpoints
Valid project and endpoint id test case.
GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Valid endpoint-project association test case.
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects
Valid endpoint group test case.
GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Invalid endpoint id test case.
GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
Valid endpoint id but no endpoint-project associations test case.
PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group patch test case.
PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Valid endpoint group patch test case.
PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
Invalid endpoint group patch test case.
Verify endpoints from project scoped token filtered.
DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Valid project id and endpoint id test case.
DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid endpoint id test case.
DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
Invalid project id test case.
Verify endpoint filter does not affect no catalog.
Copyright 2013 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE(davechen): create another endpoint which will be added to default project, this should be done at first since `create_endpoint` will also invalidate cache. create endpoint project association. should get back only one endpoint that was just created. there is only one endpoints associated with the default project. add the second endpoint to default project, bypassing catalog_api API manager. but, we can just get back one endpoint from the cache, since the catalog is pulled out from cache and its haven't been invalidated. remove the endpoint2 from the default project, and add it again via catalog_api API manager. add second endpoint to default project, this can be done by calling the catalog_api API manager directly but call the REST API instead for consistency. should get back two endpoints since the cache has been invalidated when the second endpoint was added to default project. create endpoint project association. add second endpoint to default project. should get back only one endpoint that was just created. there are two endpoints associated with the default project. remove the endpoint2 from the default project, bypassing catalog_api API manager. but, we can just still get back two endpoints from the cache, since the catalog is pulled out from cache and its haven't been invalidated. add back the endpoint2 to the default project, and remove it by catalog_api API manage. 
remove the endpoint2 from the default project, this can be done by calling the catalog_api API manager directly but call the REST API instead for consistency. should only get back one endpoint since the cache has been invalidated after the endpoint project association was removed. create a project to work with grant the user a role on the project set the user's preferred project add one endpoint to the project attempt to authenticate without requesting a project add one endpoint to default project Ensure name of the service exists region and region_id should be the same in endpoints add first endpoint to default project create a second temporary endpoint add second endpoint to default project remove the temporary reference this will create inconsistency in the endpoint filter table which is fixed during the catalog creation for token request Add an enabled endpoint to the default project Add a disabled endpoint to the default project. Create a disabled endpoint that's like the enabled one. 
Authenticate to get token with catalog create three endpoints only associate two endpoints with project there should be only two endpoints in token catalog add one endpoint to default project create an endpoint group to work with create an endpoint group to work with create an endpoint group to work with create an endpoint group to work with Perform a GET call to ensure that the content remains the same (as DEFAULT_ENDPOINT_GROUP_BODY) after attempting to update with an invalid filter create an endpoint group to work with create an endpoint group to work with associate endpoint group with project create an endpoint group to work with associate endpoint group with project create an endpoint group to work with associate endpoint group with project create an endpoint group to work with create an endpoint group to project association send a head request with an invalid project id create an endpoint group to work with recover all endpoint groups create an endpoint group to work with associate endpoint group with project recover list of projects associated with endpoint group create a service create an endpoint create an endpoint group create association recover list of endpoints associated with endpoint group create a temporary service create additional endpoints create project and endpoint association with default endpoint: create an endpoint group that contains a different endpoint associate endpoint group with project Now get a list of the filtered endpoints Ensure catalog includes the endpoints from endpoint_group project association, this is needed when a project scoped token is issued and "endpoint_filter.sql" backend driver is in place. 
Now remove project endpoint group association Now remove endpoint group create endpoint group create new project and associate with endpoint_group check that we can recover the project endpoint group association Now delete the project and then try and retrieve the project endpoint group association again create endpoint group create new project and associate with endpoint_group check that we can recover the project endpoint group association now remove the project endpoint group association create an endpoint group create an endpoint_group project remove the endpoint group project create an endpoint group create an endpoint_group project remove endpoint group, the associated endpoint_group project will be removed as well. create another endpoint with 'admin' interface which matches 'filters' definition in endpoint group, then there should be two endpoints returned when retrieving v3 catalog if cache works as expected. this should be done at first since `create_endpoint` will also invalidate cache. create a project and endpoint association. there is only one endpoint associated with the default project. create an endpoint group. add the endpoint group to default project, bypassing catalog_api API manager. can get back only one endpoint from the cache, since the catalog is pulled out from cache. remove the endpoint group from default project, and add it again via catalog_api API manager. add the endpoint group to default project. now, it will return 2 endpoints since the cache has been invalidated. create another endpoint with 'admin' interface which matches 'filters' definition in endpoint group, then there should be two endpoints returned when retrieving v3 catalog. But only one endpoint will return after the endpoint group's deletion if cache works as expected. this should be done at first since `create_endpoint` will also invalidate cache. create project and endpoint association. create an endpoint group. add the endpoint group to default project. 
should get back two endpoints, one from endpoint project association, the other one is from endpoint_group project association. remove endpoint_group project association, bypassing catalog_api API manager. still get back two endpoints, since the catalog is pulled out from cache and the cache haven't been invalidated. add back the endpoint_group project association and remove it from manager. should only get back one endpoint since the cache has been invalidated after the endpoint_group project association was removed. create a new service create endpoint now add endpoint to project | 10,799 | en | 0.766947 |
"""
Unittests of asm2cfg's regexes
"""
import unittest
from src.asm2cfg import asm2cfg
class FunctionHeaderTestCase(unittest.TestCase):
    """
    Exercise the regex that recognizes disassembly function headers.
    """

    def test_gdb_unstripped(self):
        header = 'Dump of assembler code for function test_function:'
        input_format, function_name = asm2cfg.parse_function_header(header)
        self.assertEqual(input_format, asm2cfg.InputFormat.GDB)
        self.assertEqual(function_name, 'test_function')

    def test_gdb_stripped(self):
        header = 'Dump of assembler code from 0x555555555faf to 0x555555557008:'
        input_format, function_name = asm2cfg.parse_function_header(header)
        self.assertEqual(input_format, asm2cfg.InputFormat.GDB)
        self.assertEqual(function_name, '0x555555555faf-0x555555557008')

    def test_objdump(self):
        header = '000000000000100b <bar>:'
        input_format, function_name = asm2cfg.parse_function_header(header)
        self.assertEqual(input_format, asm2cfg.InputFormat.OBJDUMP)
        self.assertEqual(function_name, 'bar')
class ParseAddressTestCase(unittest.TestCase):
    """
    Exercise asm2cfg.parse_address on absolute and relative forms.
    """

    def test_absolute(self):
        address, remainder = asm2cfg.parse_address('0x000055555557259c: XYZ')
        self.assertIsNotNone(address)
        self.assertEqual(address.abs, 0x55555557259c)
        self.assertIsNone(address.base)
        self.assertIsNone(address.offset)
        self.assertEqual(remainder, ' XYZ')

    def test_relative(self):
        address, remainder = asm2cfg.parse_address('0x000055555557259c <+11340>: XYZ')
        self.assertIsNotNone(address)
        self.assertEqual(address.abs, 0x55555557259c)
        self.assertIsNone(address.base)
        self.assertEqual(address.offset, 11340)
        self.assertEqual(remainder, ' XYZ')
class ParseBodyTestCase(unittest.TestCase):
    """
    Exercise asm2cfg.parse_body on a variety of x86 call encodings.
    """

    def setUp(self):
        self.target_info = asm2cfg.X86TargetInfo()

    def test_gdb_stripped_known(self):
        insn = ' call 0x55555558add0 <_Z19exportDebugifyStats>'
        body, opcode, operands, remainder = asm2cfg.parse_body(insn, self.target_info)
        self.assertIsNotNone(body)
        self.assertEqual(body, 'call 0x55555558add0')
        self.assertEqual(opcode, 'call')
        self.assertEqual(operands, ['0x55555558add0'])
        self.assertEqual(remainder, '<_Z19exportDebugifyStats>')

    def test_gdb_stripped_pic(self):
        insn = ' call *0x26a16(%rip) # 0x5555555967a8'
        body, opcode, operands, remainder = asm2cfg.parse_body(insn, self.target_info)
        self.assertIsNotNone(body)
        self.assertEqual(body, 'call *0x26a16(%rip)')
        self.assertEqual(opcode, 'call')
        self.assertEqual(operands, ['*0x26a16(%rip)'])
        self.assertEqual(remainder, '# 0x5555555967a8')

    def test_gdb_plt(self):
        insn = ' callq 0x1020 <foo@plt>'
        body, opcode, operands, remainder = asm2cfg.parse_body(insn, self.target_info)
        self.assertIsNotNone(body)
        self.assertEqual(body, 'callq 0x1020')
        self.assertEqual(opcode, 'callq')
        self.assertEqual(operands, ['0x1020'])
        self.assertEqual(remainder, '<foo@plt>')

    def test_gdb_stripped_nonpic(self):
        insn = ' call 0x555555555542'
        body, opcode, operands, remainder = asm2cfg.parse_body(insn, self.target_info)
        self.assertIsNotNone(body)
        self.assertEqual(body, 'call 0x555555555542')
        self.assertEqual(opcode, 'call')
        self.assertEqual(operands, ['0x555555555542'])
        self.assertEqual(remainder, '')

    def test_gdb_indirect_call(self):
        insn = ' callq *(%rsi)'
        body, opcode, operands, remainder = asm2cfg.parse_body(insn, self.target_info)
        self.assertIsNotNone(body)
        self.assertEqual(body, 'callq *(%rsi)')
        self.assertEqual(opcode, 'callq')
        self.assertEqual(operands, ['*(%rsi)'])
        self.assertEqual(remainder, '')
class ParseTargetTestCase(unittest.TestCase):
    """
    Exercise asm2cfg.parse_target on symbolic targets with and without offsets.
    """

    def test_with_offset(self):
        address, remainder = asm2cfg.parse_target('<_Z19exportDebugifyStats+123>')
        self.assertIsNotNone(address)
        self.assertIsNone(address.abs)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, 123)
        self.assertEqual(remainder, '')

    def test_with_neg_offset(self):
        address, remainder = asm2cfg.parse_target('<_Z19exportDebugifyStats-123>')
        self.assertIsNotNone(address)
        self.assertIsNone(address.abs)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        self.assertEqual(address.offset, -123)
        self.assertEqual(remainder, '')

    def test_without_offset(self):
        address, remainder = asm2cfg.parse_target('<_Z19exportDebugifyStats>')
        self.assertIsNotNone(address)
        self.assertIsNone(address.abs)
        self.assertEqual(address.base, '_Z19exportDebugifyStats')
        # A bare symbol parses as offset zero, not None.
        self.assertEqual(address.offset, 0)
        self.assertEqual(remainder, '')
class ParseCommentTestCase(unittest.TestCase):
    """
    Exercise asm2cfg.parse_comment on trailing address comments.
    """

    def setUp(self):
        self.target_info = asm2cfg.X86TargetInfo()

    def test_absolute(self):
        address, remainder = asm2cfg.parse_comment('# 0x5555555967a8', self.target_info)
        self.assertIsNotNone(address)
        self.assertEqual(address.abs, 0x5555555967a8)
        self.assertIsNone(address.base)
        self.assertIsNone(address.offset)
        self.assertEqual(remainder, '')

    def test_symbolic(self):
        address, remainder = asm2cfg.parse_comment('# 0x5555555967a8 <foo>', self.target_info)
        self.assertIsNotNone(address)
        self.assertEqual(address.abs, 0x5555555967a8)
        self.assertEqual(address.base, 'foo')
        self.assertIs(address.offset, 0)
        self.assertEqual(remainder, '')

    def test_complete(self):
        address, remainder = asm2cfg.parse_comment('# 3ff8 <foo+0x2ff8>', self.target_info)
        self.assertIsNotNone(address)
        self.assertEqual(address.abs, 0x3ff8)  # FIXME: support hex offsets
        self.assertEqual(address.base, 'foo')
        self.assertEqual(address.offset, 0x2ff8)
        self.assertEqual(remainder, '')
| test/test_regex.py | 6,323 | Tests of function header regex
Tests of parse_address function
Tests of asm2cfg.parse_body function
Tests of parse_comment function
Tests of parse_address function
Unittests of asm2cfg's regexes
FIXME: support hex offsets | 223 | en | 0.426712 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Franck Cuny <franck@lumberjaph.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: cpanm
short_description: Manages Perl library dependencies.
description:
- Manage Perl library dependencies.
version_added: "1.6"
options:
name:
description:
- The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
required: false
default: null
aliases: ["pkg"]
from_path:
description:
- The local directory from where to install
required: false
default: null
notest:
description:
- Do not run unit tests
required: false
default: false
locallib:
description:
- Specify the install base to install modules
required: false
default: false
mirror:
description:
- Specifies the base URL for the CPAN mirror to use
required: false
default: false
mirror_only:
description:
- Use the mirror's index file instead of the CPAN Meta DB
required: false
default: false
installdeps:
description:
- Only install dependencies
required: false
default: false
version_added: "2.0"
version:
description:
- minimum version of perl module to consider acceptable
required: false
default: false
version_added: "2.1"
system_lib:
description:
- Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
- This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation.
required: false
default: false
version_added: "2.0"
aliases: ['use_sudo']
executable:
description:
- Override the path to the cpanm executable
required: false
default: null
version_added: "2.1"
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
author: "Franck Cuny (@franckcuny)"
'''
EXAMPLES = '''
# install Dancer perl package
- cpanm: name=Dancer
# install version 0.99_05 of the Plack perl package
- cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz
# install Dancer into the specified locallib
- cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib
# install perl dependencies from local directory
- cpanm: from_path=/srv/webapps/my_app/src/
# install Dancer perl package without running the unit tests in indicated locallib
- cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib
# install Dancer perl package from a specific mirror
- cpanm: name=Dancer mirror=http://cpan.cpantesters.org/
# install Dancer perl package into the system root path
- cpanm: name=Dancer system_lib=yes
# install Dancer if it's not already installed
# OR the installed version is older than version 1.0
- cpanm: name=Dancer version=1.0
'''
def _is_package_installed(module, name, locallib, cpanm, version):
    """Return True when Perl can already ``use`` *name* (optionally at *version*).

    When *locallib* is given, PERL5LIB is pointed at ``<locallib>/lib/perl5``
    first.  The check runs ``perl -e 'use <name> [<version>];'`` and treats a
    zero exit status as "installed".  The *cpanm* argument is accepted for
    signature compatibility but unused here.
    """
    cmd = ""
    if locallib:
        os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
    cmd = "%s perl -e ' use %s" % (cmd, name)
    if version:
        cmd = "%s %s;'" % (cmd, version)
    else:
        cmd = "%s;'" % cmd
    res, stdout, stderr = module.run_command(cmd, check_rc=False)
    # Exit status 0 means perl could load the module.
    return res == 0
def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
    """Assemble the cpanm command line and return it as a string.

    Installs from the local directory *from_path* when given, otherwise
    installs the package *name*; the remaining flags append the matching
    cpanm options.
    """
    # Install from a local directory when requested, else by package name.
    target = from_path if from_path else name
    cmd = "%s %s" % (cpanm, target)
    if notest is True:
        cmd += " -n"                    # do not run unit tests
    if locallib is not None:
        cmd += " -l %s" % locallib      # install base (local::lib)
    if mirror is not None:
        cmd += " --mirror %s" % mirror  # CPAN mirror base URL
    if mirror_only is True:
        cmd += " --mirror-only"         # use the mirror's index file only
    if installdeps is True:
        cmd += " --installdeps"         # only install dependencies
    if use_sudo is True:
        cmd += " --sudo"                # cpanm's own --sudo option
    return cmd
def _get_cpanm_path(module):
    """Return the user-supplied cpanm executable, or locate it on PATH."""
    executable = module.params['executable']
    if executable:
        return executable
    return module.get_bin_path('cpanm', True)
def main():
    """Module entry point: install the requested Perl library when missing."""
    arg_spec = dict(
        name=dict(default=None, required=False, aliases=['pkg']),
        from_path=dict(default=None, required=False, type='path'),
        notest=dict(default=False, type='bool'),
        locallib=dict(default=None, required=False, type='path'),
        mirror=dict(default=None, required=False),
        mirror_only=dict(default=False, type='bool'),
        installdeps=dict(default=False, type='bool'),
        system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
        version=dict(default=None, required=False),
        executable=dict(required=False, type='path'),
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        required_one_of=[['name', 'from_path']],
    )

    cpanm = _get_cpanm_path(module)
    params = module.params
    name = params['name']
    from_path = params['from_path']
    notest = module.boolean(params.get('notest', False))
    locallib = params['locallib']
    mirror = params['mirror']
    mirror_only = params['mirror_only']
    installdeps = params['installdeps']
    use_sudo = params['system_lib']
    version = params['version']

    changed = False
    # Only invoke cpanm when the module is not importable already.
    if not _is_package_installed(module, name, locallib, cpanm, version):
        cmd = _build_cmd_line(name, from_path, notest, locallib, mirror,
                              mirror_only, installdeps, cpanm, use_sudo)
        rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
        if rc_cpanm != 0:
            module.fail_json(msg=err_cpanm, cmd=cmd)
        # cpanm reports "is up to date" when nothing was actually installed.
        if ('is up to date' not in err_cpanm and
                'is up to date' not in out_cpanm):
            changed = True

    module.exit_json(changed=changed, binary=cpanm, name=name)
# import module snippets
from ansible.module_utils.basic import *
main()
| ansible/grab001/lib/python2.7/site-packages/ansible/modules/extras/packaging/language/cpanm.py | 6,774 | !/usr/bin/python -*- coding: utf-8 -*- (c) 2012, Franck Cuny <franck@lumberjaph.net> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. this code should use "%s" like everything else and just return early but not fixing all of it now. don't copy stuff like this import module snippets | 857 | en | 0.89165 |
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QDialog, QApplication, QWidget, QVBoxLayout, QHBoxLayout, QGroupBox
from PyQt5 import uic
from os.path import join, dirname, abspath
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt
#from PyQt5 import Qt
_ST_DLG = join(dirname(abspath(__file__)), 'startdialog.ui')
class StartDialog(QDialog):
    """Modal start-up dialog that reveals six labels one by one on a timer."""

    def __init__(self, parent):
        # NOTE(review): *parent* is accepted but never forwarded to the base
        # class (see the commented-out call below) — confirm the dialog is
        # intentionally parentless.
        super(StartDialog, self).__init__() # Call the inherited classes __init__ method
        #super().__init__(parent)
        uic.loadUi(_ST_DLG, self)
        self.hideText()
        # Index of the next label serialText() will reveal.
        self.index = 0
        self.labels = [self.label01, self.label02, self.label03, self.label04, self.label05, self.label06]
        self.timer = QTimer()
        self.timer.timeout.connect(self.serialText)
        # Fire roughly once per second (1060 ms interval).
        self.timer.start(1060)
        self.setWindowModality(Qt.ApplicationModal)
        # Blocks here until the dialog is closed.
        self.exec_()

    @Slot()
    def on_ok_clicked(self):
        # Stop revealing labels and dismiss the dialog.
        self.timer.stop()
        self.close()

    def hideText(self):
        # Hide all six labels so serialText() can reveal them in order.
        self.label01.hide()
        self.label02.hide()
        self.label03.hide()
        self.label04.hide()
        self.label05.hide()
        self.label06.hide()

    def serialText(self):
        # Timer callback: show the next label; after the last one the timer
        # is stopped (stop() call follows on the next line of the file).
        self.labels[self.index].show()
        if self.index < 5:
            self.index += 1
        else:
self.timer.stop() | startdialog.py | 1,374 | from PyQt5 import Qt Call the inherited classes __init__ methodsuper().__init__(parent) | 87 | en | 0.353805 |
from jinja2.environment import Environment
from jinja2.loaders import FileSystemLoader
class _MutatioEnvironmentMeta(type):
    """Metaclass hook for collecting mutatio tags for the admin page.

    Currently it only delegates to ``type.__init__``; no collection is
    performed yet.
    """

    def __init__(cls, name, bases, dct):
        super(_MutatioEnvironmentMeta, cls).__init__(name, bases, dct)
class MutatioEnvironment(Environment, metaclass=_MutatioEnvironmentMeta):
    """Jinja2 Environment that also remembers the owning ``app``.

    The ``app`` keyword is popped before the remaining arguments are passed
    through to :class:`jinja2.Environment`.
    """

    def __init__(self, *args, **kwargs):
        self.app = kwargs.pop('app', None)
        super(MutatioEnvironment, self).__init__(*args, **kwargs)
class MutatioFileSystemLoader(FileSystemLoader):
    """FileSystemLoader that triggers mutatio tag creation on construction.

    The first positional argument (the Jinja2 searchpath) is forwarded to
    :meth:`create_tags` after the base loader is initialised.
    """

    def __init__(self, *args, **kwargs):
        super(MutatioFileSystemLoader, self).__init__(*args, **kwargs)
        # This is too rigid: assumes the searchpath is always the first
        # positional argument (it may also arrive as a keyword).
        self.create_tags(args[0])

    def create_tags(self, searchpath=None):
        # BUG FIX: this method was called with one argument but declared with
        # none, raising TypeError at construction time.  It now accepts the
        # searchpath (default None keeps any external callers working).
        print("Tags creating......")
| demo/flask_mutatio/environment.py | 821 | Collects the mutatio tags into Mongo for the admin page.
This is too rigid | 76 | en | 0.698156 |
"""
An opportunity to explore how to make an EV3 Robot make sounds.
Authors: Dave Fisher, David Mutchler, Vibha Alangar,
their colleagues, and Leo Schoch-Spana.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import simple_rosebotics as rb
# ------------------------------------------------------------------------------
# DONE: 2. This is an ** OPTIONAL ** exercise.
# Using the DOT trick, add code to make_sounds to make the robot
# make sounds in various ways, using the instance variables of the robot's
# SOUND system.
# ------------------------------------------------------------------------------
def main():
    """Play a hard-coded tone sequence on the robot's sound system.

    Each tuple is passed to ToneMaker.play_tone_sequence; presumably it is
    (frequency_hz, duration_ms, delay_ms) — TODO confirm against the
    simple_rosebotics API, and note the final tuple carries only two items
    (no trailing delay).
    """
    tone_player = rb.ToneMaker()
    tone_player.play_tone_sequence([(392, 350, 100), (392, 350, 100), (392, 350, 100),
                                    (311.1, 250, 100), (466.2, 25, 100), (392, 350, 100),
                                    (311.1, 250, 100), (466.2, 25, 100), (392, 700, 100),
                                    (587.32, 350, 100), (587.32, 350, 100), (587.32, 350, 100),
                                    (622.26, 250, 100), (466.2, 25, 100), (369.99, 350, 100),
                                    (311.1, 250, 100), (466.2, 25, 100), (392, 700, 100),
                                    (784, 350, 100), (392, 250, 100), (392, 25, 100),
                                    (784, 350, 100), (739.98, 250, 100), (698.46, 25, 100),
                                    (659.26, 25, 100), (622.26, 25, 100), (659.26, 50, 400),
                                    (415.3, 25, 200), (554.36, 350, 100), (523.25, 250, 100),
                                    (493.88, 25, 100), (466.16, 25, 100), (440, 25, 100),
                                    (466.16, 50, 400), (311.13, 25, 200), (369.99, 350, 100),
                                    (311.13, 250, 100), (392, 25, 100), (466.16, 350, 100),
                                    (392, 250, 100), (466.16, 25, 100), (587.32, 700, 100),
                                    (784, 350, 100), (392, 250, 100), (392, 25, 100),
                                    (784, 350, 100), (739.98, 250, 100), (698.46, 25, 100),
                                    (659.26, 25, 100), (622.26, 25, 100), (659.26, 50, 400),
                                    (415.3, 25, 200), (554.36, 350, 100), (523.25, 250, 100),
                                    (493.88, 25, 100), (466.16, 25, 100), (440, 25, 100),
                                    (466.16, 50, 400), (311.13, 25, 200), (392, 350, 100),
                                    (311.13, 250, 100), (466.16, 25, 100), (392.00, 300, 150),
                                    (311.13, 250, 100), (466.16, 25, 100), (392, 700)])
def speak():
    """Have the robot say a short greeting aloud."""
    rb.SpeechMaker().speak('hello I am a robot')
def beep():
    """Sound a single beep on the robot."""
    rb.Beeper().beep()
speak()
| src/m2_sounds.py | 2,429 | An opportunity to explore how to make an EV3 Robot make sounds.
Authors: Dave Fisher, David Mutchler, Vibha Alangar,
their colleagues, and Leo Schoch-Spana.
DONE: 1. PUT YOUR NAME IN THE ABOVE LINE. ------------------------------------------------------------------------------ DONE: 2. This is an ** OPTIONAL ** exercise. Using the DOT trick, add code to make_sounds to make the robot make sounds in various ways, using the instance variables of the robot's SOUND system. ------------------------------------------------------------------------------ | 576 | en | 0.63498 |
"""
This file offers the methods to automatically retrieve the graph Lysinimicrobium luteum.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def LysinimicrobiumLuteum(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Lysinimicrobium luteum graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.5
            - physical.links.v11.5
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the Lysinimicrobium luteum graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Build the lazy retriever first, then invoke it to materialize the graph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="LysinimicrobiumLuteum",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| bindings/python/ensmallen/datasets/string/lysinimicrobiumluteum.py | 3,486 | Return new instance of the Lysinimicrobium luteum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Lysinimicrobium luteum graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
This file offers the methods to automatically retrieve the graph Lysinimicrobium luteum.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
pylint: disable=import-error | 2,693 | en | 0.709004 |
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def length(head) -> int:
    """Return the number of nodes in the linked list starting at *head*.

    :param head: first node of the list, or None for an empty list
    :return: the node count (0 when head is None)
    """
    count = 0
    node = head
    # Walk the chain until we fall off the end, counting each node visited.
    while node is not None:
        count += 1
        node = node.next
    return count
| kyu_7/fun_with_lists_length/length.py | 432 | The method length, which accepts a linked list
(head), and returns the length of the list.
:param head:
:return:
Created by Egor Kostan. GitHub: https://github.com/ikostan LinkedIn: https://www.linkedin.com/in/egor-kostan/ | 227 | en | 0.713314 |
from os.path import dirname, abspath
from datetime import timedelta
from fastapi import FastAPI, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from slowapi import _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from routers.tasks import router_tasks
from routers.checks import router_checks, limiter
from routers.topics import router_topic
from routers.auth import router_users
from database.config import database
from schemas.auth import Token
from utilities.docker_scripts import DockerUtils
from utilities.app_metadata import tags_metadata, app_metadata_description
from utilities.auth_scripts import AuthUtils
# FastAPI app instance (title/contact/license feed the generated OpenAPI docs)
app = FastAPI(title='Autograding-API',
              description=app_metadata_description,
              version='0.0.1',
              contact={
                  "name": "Maria Hladka",
                  "url": "https://github.com/imgVOID",
                  "email": "imgvoid@gmail.com",
              },
              license_info={
                  "name": "Apache 2.0",
                  "url": "https://www.apache.org/licenses/LICENSE-2.0.html",
              }, openapi_tags=tags_metadata)
# Save main app directory
APP_ROOT = dirname(abspath(__file__))
# Fix Docker dockerfile problems on the app startup
DockerUtils.fix_docker_bug()
# Connecting routers to the app
app.include_router(router_tasks)
app.include_router(router_checks)
app.include_router(router_topic)
app.include_router(router_users)
# Connecting rate limiter to the app: slowapi reads the shared limiter from
# app.state and the handler converts RateLimitExceeded into an HTTP response
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
@app.on_event("startup")
async def startup():
    """Connect to the database when the application starts."""
    await database.connect()
@app.on_event("shutdown")
async def shutdown():
    """Disconnect from the database when the application shuts down."""
    await database.disconnect()
@app.post("/auth/token", response_model=Token, summary="Grab the Bearer token")
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):
    """Issue a JWT bearer token for valid username/password credentials.

    Responds with 401 (including the ``WWW-Authenticate`` challenge header)
    when the credentials do not match a known user.
    """
    user = await AuthUtils.authenticate_user(form_data.username, form_data.password)
    if not user:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    # Token lifetime is centrally configured on AuthUtils; the subject claim
    # is the authenticated user's e-mail address.
    lifetime = timedelta(minutes=AuthUtils.ACCESS_TOKEN_EXPIRE_MINUTES)
    token = await AuthUtils.create_access_token(
        data={"sub": user.email}, expires_delta=lifetime
    )
    return {"access_token": token, "token_type": "bearer"}
| main.py | 2,563 | FastAPI app instance Save main app directory Fix Docker dockerfile problems on the app startup Connecting routers to the app Connecting rate limiter to the app | 159 | en | 0.763794 |
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
#
# mzbuild.py -- script to run materialized benchmarks
import argparse
import csv
import itertools
import multiprocessing
import os
import pathlib
import subprocess
import sys
import typing
import uuid
import webbrowser
def mzbuild_tag(git_ref: str) -> str:
    """Map a git reference to an mzbuild image tag.

    Empty / falsy references are passed through unchanged. A reference that
    exactly matches a git tag resolves to that tag name; anything else is
    resolved to its commit SHA, prefixed with "unstable-".
    """
    if not git_ref:
        return git_ref
    describe = ["git", "describe", "--exact-match", git_ref]
    try:
        output = subprocess.check_output(describe, stderr=subprocess.STDOUT)
        return output.strip().decode()
    except subprocess.CalledProcessError:
        # Not an exact tag: fall back to the verified commit SHA.
        sha = subprocess.check_output(["git", "rev-parse", "--verify", git_ref])
        return f"unstable-{sha.strip().decode()}"
def mzcompose_location(mz_root: str) -> pathlib.Path:
    """Return the absolute path to mzcompose.

    MZ_ROOT is expected to be set via pyactivate.
    """
    return pathlib.Path(mz_root) / "bin" / "mzcompose"
def main(args: argparse.Namespace) -> None:
    """Run the benchmark composition for every (iteration, worker count,
    git reference) combination and write one CSV result row per run to stdout.

    Requires the MZ_ROOT environment variable (set via pyactivate) to locate
    the repository checkout and the mzcompose wrapper.
    """
    # Ensure that we are working out of the git directory so that commands, such as git, will work
    mz_root = os.environ["MZ_ROOT"]
    os.chdir(mz_root)
    worker_counts = enumerate_cpu_counts()
    if args.no_benchmark_this_checkout:
        git_references = args.git_references
    else:
        # None stands for "the current checkout" and is benchmarked first
        git_references = [None, *args.git_references]
    if args.verbose:
        build_tags = [None, *[mzbuild_tag(ref) for ref in args.git_references]]
        print(f"DEBUG: num_iterations={args.num_measurements}")
        print(f"DEBUG: worker_counts={worker_counts}")
        print(f"DEBUG: mzbuild_tags={build_tags}")
    # BUGFIX: this previously compared against "benchmark-ci", which can never
    # match the parser's medium/ci/large choices, so the override was dead code.
    if args.size == "ci":
        # Explicitly override the worker counts for the CI benchmark
        worker_counts = [1]
    setup_benchmark = [
        mzcompose_location(mz_root),
        "--mz-find",
        args.composition,
        "run",
        f"setup-benchmark-{args.size}",
    ]
    run_benchmark = [
        mzcompose_location(mz_root),
        "--mz-find",
        args.composition,
        "run",
        f"run-benchmark-{args.size}",
    ]
    field_names = [
        "git_revision",
        "num_workers",
        "iteration",
        "seconds_taken",
        "rows_per_second",
        "grafana_url",
    ]
    results_writer = csv.DictWriter(sys.stdout, field_names)
    results_writer.writeheader()
    # We use check_output because check_call does not capture output
    try:
        subprocess.check_output(setup_benchmark, stderr=subprocess.STDOUT)
    except (subprocess.CalledProcessError,) as e:
        print(
            f"Setup benchmark failed! Output from failed command:\n{e.output.decode()}"
        )
        raise
    if args.web:
        try:
            web_command = [
                mzcompose_location(mz_root),
                "--mz-find",
                args.composition,
                "web",
                "perf-dash-web",
            ]
            subprocess.check_output(web_command, stderr=subprocess.STDOUT)
        except (subprocess.CalledProcessError,) as e:
            print(f"Failed to open browser to perf-dash:\n{e.output.decode()}")
            raise
    iterations = range(0, args.num_measurements)
    for (iteration, worker_count, git_ref) in itertools.product(
        iterations, worker_counts, git_references
    ):
        # Reset per-run results so a run that never prints a SUCCESS! line
        # cannot leak stale values from the previous run (or raise NameError
        # on the very first run) when the CSV row is written below.
        seconds_taken = rows_per_second = grafana_url = None
        # Sadly, environment variables are the only way to pass this information into containers
        # started by mzcompose
        child_env = os.environ.copy()
        child_env["MZ_ROOT"] = mz_root
        child_env["MZ_WORKERS"] = str(worker_count)
        child_env["MZBENCH_ID"] = args.benchmark_id
        child_env["MZBUILD_WAIT_FOR_IMAGE"] = "true"
        if git_ref:
            child_env["MZBENCH_GIT_REF"] = git_ref
            child_env["MZBUILD_MATERIALIZED_TAG"] = mzbuild_tag(git_ref)
        try:
            output = subprocess.check_output(
                run_benchmark, env=child_env, stderr=subprocess.STDOUT
            )
        except (subprocess.CalledProcessError,) as e:
            # TODO: Don't exit with error on simple benchmark failure
            # BUGFIX: this message previously said "Setup benchmark failed!"
            print(
                f"Run benchmark failed! Output from failed command:\n{e.output.decode()}"
            )
            raise
        # TODO: Replace parsing output from mzcompose with reading from a well known file or topic
        for line in output.decode().splitlines():
            if line.startswith("SUCCESS!"):
                for token in line.split(" "):
                    if token.startswith("seconds_taken="):
                        seconds_taken = token[len("seconds_taken=") :]
                    elif token.startswith("rows_per_sec="):
                        rows_per_second = token[len("rows_per_sec=") :]
            elif line.startswith("Grafana URL: "):
                grafana_url = line[len("Grafana URL: ") :]
        results_writer.writerow(
            {
                "git_revision": git_ref if git_ref else "None",
                "num_workers": worker_count,
                "iteration": iteration,
                "seconds_taken": seconds_taken,
                "rows_per_second": rows_per_second,
                "grafana_url": grafana_url,
            }
        )
def enumerate_cpu_counts() -> typing.List[int]:
    """Return the worker counts to benchmark on this machine, largest first.

    Roughly 42.5% of the reported CPU count is treated as the usable maximum
    (reserving headroom for system / background processing and counting
    physical cores only), and up to four evenly spaced worker counts are
    derived from it. This is meant to help explore how many CPUs should be
    dedicated to MZ_WORKERS, not as a prescription for the correct values.

    On a Macbook with 8 cores, this will return [6, 4, 3, 2].
    On a 56 core machine, this returns [24, 18, 12, 6].
    On a 96 core machine, this returns [41, 30, 20, 10].
    """
    # 15% overhead and count physical cores only
    usable_cpus = round(multiprocessing.cpu_count() * 0.425)
    trial_count = 4
    # Fractional points (1/4, 2/4, 3/4, 4/4) of the usable maximum,
    # deduplicated and ordered from the largest count down to the smallest.
    candidates = {
        round(step * usable_cpus / trial_count)
        for step in range(1, trial_count + 1)
    }
    return sorted(candidates, reverse=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-b",
"--benchmark-id",
type=str,
default=str(uuid.uuid4()),
help="Pseudo-unique identifier to use for this benchmark",
)
parser.add_argument(
"-n",
"--num-measurements",
type=int,
default=6,
help="Number of times to repeat each benchmark iteration",
)
parser.add_argument(
"-s",
"--size",
type=str,
default="medium",
choices=["medium", "ci", "large"],
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"--no-benchmark-this-checkout",
action="store_true",
help="Don't benchmark the version of materialized in this checkout",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging output"
)
parser.add_argument(
"-w",
"--web",
action="store_true",
help="Open a web browser showing results visualizations",
)
parser.add_argument(
"composition",
type=str,
help="Name of the mzcompose composition to run",
)
parser.add_argument(
"git_references",
type=str,
nargs="*",
help="Materialized builds to test as well, identified by git reference",
)
args = parser.parse_args()
main(args)
| misc/python/materialize/cli/mzbench.py | 8,125 | This program prints the number of CPU counts to benchmark on this machine.
We remove some percentage of CPU cores off the top for system / background processing. With
the CPUs that remain, we generate a list of evenly spaced worker counts. The list is limited
by the number of trials desired. This is meant to help us explore the number of CPUs that
should be dedicated to MZ_WORKERS, not as a prescription for the correct values to choose.
On a Macbook with 8 cores, this will return [6, 4, 3, 2].
On a 56 core machine, this returns [24, 18, 12, 6].
On a 96 core machine, this returns [41, 30, 20, 10].
Return the absolute path to mzcompose.
MZ_ROOT is expected to be set via pyactivate.
Copyright Materialize, Inc. All rights reserved. Use of this software is governed by the Business Source License included in the LICENSE file at the root of this repository. As of the Change Date specified in that file, in accordance with the Business Source License, use of this software will be governed by the Apache License, Version 2.0. mzbuild.py -- script to run materialized benchmarks Ensure that we are working out of the git directory so that commands, such as git, will work Explicitly override the worker counts for the CI benchmark We use check_output because check_call does not capture output Sadly, environment variables are the only way to pass this information into containers started by mzcompose TODO: Don't exit with error on simple benchmark failure TODO: Replace parsing output from mzcompose with reading from a well known file or topic 15% overhead and count physical cores only Yield the fractional points (4/4, 3/4, ...) between max and 0, not including 0 | 1,679 | en | 0.906497 |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Machine/LamSlotWind.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Machine/LamSlotWind
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .LamSlot import LamSlot
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Machine.LamSlotWind.build_geometry import build_geometry
except ImportError as error:
build_geometry = error
try:
from ..Methods.Machine.LamSlotWind.check import check
except ImportError as error:
check = error
try:
from ..Methods.Machine.LamSlotWind.comp_masses import comp_masses
except ImportError as error:
comp_masses = error
try:
from ..Methods.Machine.LamSlotWind.comp_surfaces import comp_surfaces
except ImportError as error:
comp_surfaces = error
try:
from ..Methods.Machine.LamSlotWind.comp_volumes import comp_volumes
except ImportError as error:
comp_volumes = error
try:
from ..Methods.Machine.LamSlotWind.get_pole_pair_number import get_pole_pair_number
except ImportError as error:
get_pole_pair_number = error
try:
from ..Methods.Machine.LamSlotWind.get_name_phase import get_name_phase
except ImportError as error:
get_name_phase = error
try:
from ..Methods.Machine.LamSlotWind.plot import plot
except ImportError as error:
plot = error
try:
from ..Methods.Machine.LamSlotWind.plot_winding import plot_winding
except ImportError as error:
plot_winding = error
try:
from ..Methods.Machine.LamSlotWind.comp_fill_factor import comp_fill_factor
except ImportError as error:
comp_fill_factor = error
try:
from ..Methods.Machine.LamSlotWind.comp_output_geo import comp_output_geo
except ImportError as error:
comp_output_geo = error
try:
from ..Methods.Machine.LamSlotWind.get_polar_eq import get_polar_eq
except ImportError as error:
get_polar_eq = error
try:
from ..Methods.Machine.LamSlotWind.comp_wind_function import comp_wind_function
except ImportError as error:
comp_wind_function = error
try:
from ..Methods.Machine.LamSlotWind.plot_mmf_unit import plot_mmf_unit
except ImportError as error:
plot_mmf_unit = error
try:
from ..Methods.Machine.LamSlotWind.comp_resistance_wind import comp_resistance_wind
except ImportError as error:
comp_resistance_wind = error
try:
from ..Methods.Machine.LamSlotWind.comp_angle_d_axis import comp_angle_d_axis
except ImportError as error:
comp_angle_d_axis = error
try:
from ..Methods.Machine.LamSlotWind.comp_mmf_unit import comp_mmf_unit
except ImportError as error:
comp_mmf_unit = error
try:
from ..Methods.Machine.LamSlotWind.comp_rot_dir import comp_rot_dir
except ImportError as error:
comp_rot_dir = error
try:
from ..Methods.Machine.LamSlotWind.comp_lengths_winding import comp_lengths_winding
except ImportError as error:
comp_lengths_winding = error
try:
from ..Methods.Machine.LamSlotWind.comp_number_phase_eq import comp_number_phase_eq
except ImportError as error:
comp_number_phase_eq = error
try:
from ..Methods.Machine.LamSlotWind.comp_periodicity import comp_periodicity
except ImportError as error:
comp_periodicity = error
from ._check import InitUnKnowClassError
from .Winding import Winding
from .Slot import Slot
from .Material import Material
from .Hole import Hole
from .Notch import Notch
class LamSlotWind(LamSlot):
    """Lamination with Slot filled with winding"""
    # NOTE(review): this class is produced by pyleecan's class generator (see
    # the file header warning); hand edits are lost on regeneration.
    VERSION = 1
    # Check ImportError to remove unnecessary dependencies in unused method
    # Each optional method import above either yielded a function or an
    # ImportError; failed imports are re-bound as properties that raise a
    # helpful ImportError only when the method is actually accessed.
    # cf Methods.Machine.LamSlotWind.build_geometry
    if isinstance(build_geometry, ImportError):
        build_geometry = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method build_geometry: "
                    + str(build_geometry)
                )
            )
        )
    else:
        build_geometry = build_geometry
    # cf Methods.Machine.LamSlotWind.check
    if isinstance(check, ImportError):
        check = property(
            fget=lambda x: raise_(
                ImportError("Can't use LamSlotWind method check: " + str(check))
            )
        )
    else:
        check = check
    # cf Methods.Machine.LamSlotWind.comp_masses
    if isinstance(comp_masses, ImportError):
        comp_masses = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_masses: " + str(comp_masses)
                )
            )
        )
    else:
        comp_masses = comp_masses
    # cf Methods.Machine.LamSlotWind.comp_surfaces
    if isinstance(comp_surfaces, ImportError):
        comp_surfaces = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_surfaces: " + str(comp_surfaces)
                )
            )
        )
    else:
        comp_surfaces = comp_surfaces
    # cf Methods.Machine.LamSlotWind.comp_volumes
    if isinstance(comp_volumes, ImportError):
        comp_volumes = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_volumes: " + str(comp_volumes)
                )
            )
        )
    else:
        comp_volumes = comp_volumes
    # cf Methods.Machine.LamSlotWind.get_pole_pair_number
    if isinstance(get_pole_pair_number, ImportError):
        get_pole_pair_number = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method get_pole_pair_number: "
                    + str(get_pole_pair_number)
                )
            )
        )
    else:
        get_pole_pair_number = get_pole_pair_number
    # cf Methods.Machine.LamSlotWind.get_name_phase
    if isinstance(get_name_phase, ImportError):
        get_name_phase = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method get_name_phase: "
                    + str(get_name_phase)
                )
            )
        )
    else:
        get_name_phase = get_name_phase
    # cf Methods.Machine.LamSlotWind.plot
    if isinstance(plot, ImportError):
        plot = property(
            fget=lambda x: raise_(
                ImportError("Can't use LamSlotWind method plot: " + str(plot))
            )
        )
    else:
        plot = plot
    # cf Methods.Machine.LamSlotWind.plot_winding
    if isinstance(plot_winding, ImportError):
        plot_winding = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method plot_winding: " + str(plot_winding)
                )
            )
        )
    else:
        plot_winding = plot_winding
    # cf Methods.Machine.LamSlotWind.comp_fill_factor
    if isinstance(comp_fill_factor, ImportError):
        comp_fill_factor = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_fill_factor: "
                    + str(comp_fill_factor)
                )
            )
        )
    else:
        comp_fill_factor = comp_fill_factor
    # cf Methods.Machine.LamSlotWind.comp_output_geo
    if isinstance(comp_output_geo, ImportError):
        comp_output_geo = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_output_geo: "
                    + str(comp_output_geo)
                )
            )
        )
    else:
        comp_output_geo = comp_output_geo
    # cf Methods.Machine.LamSlotWind.get_polar_eq
    if isinstance(get_polar_eq, ImportError):
        get_polar_eq = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method get_polar_eq: " + str(get_polar_eq)
                )
            )
        )
    else:
        get_polar_eq = get_polar_eq
    # cf Methods.Machine.LamSlotWind.comp_wind_function
    if isinstance(comp_wind_function, ImportError):
        comp_wind_function = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_wind_function: "
                    + str(comp_wind_function)
                )
            )
        )
    else:
        comp_wind_function = comp_wind_function
    # cf Methods.Machine.LamSlotWind.plot_mmf_unit
    if isinstance(plot_mmf_unit, ImportError):
        plot_mmf_unit = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method plot_mmf_unit: " + str(plot_mmf_unit)
                )
            )
        )
    else:
        plot_mmf_unit = plot_mmf_unit
    # cf Methods.Machine.LamSlotWind.comp_resistance_wind
    if isinstance(comp_resistance_wind, ImportError):
        comp_resistance_wind = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_resistance_wind: "
                    + str(comp_resistance_wind)
                )
            )
        )
    else:
        comp_resistance_wind = comp_resistance_wind
    # cf Methods.Machine.LamSlotWind.comp_angle_d_axis
    if isinstance(comp_angle_d_axis, ImportError):
        comp_angle_d_axis = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_angle_d_axis: "
                    + str(comp_angle_d_axis)
                )
            )
        )
    else:
        comp_angle_d_axis = comp_angle_d_axis
    # cf Methods.Machine.LamSlotWind.comp_mmf_unit
    if isinstance(comp_mmf_unit, ImportError):
        comp_mmf_unit = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_mmf_unit: " + str(comp_mmf_unit)
                )
            )
        )
    else:
        comp_mmf_unit = comp_mmf_unit
    # cf Methods.Machine.LamSlotWind.comp_rot_dir
    if isinstance(comp_rot_dir, ImportError):
        comp_rot_dir = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_rot_dir: " + str(comp_rot_dir)
                )
            )
        )
    else:
        comp_rot_dir = comp_rot_dir
    # cf Methods.Machine.LamSlotWind.comp_lengths_winding
    if isinstance(comp_lengths_winding, ImportError):
        comp_lengths_winding = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_lengths_winding: "
                    + str(comp_lengths_winding)
                )
            )
        )
    else:
        comp_lengths_winding = comp_lengths_winding
    # cf Methods.Machine.LamSlotWind.comp_number_phase_eq
    if isinstance(comp_number_phase_eq, ImportError):
        comp_number_phase_eq = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_number_phase_eq: "
                    + str(comp_number_phase_eq)
                )
            )
        )
    else:
        comp_number_phase_eq = comp_number_phase_eq
    # cf Methods.Machine.LamSlotWind.comp_periodicity
    if isinstance(comp_periodicity, ImportError):
        comp_periodicity = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use LamSlotWind method comp_periodicity: "
                    + str(comp_periodicity)
                )
            )
        )
    else:
        comp_periodicity = comp_periodicity
    # save and copy methods are available in all object
    save = save
    copy = copy
    # get_logger method is available in all object
    get_logger = get_logger
    def __init__(
        self,
        Ksfill=None,
        winding=-1,
        slot=-1,
        L1=0.35,
        mat_type=-1,
        Nrvd=0,
        Wrvd=0,
        Kf1=0.95,
        is_internal=True,
        Rint=0,
        Rext=1,
        is_stator=True,
        axial_vent=-1,
        notch=-1,
        init_dict=None,
        init_str=None,
    ):
        """Constructor of the class. Can be used in three ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and default value
            for pyleecan type, -1 will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with property names as keys
        - __init__ (init_str = s) s must be a string
            s is the file path to load
        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""
        if init_str is not None: # Load from a file
            init_dict = load_init_dict(init_str)[1]
        if init_dict is not None: # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "Ksfill" in list(init_dict.keys()):
                Ksfill = init_dict["Ksfill"]
            if "winding" in list(init_dict.keys()):
                winding = init_dict["winding"]
            if "slot" in list(init_dict.keys()):
                slot = init_dict["slot"]
            if "L1" in list(init_dict.keys()):
                L1 = init_dict["L1"]
            if "mat_type" in list(init_dict.keys()):
                mat_type = init_dict["mat_type"]
            if "Nrvd" in list(init_dict.keys()):
                Nrvd = init_dict["Nrvd"]
            if "Wrvd" in list(init_dict.keys()):
                Wrvd = init_dict["Wrvd"]
            if "Kf1" in list(init_dict.keys()):
                Kf1 = init_dict["Kf1"]
            if "is_internal" in list(init_dict.keys()):
                is_internal = init_dict["is_internal"]
            if "Rint" in list(init_dict.keys()):
                Rint = init_dict["Rint"]
            if "Rext" in list(init_dict.keys()):
                Rext = init_dict["Rext"]
            if "is_stator" in list(init_dict.keys()):
                is_stator = init_dict["is_stator"]
            if "axial_vent" in list(init_dict.keys()):
                axial_vent = init_dict["axial_vent"]
            if "notch" in list(init_dict.keys()):
                notch = init_dict["notch"]
        # Set the properties (value check and convertion are done in setter)
        self.Ksfill = Ksfill
        self.winding = winding
        # Call LamSlot init
        super(LamSlotWind, self).__init__(
            slot=slot,
            L1=L1,
            mat_type=mat_type,
            Nrvd=Nrvd,
            Wrvd=Wrvd,
            Kf1=Kf1,
            is_internal=is_internal,
            Rint=Rint,
            Rext=Rext,
            is_stator=is_stator,
            axial_vent=axial_vent,
            notch=notch,
        )
        # The class is frozen (in LamSlot init), for now it's impossible to
        # add new properties
    def __str__(self):
        """Convert this object into a readable string (for print)"""
        LamSlotWind_str = ""
        # Get the properties inherited from LamSlot
        LamSlotWind_str += super(LamSlotWind, self).__str__()
        LamSlotWind_str += "Ksfill = " + str(self.Ksfill) + linesep
        if self.winding is not None:
            tmp = self.winding.__str__().replace(linesep, linesep + "\t").rstrip("\t")
            LamSlotWind_str += "winding = " + tmp
        else:
            LamSlotWind_str += "winding = None" + linesep + linesep
        return LamSlotWind_str
    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        if type(other) != type(self):
            return False
        # Check the properties inherited from LamSlot
        if not super(LamSlotWind, self).__eq__(other):
            return False
        if other.Ksfill != self.Ksfill:
            return False
        if other.winding != self.winding:
            return False
        return True
    def compare(self, other, name="self"):
        """Compare two objects and return list of differences"""
        if type(other) != type(self):
            return ["type(" + name + ")"]
        diff_list = list()
        # Check the properties inherited from LamSlot
        diff_list.extend(super(LamSlotWind, self).compare(other, name=name))
        if other._Ksfill != self._Ksfill:
            diff_list.append(name + ".Ksfill")
        if (other.winding is None and self.winding is not None) or (
            other.winding is not None and self.winding is None
        ):
            diff_list.append(name + ".winding None mismatch")
        elif self.winding is not None:
            # Recurse into the winding object to list nested differences
            diff_list.extend(
                self.winding.compare(other.winding, name=name + ".winding")
            )
        return diff_list
    def __sizeof__(self):
        """Return the size in memory of the object (including all subobject)"""
        S = 0 # Full size of the object
        # Get size of the properties inherited from LamSlot
        S += super(LamSlotWind, self).__sizeof__()
        S += getsizeof(self.Ksfill)
        S += getsizeof(self.winding)
        return S
    def as_dict(self):
        """Convert this object into a JSON serializable dict (can be used in __init__)"""
        # Get the properties inherited from LamSlot
        LamSlotWind_dict = super(LamSlotWind, self).as_dict()
        LamSlotWind_dict["Ksfill"] = self.Ksfill
        if self.winding is None:
            LamSlotWind_dict["winding"] = None
        else:
            LamSlotWind_dict["winding"] = self.winding.as_dict()
        # The class name is added to the dict for deserialisation purpose
        # Overwrite the mother class name
        LamSlotWind_dict["__class__"] = "LamSlotWind"
        return LamSlotWind_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        self.Ksfill = None
        if self.winding is not None:
            self.winding._set_None()
        # Set to None the properties inherited from LamSlot
        super(LamSlotWind, self)._set_None()
    def _get_Ksfill(self):
        """getter of Ksfill"""
        return self._Ksfill
    def _set_Ksfill(self, value):
        """setter of Ksfill"""
        check_var("Ksfill", value, "float", Vmin=0, Vmax=1)
        self._Ksfill = value
    Ksfill = property(
        fget=_get_Ksfill,
        fset=_set_Ksfill,
        doc=u"""Imposed Slot Fill factor (if None, will be computed according to the winding and the slot)
    :Type: float
    :min: 0
    :max: 1
    """,
    )
    def _get_winding(self):
        """getter of winding"""
        return self._winding
    def _set_winding(self, value):
        """setter of winding

        Accepts a Winding object, a file path (str), an init dict carrying a
        __class__ key, or -1 for the default-constructed Winding."""
        if isinstance(value, str): # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            class_obj = import_class(
                "pyleecan.Classes", value.get("__class__"), "winding"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1: # Default constructor
            value = Winding()
        check_var("winding", value, "Winding")
        self._winding = value
        if self._winding is not None:
            self._winding.parent = self
    winding = property(
        fget=_get_winding,
        fset=_set_winding,
        doc=u"""Lamination's Winding
    :Type: Winding
    """,
    )
| pyleecan/Classes/LamSlotWind.py | 19,954 | Lamination with Slot filled with winding
Compare two objects (skip parent)
Constructor of the class. Can be used in three ways:
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object
Return the size in memory of the object (including all subobject)
Convert this object into a readable string (for print)
getter of Ksfill
getter of winding
setter of Ksfill
Set all the properties to None (except pyleecan object)
setter of winding
Convert this object in a json seriable dict (can be use in __init__)
Compare two objects and return list of differences
Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Machine/LamSlotWind
-*- coding: utf-8 -*- File generated according to Generator/ClassesRef/Machine/LamSlotWind.csv WARNING! All changes made in this file will be lost! Import all class method Try/catch to remove unnecessary dependencies in unused method Check ImportError to remove unnecessary dependencies in unused method cf Methods.Machine.LamSlotWind.build_geometry cf Methods.Machine.LamSlotWind.check cf Methods.Machine.LamSlotWind.comp_masses cf Methods.Machine.LamSlotWind.comp_surfaces cf Methods.Machine.LamSlotWind.comp_volumes cf Methods.Machine.LamSlotWind.get_pole_pair_number cf Methods.Machine.LamSlotWind.get_name_phase cf Methods.Machine.LamSlotWind.plot cf Methods.Machine.LamSlotWind.plot_winding cf Methods.Machine.LamSlotWind.comp_fill_factor cf Methods.Machine.LamSlotWind.comp_output_geo cf Methods.Machine.LamSlotWind.get_polar_eq cf Methods.Machine.LamSlotWind.comp_wind_function cf Methods.Machine.LamSlotWind.plot_mmf_unit cf Methods.Machine.LamSlotWind.comp_resistance_wind cf Methods.Machine.LamSlotWind.comp_angle_d_axis cf Methods.Machine.LamSlotWind.comp_mmf_unit cf Methods.Machine.LamSlotWind.comp_rot_dir cf Methods.Machine.LamSlotWind.comp_lengths_winding cf Methods.Machine.LamSlotWind.comp_number_phase_eq cf Methods.Machine.LamSlotWind.comp_periodicity save and copy methods are available in all object get_logger method is available in all object Load from a file Initialisation by dict Overwrite default value with init_dict content Set the properties (value check and convertion are done in setter) Call LamSlot init The class is frozen (in LamSlot init), for now it's impossible to add new properties Get the properties inherited from LamSlot Check the properties inherited from LamSlot Check the properties inherited from LamSlot Full size of the object Get size of the properties inherited from LamSlot Get the properties inherited from LamSlot The class name is added to the dict for deserialisation purpose Overwrite the mother class name Set to None the properties 
inherited from LamSlot Load from file Default constructor | 3,044 | en | 0.680798 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import nbconvert
import nbformat
from packaging import version
# Bokeh imports
from bokeh._testing.util.filesystem import with_temporary_file
from bokeh.document import Document
# Module under test
import bokeh.application.handlers.notebook as bahn # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def with_script_contents(contents, func):
def with_file_object(f):
nbsource = nbformat.writes(contents)
f.write(nbsource.encode("UTF-8"))
f.flush()
func(f.name)
with_temporary_file(with_file_object)
class Test_NotebookHandler:
# Public methods ----------------------------------------------------------
def test_runner_strips_line_magics(self, ipython) -> None:
doc = Document()
source = nbformat.v4.new_notebook()
source.cells.append(nbformat.v4.new_code_cell('%time'))
def load(filename):
handler = bahn.NotebookHandler(filename=filename)
handler.modify_document(doc)
assert handler._runner.failed is False
with_script_contents(source, load)
def test_runner_strips_cell_magics(self) -> None:
doc = Document()
source = nbformat.v4.new_notebook()
code = '%%timeit\n1+1'
source.cells.append(nbformat.v4.new_code_cell(code))
def load(filename):
handler = bahn.NotebookHandler(filename=filename)
handler.modify_document(doc)
assert handler._runner.failed is False
with_script_contents(source, load)
def test_runner_uses_source_from_filename(self) -> None:
doc = Document()
source = nbformat.v4.new_notebook()
result = {}
def load(filename):
handler = bahn.NotebookHandler(filename=filename)
handler.modify_document(doc)
result['handler'] = handler
result['filename'] = filename
with_script_contents(source, load)
assert result['handler']._runner.path == result['filename']
if version.parse(nbconvert.__version__) < version.parse("5.4"):
expected_source = "\n# coding: utf-8\n"
else:
expected_source = "#!/usr/bin/env python\n# coding: utf-8\n"
assert result['handler']._runner.source == expected_source
assert not doc.roots
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| tests/unit/bokeh/application/handlers/test_notebook__handlers.py | 3,876 | ----------------------------------------------------------------------------- Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors. All rights reserved. The full license is in the file LICENSE.txt, distributed with this software.---------------------------------------------------------------------------------------------------------------------------------------------------------- Boilerplate----------------------------------------------------------------------------- isort:skip----------------------------------------------------------------------------- Imports----------------------------------------------------------------------------- External imports Bokeh imports Module under test isort:skip----------------------------------------------------------------------------- Setup---------------------------------------------------------------------------------------------------------------------------------------------------------- General API---------------------------------------------------------------------------------------------------------------------------------------------------------- Dev API----------------------------------------------------------------------------- Public methods --------------------------------------------------------------------------------------------------------------------------------------- Private API---------------------------------------------------------------------------------------------------------------------------------------------------------- Code----------------------------------------------------------------------------- | 1,605 | en | 0.198437 |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Generate rules for snapshoting"""
from ggrc.snapshotter.datastructures import Attr
class Types(object):
"""Get default types for snapshotting"""
# pylint: disable=too-few-public-methods
all = {
"AccessGroup",
"AccountBalance",
"Contract",
"Control",
"DataAsset",
"Facility",
"Market",
"Objective",
"OrgGroup",
"Policy",
"Process",
"Product",
"Project",
"Regulation",
"Requirement",
"Standard",
"System",
"Vendor",
"Risk",
"TechnologyEnvironment",
"Threat",
"Metric",
"ProductGroup",
"KeyReport",
}
parents = {
"Audit",
}
scoped = {
"Assessment",
}
trans_scope = {
"Issue",
}
ignore = {
"Assessment",
"AssessmentTemplate",
"Issue",
"Workflow",
"Audit",
"Person"
}
external = {
"AccessGroup",
"AccountBalance",
"DataAsset",
"Facility",
"KeyReport",
"Market",
"Metric",
"OrgGroup",
"Process",
"Product",
"ProductGroup",
"Project",
"System",
"Vendor",
"TechnologyEnvironment",
"Control",
"Risk",
}
@classmethod
def internal_types(cls):
"""Return set of internal type names."""
return cls.all - cls.external
@classmethod
def external_types(cls):
"""Return set of external type names."""
return cls.external
class Rules(object):
"""Returns a dictionary of rules
Expected format of rule_list is the following:
[
{"master_object_type", ...},
{"first degree object types"},
{"second degree object types"}
]
For all master objects of type master_object_type, it will gather all
related objects from first degree object types (which can be related via
relationships table or via direct mapping (in which case you should wrap
the attribute name in Attr) and gather all of first degrees related objects
of the types listed in the second degree object type.
Example:
[
{"object_type_1", ["object_type_2", ...]},
{"type_of_related_object_or_attribute", ["second..."]},
{"type_of_object_to_snapshot_1", ["type_2", ...]}
]
From it, it will build a dictionary of format:
{
"parent_type": {
"fst": {"type_of_related_object_or_attribute_1", ...},
"snd": {"type_1", "type_2", ...}
},
...
}
"""
# pylint: disable=too-few-public-methods
def __init__(self, rule_list):
self.rules = dict()
for parents, fstdeg, snddeg in rule_list:
for parent in parents:
self.rules[parent] = {
"fst": fstdeg,
"snd": snddeg
}
DEFAULT_RULE_LIST = [
[
{"Audit"},
{Attr("program")},
Types.all - Types.ignore
]
]
def get_rules(rule_list=None):
"""Get the rules governing the snapshot creation
Args:
rule_list: List of rules
Returns:
Rules object with attribute `rules`. See Rules object for detailed doc.
"""
if not rule_list:
rule_list = DEFAULT_RULE_LIST
return Rules(rule_list)
| src/ggrc/snapshotter/rules.py | 3,235 | Returns a dictionary of rules
Expected format of rule_list is the following:
[
{"master_object_type", ...},
{"first degree object types"},
{"second degree object types"}
]
For all master objects of type master_object_type, it will gather all
related objects from first degree object types (which can be related via
relationships table or via direct mapping (in which case you should wrap
the attribute name in Attr) and gather all of first degrees related objects
of the types listed in the second degree object type.
Example:
[
{"object_type_1", ["object_type_2", ...]},
{"type_of_related_object_or_attribute", ["second..."]},
{"type_of_object_to_snapshot_1", ["type_2", ...]}
]
From it, it will build a dictionary of format:
{
"parent_type": {
"fst": {"type_of_related_object_or_attribute_1", ...},
"snd": {"type_1", "type_2", ...}
},
...
}
Get default types for snapshotting
Return set of external type names.
Get the rules governing the snapshot creation
Args:
rule_list: List of rules
Returns:
Rules object with attribute `rules`. See Rules object for detailed doc.
Return set of internal type names.
Generate rules for snapshoting
Copyright (C) 2020 Google Inc. Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> pylint: disable=too-few-public-methods pylint: disable=too-few-public-methods | 1,370 | en | 0.72911 |
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.0 Real-time Push Cloud API
## -----------------------------------
try: import json
except ImportError: import simplejson as json
import time
import hashlib
import urllib2
import uuid
class Pubnub():
def __init__(
self,
publish_key,
subscribe_key,
secret_key = False,
ssl_on = False,
origin = 'pubsub.pubnub.com',
pres_uuid = None
) :
"""
#**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
#* @param boolean ssl required for 2048 bit encrypted messages.
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
## Initiat Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
"""
self.origin = origin
self.limit = 1800
self.publish_key = publish_key
self.subscribe_key = subscribe_key
self.secret_key = secret_key
self.ssl = ssl_on
if self.ssl :
self.origin = 'https://' + self.origin
else :
self.origin = 'http://' + self.origin
self.uuid = pres_uuid or str(uuid.uuid4())
if not isinstance(self.uuid, basestring):
raise AttributeError("pres_uuid must be a string")
def publish( self, args ) :
"""
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
"""
## Fail if bad input.
if not (args['channel'] and args['message']) :
return [ 0, 'Missing Channel or Message' ]
## Capture User Input
channel = str(args['channel'])
message = json.dumps(args['message'], separators=(',',':'))
## Sign Message
if self.secret_key :
signature = hashlib.md5('/'.join([
self.publish_key,
self.subscribe_key,
self.secret_key,
channel,
message
])).hexdigest()
else :
signature = '0'
## Send Message
return self._request([
'publish',
self.publish_key,
self.subscribe_key,
signature,
channel,
'0',
message
])
def subscribe( self, args ) :
"""
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
## Begin Subscribe
while True :
timetoken = 'timetoken' in args and args['timetoken'] or 0
try :
## Wait for Message
response = self._request(self._encode([
'subscribe',
subscribe_key,
channel,
'0',
str(timetoken)
])+['?uuid='+self.uuid], encode=False)
messages = response[0]
args['timetoken'] = response[1]
## If it was a timeout
if not len(messages) :
continue
## Run user Callback and Reconnect if user permits.
for message in messages :
if not callback(message) :
return
except Exception:
time.sleep(1)
return True
def presence( self, args ) :
"""
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
'callback' : receive
})
"""
## Fail if missing channel
if not 'channel' in args :
raise Exception('Missing Channel.')
return False
## Fail if missing callback
if not 'callback' in args :
raise Exception('Missing Callback.')
return False
## Capture User Input
channel = str(args['channel'])
callback = args['callback']
subscribe_key = args.get('subscribe_key') or self.subscribe_key
return self.subscribe({'channel': channel+'-pnpres', 'subscribe_key':subscribe_key, 'callback': callback})
def here_now( self, args ) :
"""
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
"""
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get Presence Here Now
return self._request([
'v2','presence',
'sub_key', self.subscribe_key,
'channel', channel
]);
def history( self, args ) :
"""
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
"""
## Capture User Input
limit = args.has_key('limit') and int(args['limit']) or 10
channel = str(args['channel'])
## Fail if bad input.
if not channel :
raise Exception('Missing Channel')
return False
## Get History
return self._request([
'history',
self.subscribe_key,
channel,
'0',
str(limit)
]);
def time(self) :
"""
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
"""
return self._request([
'time',
'0'
])[0]
def _encode( self, request ) :
return [
"".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
hex(ord(ch)).replace( '0x', '%' ).upper() or
ch for ch in list(bit)
]) for bit in request]
def _request( self, request, origin = None, encode = True ) :
## Build URL
url = (origin or self.origin) + '/' + "/".join(
encode and self._encode(request) or request
)
## Send Request Expecting JSONP Response
try:
try: usock = urllib2.urlopen( url, None, 200 )
except TypeError: usock = urllib2.urlopen( url, None )
response = usock.read()
usock.close()
return json.loads( response )
except:
return None
| python/3.2/Pubnub.py | 9,165 | #**
#* Pubnub
#*
#* Init the Pubnub Client API
#*
#* @param string publish_key required key to send messages.
#* @param string subscribe_key required key to receive messages.
#* @param string secret_key optional key to sign messages.
#* @param boolean ssl required for 2048 bit encrypted messages.
#* @param string origin PUBNUB Server Origin.
#* @param string pres_uuid optional identifier for presence (auto-generated if not supplied)
#**
## Initiat Class
pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
#**
#* Here Now
#*
#* Load current occupancy from a channel.
#*
#* @param array args with 'channel'.
#* @return mixed false on fail, array on success.
#*
## Presence Example
here_now = pubnub.here_now({
'channel' : 'hello_world',
})
print(here_now['occupancy'])
print(here_now['uuids'])
#**
#* History
#*
#* Load history from a channel.
#*
#* @param array args with 'channel' and 'limit'.
#* @return mixed false on fail, array on success.
#*
## History Example
history = pubnub.history({
'channel' : 'hello_world',
'limit' : 1
})
print(history)
#**
#* presence
#*
#* This is BLOCKING.
#* Listen for presence events on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Presence Example
def pres_event(message) :
print(message)
return True
pubnub.presence({
'channel' : 'hello_world',
'callback' : receive
})
#**
#* Publish
#*
#* Send a message to a channel.
#*
#* @param array args with channel and message.
#* @return array success information.
#**
## Publish Example
info = pubnub.publish({
'channel' : 'hello_world',
'message' : {
'some_text' : 'Hello my World'
}
})
print(info)
#**
#* Subscribe
#*
#* This is BLOCKING.
#* Listen for a message on a channel.
#*
#* @param array args with channel and callback.
#* @return false on fail, array on success.
#**
## Subscribe Example
def receive(message) :
print(message)
return True
pubnub.subscribe({
'channel' : 'hello_world',
'callback' : receive
})
#**
#* Time
#*
#* Timestamp from PubNub Cloud.
#*
#* @return int timestamp.
#*
## PubNub Server Time Example
timestamp = pubnub.time()
print(timestamp)
www.pubnub.com - PubNub Real-time push service in the cloud. coding=utf8 PubNub Real-time Push APIs and Notifications Framework Copyright (c) 2010 Stephen Blum http://www.pubnub.com/ ----------------------------------- PubNub 3.0 Real-time Push Cloud API ----------------------------------- Fail if bad input. Capture User Input Sign Message Send Message Fail if missing channel Fail if missing callback Capture User Input Begin Subscribe Wait for Message If it was a timeout Run user Callback and Reconnect if user permits. Fail if missing channel Fail if missing callback Capture User Input Fail if bad input. Get Presence Here Now Capture User Input Fail if bad input. Get History Build URL Send Request Expecting JSONP Response | 2,957 | en | 0.355728 |
# -*- coding: utf-8 -*-
"""
=======================
Cmap and Custom Bins
=======================
Invoke the cmap colour scheme and choose how many bins to use with your data.
By default, the cmap colour scheme is used if you have many, many chains. You can
enable it before that point if you wish and pass in the cmap you want to use.
You can also pick how many bins you want to display your data with.
You can see that in this example, we pick too many bins and would not get good
summaries. If you simply want more (or less) bins than the default estimate,
if you input a float instead of an integer, the number of bins will simply scale
by that amount. For example, if the estimated picks 20 bins, and you set ``bins=1.5``
your plots and summaries would be calculated with 30 bins.
"""
import numpy as np
from numpy.random import normal, random, multivariate_normal
from chainconsumer import ChainConsumer
np.random.seed(0)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data2 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data3 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
cov = 0.3 * random(size=(3, 3)) + np.identity(3)
data4 = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)
c = ChainConsumer()
c.add_chain(data, name="A")
c.add_chain(data2, name="B")
c.add_chain(data3, name="C")
c.add_chain(data4, name="D")
c.configure(bins=50, cmap="plasma")
fig = c.plotter.plot(figsize=0.75) # Also making the figure 75% of its original size, for fun
fig.set_size_inches(3 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
| examples/customisations/plot_rainbow_serif_bins.py | 1,797 | =======================
Cmap and Custom Bins
=======================
Invoke the cmap colour scheme and choose how many bins to use with your data.
By default, the cmap colour scheme is used if you have many, many chains. You can
enable it before that point if you wish and pass in the cmap you want to use.
You can also pick how many bins you want to display your data with.
You can see that in this example, we pick too many bins and would not get good
summaries. If you simply want more (or less) bins than the default estimate,
if you input a float instead of an integer, the number of bins will simply scale
by that amount. For example, if the estimated picks 20 bins, and you set ``bins=1.5``
your plots and summaries would be calculated with 30 bins.
-*- coding: utf-8 -*- Also making the figure 75% of its original size, for fun Resize fig for doco. You don't need this. | 883 | en | 0.931906 |
from typing import List
from mdrsl.data_structures.rules.generalized_rule_part import GeneralizedAntecedent
from mdrsl.data_structures.item import Literal, NEQLiteral, EQLiteral
from mdrsl.rule_generation.decision_tree_conversion.attribute_id_to_name_conversion import DecisionTreeFeatureIDConverter
class TreeEdge:
def __init__(self, feature_id: int, threshold: float, is_left: bool):
self.feature_id: int = feature_id
self.threshold: float = threshold
self.is_left: bool = is_left
def __str__(self):
output_str = 'f(' + str(self.feature_id) + ')'
if self.is_left:
output_str += '<='
else:
output_str += '>'
output_str += str(self.threshold)
if self.is_left:
output_str += ' (L)'
else:
output_str += ' (R)'
return output_str
def __repr__(self):
return self.__str__()
class AntecedentBuilder:
def __init__(self, one_hot_encoded_feature_names: List[str], ohe_prefix_separator: str):
self.ohe_prefix_separator: str = ohe_prefix_separator
self.decision_tree_feature_id_converter = DecisionTreeFeatureIDConverter(one_hot_encoded_feature_names)
def convert_edges(self, edges: List[TreeEdge]):
antecedent_literals: List[Literal] = []
for tree_edge in edges:
lit = self.convert(tree_edge)
antecedent_literals.append(lit)
antecedent = GeneralizedAntecedent(antecedent_literals)
return antecedent
def convert(self, tree_edge: TreeEdge):
if tree_edge.threshold != 0.5:
print("Unexpected tree edge threshold value: " + str(tree_edge.threshold))
# find the descriptive attr as used for input for the decision tree
dt_descriptive_attribute = self.decision_tree_feature_id_converter.convert(tree_edge.feature_id)
splitted_string = dt_descriptive_attribute.split(self.ohe_prefix_separator)
if len(splitted_string) == 1:
feature_name = dt_descriptive_attribute
if tree_edge.is_left:
feature_value = str(0)
else:
feature_value = str(1)
return EQLiteral(attribute=feature_name, value=feature_value)
elif len(splitted_string) == 2:
feature_name = splitted_string[0]
feature_value = splitted_string[1]
if tree_edge.is_left:
return NEQLiteral(attribute=feature_name, value=feature_value)
else:
return EQLiteral(attribute=feature_name, value=feature_value)
else:
raise Exception("Unexpected feature name:" + dt_descriptive_attribute)
| mdrsl/rule_generation/decision_tree_conversion/tree_edge.py | 2,703 | find the descriptive attr as used for input for the decision tree | 65 | en | 0.91403 |
import boto3
import logging
import argparse
import requests
import xmltodict
import asyncio
import datetime
import time
import re
import json
import math
from sqlalchemy import func
from dateutil.relativedelta import relativedelta
from requests.exceptions import ConnectionError, ReadTimeout
from urllib3.exceptions import ReadTimeoutError
from dataactcore.logging import configure_logging
from dataactcore.config import CONFIG_BROKER
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.exc import IntegrityError
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import SubTierAgency, CountryCode, States, CountyCode, Zips, DUNS
from dataactcore.models.stagingModels import DetachedAwardProcurement
from dataactcore.models.jobModels import FPDSUpdate
from dataactcore.utils.business_categories import get_business_categories
from dataactcore.models.jobModels import Submission # noqa
from dataactcore.models.userModel import User # noqa
from dataactvalidator.health_check import create_app
from dataactvalidator.filestreaming.csvLocalWriter import CsvLocalWriter
# Base FPDS ATOM feed endpoints: the PUBLIC feed serves contract records, the DELETED feed
# serves records that have been removed at the source (a query string is appended to each)
feed_url = "https://www.fpds.gov/ezsearch/FEEDS/ATOM?FEEDNAME=PUBLIC&templateName=1.5.2&q="
delete_url = "https://www.fpds.gov/ezsearch/FEEDS/ATOM?FEEDNAME=DELETED&templateName=1.5.2&q="
# Translate the 3-character country codes FPDS uses for certain locations to the 2-character
# codes stored in our domain tables; every X* entry maps to 'UM' (US Minor Outlying Islands)
country_code_map = {'USA': 'US', 'ASM': 'AS', 'GUM': 'GU', 'MNP': 'MP', 'PRI': 'PR', 'VIR': 'VI', 'FSM': 'FM',
                    'MHL': 'MH', 'PLW': 'PW', 'XBK': 'UM', 'XHO': 'UM', 'XJV': 'UM', 'XJA': 'UM', 'XKR': 'UM',
                    'XPL': 'UM', 'XMW': 'UM', 'XWK': 'UM'}
# Namespace URIs mapped to None — presumably passed to xmltodict so element names come back
# without namespace prefixes (usage is outside this chunk; confirm at the parse call)
FPDS_NAMESPACES = {'http://www.fpdsng.com/FPDS': None,
                   'http://www.w3.org/2005/Atom': None,
                   'https://www.fpds.gov/FPDS': None}
# Used for asyncio get requests against the ATOM feed
# NOTE(review): looks like entries-per-page and the in-flight request cap — confirm at the call sites
MAX_ENTRIES = 10
MAX_REQUESTS_AT_ONCE = 100
# module-level logger; silence the per-connection INFO chatter from the requests library
logger = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
def list_data(data):
    """ Wrap a lone dict in a list so callers can always iterate uniformly.

        Args:
            data: a dict or a list of dicts parsed from the feed

        Returns:
            the input unchanged if it was not a dict, otherwise a single-element list
            containing the dict
    """
    return [data] if isinstance(data, dict) else data
def extract_text(data_val):
    """ Pull the text content out of a parsed XML node.

        xmltodict hands back either a plain string or a dict whose '#text' key holds the
        value (when the element carried attributes).

        Args:
            data_val: string or dict from the parsed feed

        Returns:
            the stripped text content (unstripped if '#text' held a non-string)
    """
    # exact `type(...) is str` checks preserved from the original contract
    value = data_val if type(data_val) is str else data_val['#text']
    return value.strip() if type(value) is str else value
def is_valid_zip(zip_code):
    """ Determine whether a string is a valid US zip code.

        Accepts 5 digits, optionally followed by 4 more with or without a hyphen
        (e.g. '12345', '12345-6789', '123456789').

        Args:
            zip_code: string to validate

        Returns:
            True if the string matches the US zip format, False otherwise
    """
    # raw string: the original '^\d...' non-raw pattern is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxWarning as of Python 3.12)
    return bool(re.match(r'^\d{5}(-?\d{4})?$', zip_code))
def get_county_by_zip(sess, zip_code):
    """ Look up the county number for a zip code via the Zips domain table.

        Args:
            sess: database session
            zip_code: 5- or 9-digit zip string

        Returns:
            the county number of the best match found, or None when the zip is invalid
            or has no entry in the table
    """
    # toss anything that isn't a valid US zip outright
    if not is_valid_zip(zip_code):
        return None

    zip5 = zip_code[:5]
    match = None
    # a 9-digit zip gets a shot at an exact zip5 + zip_last4 row first
    if len(zip_code) > 5:
        match = sess.query(Zips).filter_by(zip5=zip5, zip_last4=zip_code[-4:]).first()
    # fall back to (or start with) the first row matching just the 5-digit prefix
    if not match:
        match = sess.query(Zips).filter_by(zip5=zip5).first()
    return match.county_number if match else None
def award_id_values(data, obj):
    """ Map the awardID section of a parsed FPDS award entry onto the flat output dict.

        Args:
            data: parsed awardID xml content
            obj: dict of parsed values to add to

        Returns:
            the updated obj dict (every target key is set, None when absent)
    """
    def _grab(path, target):
        # walk the nested dicts; a missing level or non-dict node means the value is absent
        node = data
        try:
            for step in path:
                node = node[step]
            obj[target] = extract_text(node)
        except (KeyError, TypeError):
            obj[target] = None

    _grab(('awardContractID', 'modNumber'), 'award_modification_amendme')
    _grab(('awardContractID', 'transactionNumber'), 'transaction_number')
    _grab(('awardContractID', 'PIID'), 'piid')
    _grab(('awardContractID', 'agencyID'), 'agency_id')
    _grab(('referencedIDVID', 'agencyID'), 'referenced_idv_agency_iden')
    _grab(('referencedIDVID', 'modNumber'), 'referenced_idv_modificatio')
    _grab(('referencedIDVID', 'PIID'), 'parent_award_id')
    # the referenced IDV agency name lives on the agencyID element's attribute
    _grab(('referencedIDVID', 'agencyID', '@name'), 'referenced_idv_agency_desc')
    return obj
def contract_id_values(data, obj):
    """ Map the contractID section of a parsed FPDS IDV entry onto the flat output dict.

        Args:
            data: parsed contractID xml content
            obj: dict of parsed values to add to

        Returns:
            the updated obj dict (every target key is set, None when absent)
    """
    def _grab(path, target):
        # walk the nested dicts; a missing level or non-dict node means the value is absent
        node = data
        try:
            for step in path:
                node = node[step]
            obj[target] = extract_text(node)
        except (KeyError, TypeError):
            obj[target] = None

    _grab(('IDVID', 'modNumber'), 'award_modification_amendme')
    _grab(('IDVID', 'PIID'), 'piid')
    _grab(('IDVID', 'agencyID'), 'agency_id')
    _grab(('referencedIDVID', 'agencyID'), 'referenced_idv_agency_iden')
    _grab(('referencedIDVID', 'modNumber'), 'referenced_idv_modificatio')
    _grab(('referencedIDVID', 'PIID'), 'parent_award_id')
    # the referenced IDV agency name lives on the agencyID element's attribute
    _grab(('referencedIDVID', 'agencyID', '@name'), 'referenced_idv_agency_desc')
    return obj
def competition_values(data, obj):
    """ Map the competition section of a parsed FPDS entry onto the flat output dict.

        Args:
            data: parsed competition xml content
            obj: dict of parsed values to add to

        Returns:
            the updated obj dict (every target key is set, None when absent)
    """
    def _grab(xml_key, target, description=False):
        # missing or malformed nodes are routine in the feed; those columns just stay None
        try:
            node = data[xml_key]
            obj[target] = extract_text(node['@description'] if description else node)
        except (KeyError, TypeError):
            obj[target] = None

    # element value -> column
    plain_fields = [('A76Action', 'a_76_fair_act_action'),
                    ('commercialItemAcquisitionProcedures', 'commercial_item_acquisitio'),
                    ('commercialItemTestProgram', 'commercial_item_test_progr'),
                    ('evaluatedPreference', 'evaluated_preference'),
                    ('extentCompeted', 'extent_competed'),
                    ('fedBizOpps', 'fed_biz_opps'),
                    ('localAreaSetAside', 'local_area_set_aside'),
                    ('numberOfOffersReceived', 'number_of_offers_received'),
                    ('priceEvaluationPercentDifference', 'price_evaluation_adjustmen'),
                    ('reasonNotCompeted', 'other_than_full_and_open_c'),
                    ('research', 'research'),
                    ('smallBusinessCompetitivenessDemonstrationProgram', 'small_business_competitive'),
                    ('solicitationProcedures', 'solicitation_procedures'),
                    ('statutoryExceptionToFairOpportunity', 'fair_opportunity_limited_s'),
                    ('typeOfSetAside', 'type_set_aside')]
    # element @description attribute -> column
    desc_fields = [('A76Action', 'a_76_fair_act_action_desc'),
                   ('commercialItemAcquisitionProcedures', 'commercial_item_acqui_desc'),
                   ('commercialItemTestProgram', 'commercial_item_test_desc'),
                   ('evaluatedPreference', 'evaluated_preference_desc'),
                   ('extentCompeted', 'extent_compete_description'),
                   ('fedBizOpps', 'fed_biz_opps_description'),
                   ('localAreaSetAside', 'local_area_set_aside_desc'),
                   ('reasonNotCompeted', 'other_than_full_and_o_desc'),
                   ('research', 'research_description'),
                   ('solicitationProcedures', 'solicitation_procedur_desc'),
                   ('statutoryExceptionToFairOpportunity', 'fair_opportunity_limi_desc'),
                   ('typeOfSetAside', 'type_set_aside_description')]

    for xml_key, target in plain_fields:
        _grab(xml_key, target)
    for xml_key, target in desc_fields:
        _grab(xml_key, target, description=True)
    return obj
def contract_data_values(data, obj, atom_type):
    """ Map the contractData section of a parsed FPDS entry onto the flat output dict.

        Args:
            data: parsed contractData xml content
            obj: dict of parsed values to add to
            atom_type: "award" or an IDV feed type; controls which columns the shared
                contractActionType element feeds and whether the IDC single/multiple
                element is pulled

        Returns:
            the updated obj dict (every target key is set, None when absent)
    """
    def _grab(xml_key, target, description=False):
        # missing or malformed nodes are routine in the feed; those columns just stay None
        try:
            node = data[xml_key]
            obj[target] = extract_text(node['@description'] if description else node)
        except (KeyError, TypeError):
            obj[target] = None

    # element value -> column
    plain_fields = [('consolidatedContract', 'consolidated_contract'),
                    ('contingencyHumanitarianPeacekeepingOperation', 'contingency_humanitarian_o'),
                    ('contractFinancing', 'contract_financing'),
                    ('costAccountingStandardsClause', 'cost_accounting_standards'),
                    ('costOrPricingData', 'cost_or_pricing_data'),
                    ('descriptionOfContractRequirement', 'award_description'),
                    ('GFE-GFP', 'government_furnished_prope'),
                    ('inherentlyGovernmentalFunction', 'inherently_government_func'),
                    ('majorProgramCode', 'major_program'),
                    ('multiYearContract', 'multi_year_contract'),
                    ('nationalInterestActionCode', 'national_interest_action'),
                    ('numberOfActions', 'number_of_actions'),
                    ('performanceBasedServiceContract', 'performance_based_service'),
                    ('programAcronym', 'program_acronym'),
                    ('purchaseCardAsPaymentMethod', 'purchase_card_as_payment_m'),
                    ('reasonForModification', 'action_type'),
                    ('referencedIDVMultipleOrSingle', 'referenced_mult_or_single'),
                    ('referencedIDVType', 'referenced_idv_type'),
                    ('seaTransportation', 'sea_transportation'),
                    ('solicitationID', 'solicitation_identifier'),
                    ('typeOfContractPricing', 'type_of_contract_pricing'),
                    ('typeOfIDC', 'type_of_idc'),
                    ('undefinitizedAction', 'undefinitized_action')]
    # element @description attribute -> column
    desc_fields = [('consolidatedContract', 'consolidated_contract_desc'),
                   ('contingencyHumanitarianPeacekeepingOperation', 'contingency_humanitar_desc'),
                   ('contractFinancing', 'contract_financing_descrip'),
                   ('costAccountingStandardsClause', 'cost_accounting_stand_desc'),
                   ('costOrPricingData', 'cost_or_pricing_data_desc'),
                   ('GFE-GFP', 'government_furnished_desc'),
                   ('inherentlyGovernmentalFunction', 'inherently_government_desc'),
                   ('multiYearContract', 'multi_year_contract_desc'),
                   ('nationalInterestActionCode', 'national_interest_desc'),
                   ('performanceBasedServiceContract', 'performance_based_se_desc'),
                   ('purchaseCardAsPaymentMethod', 'purchase_card_as_paym_desc'),
                   ('reasonForModification', 'action_type_description'),
                   ('referencedIDVMultipleOrSingle', 'referenced_mult_or_si_desc'),
                   ('referencedIDVType', 'referenced_idv_type_desc'),
                   ('seaTransportation', 'sea_transportation_desc'),
                   ('typeOfContractPricing', 'type_of_contract_pric_desc'),
                   ('typeOfIDC', 'type_of_idc_description'),
                   ('undefinitizedAction', 'undefinitized_action_desc')]
    # contractActionType feeds different columns for awards vs IDVs; IDVs also carry
    # the multiple/single award IDC element
    if atom_type == "award":
        plain_fields.append(('contractActionType', 'contract_award_type'))
        desc_fields.append(('contractActionType', 'contract_award_type_desc'))
    else:
        plain_fields.append(('contractActionType', 'idv_type'))
        plain_fields.append(('multipleOrSingleAwardIDC', 'multiple_or_single_award_i'))
        desc_fields.append(('contractActionType', 'idv_type_description'))
        desc_fields.append(('multipleOrSingleAwardIDC', 'multiple_or_single_aw_desc'))

    for xml_key, target in plain_fields:
        _grab(xml_key, target)
    for xml_key, target in desc_fields:
        _grab(xml_key, target, description=True)
    return obj
def dollar_values_values(data, obj):
    """ Copy the dollarValues section of the XML feed into obj.

        Args:
            data: dict parsed from the dollarValues XML level
            obj: dict of derived columns being built up

        Returns:
            obj with the dollar value columns filled in (None when missing/malformed)
    """
    # XML tag -> staging column name
    mappings = (('baseAndAllOptionsValue', 'base_and_all_options_value'),
                ('baseAndExercisedOptionsValue', 'base_exercised_options_val'),
                ('obligatedAmount', 'federal_action_obligation'))
    for xml_tag, column in mappings:
        try:
            extracted = extract_text(data[xml_tag])
        except (KeyError, TypeError):
            extracted = None
        obj[column] = extracted
    return obj
def total_dollar_values_values(data, obj):
    """ Copy the totalDollarValues section of the XML feed into obj.

        Args:
            data: dict parsed from the totalDollarValues XML level
            obj: dict of derived columns being built up

        Returns:
            obj with the total dollar value columns filled in (None when missing/malformed)
    """
    # XML tag -> staging column name
    mappings = (('totalBaseAndAllOptionsValue', 'potential_total_value_awar'),
                ('totalBaseAndExercisedOptionsValue', 'current_total_value_award'),
                ('totalObligatedAmount', 'total_obligated_amount'))
    for xml_tag, column in mappings:
        try:
            extracted = extract_text(data[xml_tag])
        except (KeyError, TypeError):
            extracted = None
        obj[column] = extracted
    return obj
def legislative_mandates_values(data, obj):
    """ Copy the legislativeMandates section of the XML feed into obj.

        Args:
            data: dict parsed from the legislativeMandates XML level
            obj: dict of derived columns being built up

        Returns:
            obj with legislative mandate columns (values and descriptions) filled in
    """
    # Collapse the list of additional reporting values into one
    # "value: description; value: description" string (None when absent)
    try:
        reporting_entries = data['listOfAdditionalReportingValues']['additionalReportingValue']
    except (KeyError, TypeError):
        reporting_entries = None
    if reporting_entries:
        # a single entry arrives as a bare dict; normalize to a one-item list
        if isinstance(reporting_entries, dict):
            reporting_entries = [reporting_entries]
        formatted = []
        for entry in reporting_entries:
            entry_value = extract_text(entry)
            try:
                entry_desc = extract_text(entry['@description'])
            except (KeyError, TypeError):
                entry_desc = None
            if entry_desc is None:
                formatted.append(entry_value)
            else:
                formatted.append('{}: {}'.format(entry_value, entry_desc))
        obj['additional_reporting'] = '; '.join(formatted)
    else:
        obj['additional_reporting'] = None
    # plain values: XML tag -> staging column name
    for xml_tag, column in (('ClingerCohenAct', 'clinger_cohen_act_planning'),
                            ('constructionWageRateRequirements', 'construction_wage_rate_req'),
                            ('interagencyContractingAuthority', 'interagency_contracting_au'),
                            ('otherStatutoryAuthority', 'other_statutory_authority'),
                            ('laborStandards', 'labor_standards'),
                            ('materialsSuppliesArticlesEquipment', 'materials_supplies_article')):
        try:
            obj[column] = extract_text(data[xml_tag])
        except (KeyError, TypeError):
            obj[column] = None
    # descriptions for the same tags: XML tag -> description column name
    for xml_tag, column in (('ClingerCohenAct', 'clinger_cohen_act_pla_desc'),
                            ('constructionWageRateRequirements', 'construction_wage_rat_desc'),
                            ('interagencyContractingAuthority', 'interagency_contract_desc'),
                            ('laborStandards', 'labor_standards_descrip'),
                            ('materialsSuppliesArticlesEquipment', 'materials_supplies_descrip')):
        try:
            obj[column] = extract_text(data[xml_tag]['@description'])
        except (KeyError, TypeError):
            obj[column] = None
    return obj
def place_of_performance_values(data, obj):
    """ Copy the placeOfPerformance section of the XML feed into obj.

        Args:
            data: dict parsed from the placeOfPerformance XML level
            obj: dict of derived columns being built up

        Returns:
            obj with the place of performance columns filled in (None when missing/malformed)
    """
    def _grab(*path):
        # follow a nested key path and extract its text; None on any failure
        try:
            node = data
            for step in path:
                node = node[step]
            return extract_text(node)
        except (KeyError, TypeError):
            return None

    obj['place_of_performance_congr'] = _grab('placeOfPerformanceCongressionalDistrict')
    obj['place_of_performance_zip4a'] = _grab('placeOfPerformanceZIPCode')
    # city and county names ride along as attributes of the ZIP code element
    obj['place_of_perform_city_name'] = _grab('placeOfPerformanceZIPCode', '@city')
    obj['place_of_perform_county_na'] = _grab('placeOfPerformanceZIPCode', '@county')
    # within placeOfPerformance, the principalPlaceOfPerformance sub-level
    obj['place_of_performance_state'] = _grab('principalPlaceOfPerformance', 'stateCode')
    obj['place_of_perform_country_c'] = _grab('principalPlaceOfPerformance', 'countryCode')
    # human-readable names for the codes above
    obj['place_of_perf_country_desc'] = _grab('principalPlaceOfPerformance', 'countryCode', '@name')
    obj['place_of_perfor_state_desc'] = _grab('principalPlaceOfPerformance', 'stateCode', '@name')
    return obj
def product_or_service_information_values(data, obj):
    """ Copy the productOrServiceInformation section of the XML feed into obj.

        Args:
            data: dict parsed from the productOrServiceInformation XML level
            obj: dict of derived columns being built up

        Returns:
            obj with product/service columns (codes and descriptions) filled in
    """
    def _pull(xml_tag, column, attribute=None):
        # obj[column] = text of data[xml_tag] (or one of its attributes); None on failure
        try:
            node = data[xml_tag] if attribute is None else data[xml_tag][attribute]
            obj[column] = extract_text(node)
        except (KeyError, TypeError):
            obj[column] = None

    # plain values
    _pull('claimantProgramCode', 'dod_claimant_program_code')
    _pull('contractBundling', 'contract_bundling')
    _pull('countryOfOrigin', 'country_of_product_or_serv')
    _pull('informationTechnologyCommercialItemCategory', 'information_technology_com')
    _pull('manufacturingOrganizationType', 'domestic_or_foreign_entity')
    _pull('placeOfManufacture', 'place_of_manufacture')
    _pull('principalNAICSCode', 'naics')
    _pull('productOrServiceCode', 'product_or_service_code')
    _pull('recoveredMaterialClauses', 'recovered_materials_sustai')
    _pull('systemEquipmentCode', 'program_system_or_equipmen')
    _pull('useOfEPADesignatedProducts', 'epa_designated_product')
    # descriptions for the same tags
    _pull('claimantProgramCode', 'dod_claimant_prog_cod_desc', '@description')
    _pull('contractBundling', 'contract_bundling_descrip', '@description')
    _pull('informationTechnologyCommercialItemCategory', 'information_technolog_desc', '@description')
    _pull('manufacturingOrganizationType', 'domestic_or_foreign_e_desc', '@description')
    _pull('placeOfManufacture', 'place_of_manufacture_desc', '@description')
    _pull('principalNAICSCode', 'naics_description', '@description')
    _pull('productOrServiceCode', 'product_or_service_co_desc', '@description')
    _pull('recoveredMaterialClauses', 'recovered_materials_s_desc', '@description')
    _pull('systemEquipmentCode', 'program_system_or_equ_desc', '@description')
    _pull('useOfEPADesignatedProducts', 'epa_designated_produc_desc', '@description')
    # country of origin carries its name as an attribute rather than a description
    _pull('countryOfOrigin', 'country_of_product_or_desc', '@name')
    return obj
def purchaser_information_values(data, obj):
    """ Copy the purchaserInformation section of the XML feed into obj.

        Args:
            data: dict parsed from the purchaserInformation XML level
            obj: dict of derived columns being built up

        Returns:
            obj with awarding/funding agency and office columns filled in
    """
    def _pull(xml_tag, column, attribute=None):
        # obj[column] = text of data[xml_tag] (or one of its attributes); None on failure
        try:
            node = data[xml_tag] if attribute is None else data[xml_tag][attribute]
            obj[column] = extract_text(node)
        except (KeyError, TypeError):
            obj[column] = None

    # plain values
    _pull('contractingOfficeAgencyID', 'awarding_sub_tier_agency_c')
    _pull('contractingOfficeID', 'awarding_office_code')
    _pull('foreignFunding', 'foreign_funding')
    _pull('fundingRequestingAgencyID', 'funding_sub_tier_agency_co')
    _pull('fundingRequestingOfficeID', 'funding_office_code')
    # description attribute
    _pull('foreignFunding', 'foreign_funding_desc', '@description')
    # name attributes associated with the agency/office identifiers
    _pull('contractingOfficeAgencyID', 'awarding_sub_tier_agency_n', '@name')
    _pull('contractingOfficeID', 'awarding_office_name', '@name')
    _pull('fundingRequestingAgencyID', 'funding_sub_tier_agency_na', '@name')
    _pull('fundingRequestingOfficeID', 'funding_office_name', '@name')
    return obj
def relevant_contract_dates_values(data, obj):
    """ Copy the relevantContractDates section of the XML feed into obj.

        Args:
            data: dict parsed from the relevantContractDates XML level
            obj: dict of derived columns being built up

        Returns:
            obj with the contract date columns filled in (None when missing/malformed)
    """
    # XML tag -> staging column name
    mappings = (('currentCompletionDate', 'period_of_performance_curr'),
                ('effectiveDate', 'period_of_performance_star'),
                ('lastDateToOrder', 'ordering_period_end_date'),
                ('signedDate', 'action_date'),
                ('ultimateCompletionDate', 'period_of_perf_potential_e'))
    for xml_tag, column in mappings:
        try:
            extracted = extract_text(data[xml_tag])
        except (KeyError, TypeError):
            extracted = None
        obj[column] = extracted
    return obj
def vendor_values(data, obj):
    """ Get values from the vendor level of the xml.

        Args:
            data: dict parsed from the vendor XML level
            obj: dict of derived columns being built up

        Returns:
            obj with vendor-level columns filled in (None when missing/malformed),
            including everything derived from the vendorSiteDetails sub-level
    """
    # base vendor level
    value_map = {'CCRException': 'sam_exception',
                 'contractingOfficerBusinessSizeDetermination': 'contracting_officers_deter'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key])
        except (KeyError, TypeError):
            obj[value] = None
    # get descriptions for things in the value map
    value_map = {'CCRException': 'sam_exception_description',
                 'contractingOfficerBusinessSizeDetermination': 'contracting_officers_desc'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data[key]['@description'])
        except (KeyError, TypeError):
            obj[value] = None
    # vendorHeader sub-level
    value_map = {'vendorAlternateName': 'vendor_alternate_name',
                 'vendorDoingAsBusinessName': 'vendor_doing_as_business_n',
                 'vendorEnabled': 'vendor_enabled',
                 'vendorLegalOrganizationName': 'vendor_legal_org_name',
                 'vendorName': 'awardee_or_recipient_legal'}
    for key, value in value_map.items():
        try:
            obj[value] = extract_text(data['vendorHeader'][key])
        except (KeyError, TypeError):
            obj[value] = None
    # vendorSiteDetails sub-level (there are a lot so it gets its own function);
    # setdefault guarantees the key exists before handing the sub-dict off
    obj = vendor_site_details_values(data.setdefault('vendorSiteDetails', {}), obj)
    return obj
def vendor_site_details_values(data, obj):
    """ Copy the vendorSiteDetails section of the XML feed (sub-level of vendor) into obj.

        Args:
            data: dict parsed from the vendorSiteDetails XML level
            obj: dict of derived columns being built up

        Returns:
            obj with every vendorSiteDetails-derived column filled in (None when a
            value is missing or malformed in the feed)
    """
    def _node(*path):
        # walk nested keys inside data, yielding {} when any step is absent/invalid
        current = data
        for step in path:
            try:
                current = current[step]
            except (KeyError, TypeError):
                return {}
        return current

    def _fill(source, pairs):
        # obj[column] = extract_text(source[tag]); None when extraction fails
        for tag, column in pairs:
            try:
                obj[column] = extract_text(source[tag])
            except (KeyError, TypeError):
                obj[column] = None

    # base vendorSiteDetails level
    _fill(data, (('divisionName', 'division_name'),
                 ('divisionNumberOrOfficeCode', 'division_number_or_office'),
                 ('vendorAlternateSiteCode', 'vendor_alternate_site_code'),
                 ('vendorSiteCode', 'vendor_site_code')))
    # typeOfEducationalEntity sub-level
    _fill(_node('typeOfEducationalEntity'),
          (('is1862LandGrantCollege', 'c1862_land_grant_college'),
           ('is1890LandGrantCollege', 'c1890_land_grant_college'),
           ('is1994LandGrantCollege', 'c1994_land_grant_college'),
           ('isAlaskanNativeServicingInstitution', 'alaskan_native_servicing_i'),
           ('isHistoricallyBlackCollegeOrUniversity', 'historically_black_college'),
           ('isMinorityInstitution', 'minority_institution'),
           ('isNativeHawaiianServicingInstitution', 'native_hawaiian_servicing'),
           ('isPrivateUniversityOrCollege', 'private_university_or_coll'),
           ('isSchoolOfForestry', 'school_of_forestry'),
           ('isStateControlledInstitutionofHigherLearning', 'state_controlled_instituti'),
           ('isTribalCollege', 'tribal_college'),
           ('isVeterinaryCollege', 'veterinary_college')))
    # typeOfGovernmentEntity sub-level
    _fill(_node('typeOfGovernmentEntity'),
          (('isAirportAuthority', 'airport_authority'),
           ('isCouncilOfGovernments', 'council_of_governments'),
           ('isHousingAuthoritiesPublicOrTribal', 'housing_authorities_public'),
           ('isInterstateEntity', 'interstate_entity'),
           ('isPlanningCommission', 'planning_commission'),
           ('isPortAuthority', 'port_authority'),
           ('isTransitAuthority', 'transit_authority')))
    # vendorBusinessTypes sub-level
    _fill(_node('vendorBusinessTypes'),
          (('isCommunityDevelopedCorporationOwnedFirm', 'community_developed_corpor'),
           ('isForeignGovernment', 'foreign_government'),
           ('isLaborSurplusAreaFirm', 'labor_surplus_area_firm'),
           ('isStateGovernment', 'us_state_government'),
           ('isTribalGovernment', 'us_tribal_government')))
    # vendorBusinessTypes > businessOrOrganizationType sub-level
    _fill(_node('vendorBusinessTypes', 'businessOrOrganizationType'),
          (('isCorporateEntityNotTaxExempt', 'corporate_entity_not_tax_e'),
           ('isCorporateEntityTaxExempt', 'corporate_entity_tax_exemp'),
           ('isInternationalOrganization', 'international_organization'),
           ('isPartnershipOrLimitedLiabilityPartnership', 'partnership_or_limited_lia'),
           ('isSmallAgriculturalCooperative', 'small_agricultural_coopera'),
           ('isSolePropreitorship', 'sole_proprietorship'),
           ('isUSGovernmentEntity', 'us_government_entity')))
    # vendorBusinessTypes > federalGovernment sub-level
    _fill(_node('vendorBusinessTypes', 'federalGovernment'),
          (('isFederalGovernment', 'us_federal_government'),
           ('isFederalGovernmentAgency', 'federal_agency'),
           ('isFederallyFundedResearchAndDevelopmentCorp', 'federally_funded_research')))
    # vendorBusinessTypes > localGovernment sub-level
    _fill(_node('vendorBusinessTypes', 'localGovernment'),
          (('isCityLocalGovernment', 'city_local_government'),
           ('isCountyLocalGovernment', 'county_local_government'),
           ('isInterMunicipalLocalGovernment', 'inter_municipal_local_gove'),
           ('isLocalGovernment', 'us_local_government'),
           ('isLocalGovernmentOwned', 'local_government_owned'),
           ('isMunicipalityLocalGovernment', 'municipality_local_governm'),
           ('isSchoolDistrictLocalGovernment', 'school_district_local_gove'),
           ('isTownshipLocalGovernment', 'township_local_government')))
    # vendorCertifications sub-level
    _fill(_node('vendorCertifications'),
          (('isDOTCertifiedDisadvantagedBusinessEnterprise', 'dot_certified_disadvantage'),
           ('isSBACertified8AJointVenture', 'sba_certified_8_a_joint_ve'),
           ('isSBACertified8AProgramParticipant', 'c8a_program_participant'),
           ('isSBACertifiedHUBZone', 'historically_underutilized'),
           ('isSBACertifiedSmallDisadvantagedBusiness', 'small_disadvantaged_busine'),
           ('isSelfCertifiedSmallDisadvantagedBusiness', 'self_certified_small_disad')))
    # entityIdentifiers sub-level
    _fill(_node('entityIdentifiers'), (('cageCode', 'cage_code'),))
    # entityIdentifiers > vendorDUNSInformation sub-level
    _fill(_node('entityIdentifiers', 'vendorDUNSInformation'),
          (('DUNSNumber', 'awardee_or_recipient_uniqu'),
           ('globalParentDUNSName', 'ultimate_parent_legal_enti'),
           ('globalParentDUNSNumber', 'ultimate_parent_unique_ide')))
    # entityIdentifiers > vendorUEIInformation sub-level
    _fill(_node('entityIdentifiers', 'vendorUEIInformation'),
          (('UEI', 'awardee_or_recipient_uei'),
           ('ultimateParentUEI', 'ultimate_parent_uei')))
    # vendorLineOfBusiness sub-level
    _fill(_node('vendorLineOfBusiness'),
          (('isCommunityDevelopmentCorporation', 'community_development_corp'),
           ('isDomesticShelter', 'domestic_shelter'),
           ('isEducationalInstitution', 'educational_institution'),
           ('isFoundation', 'foundation'),
           ('isHispanicServicingInstitution', 'hispanic_servicing_institu'),
           ('isHospital', 'hospital_flag'),
           ('isManufacturerOfGoods', 'manufacturer_of_goods'),
           ('isVeterinaryHospital', 'veterinary_hospital')))
    # vendorLocation sub-level
    _fill(_node('vendorLocation'),
          (('city', 'legal_entity_city_name'),
           ('congressionalDistrictCode', 'legal_entity_congressional'),
           ('countryCode', 'legal_entity_country_code'),
           ('faxNo', 'vendor_fax_number'),
           ('phoneNo', 'vendor_phone_number'),
           ('streetAddress', 'legal_entity_address_line1'),
           ('streetAddress2', 'legal_entity_address_line2'),
           ('streetAddress3', 'legal_entity_address_line3'),
           ('vendorLocationDisabledFlag', 'vendor_location_disabled_f'),
           ('ZIPCode', 'legal_entity_zip4')))
    # differentiating between US and foreign states: US-territory locations store
    # the raw state value as a code (and look its name up from the @name attribute);
    # foreign ones only get the raw value stored as a state description
    if obj['legal_entity_country_code'] in country_code_map:
        state_column = 'legal_entity_state_code'
        _fill(_node('vendorLocation', 'state'), (('@name', 'legal_entity_state_descrip'),))
    else:
        state_column = 'legal_entity_state_descrip'
        # the code column must exist either way because later derivations read it
        obj['legal_entity_state_code'] = None
    _fill(_node('vendorLocation'), (('state', state_column),))
    # getting the name associated with the country code
    _fill(_node('vendorLocation', 'countryCode'), (('@name', 'legal_entity_country_name'),))
    # vendorOrganizationFactors sub-level
    _fill(_node('vendorOrganizationFactors'),
          (('isForeignOwnedAndLocated', 'foreign_owned_and_located'),
           ('isLimitedLiabilityCorporation', 'limited_liability_corporat'),
           ('isShelteredWorkshop', 'the_ability_one_program'),
           ('isSubchapterSCorporation', 'subchapter_s_corporation'),
           ('organizationalType', 'organizational_type')))
    # vendorOrganizationFactors > profitStructure sub-level
    _fill(_node('vendorOrganizationFactors', 'profitStructure'),
          (('isForProfitOrganization', 'for_profit_organization'),
           ('isNonprofitOrganization', 'nonprofit_organization'),
           ('isOtherNotForProfitOrganization', 'other_not_for_profit_organ')))
    # vendorRelationshipWithFederalGovernment sub-level
    _fill(_node('vendorRelationshipWithFederalGovernment'),
          (('receivesContracts', 'contracts'),
           ('receivesContractsAndGrants', 'receives_contracts_and_gra'),
           ('receivesGrants', 'grants')))
    # vendorSocioEconomicIndicators sub-level
    _fill(_node('vendorSocioEconomicIndicators'),
          (('isAlaskanNativeOwnedCorporationOrFirm', 'alaskan_native_owned_corpo'),
           ('isAmericanIndianOwned', 'american_indian_owned_busi'),
           ('isEconomicallyDisadvantagedWomenOwnedSmallBusiness', 'economically_disadvantaged'),
           ('isIndianTribe', 'indian_tribe_federally_rec'),
           ('isJointVentureEconomicallyDisadvantagedWomenOwnedSmallBusiness', 'joint_venture_economically'),
           ('isJointVentureWomenOwnedSmallBusiness', 'joint_venture_women_owned'),
           ('isNativeHawaiianOwnedOrganizationOrFirm', 'native_hawaiian_owned_busi'),
           ('isServiceRelatedDisabledVeteranOwnedBusiness', 'service_disabled_veteran_o'),
           ('isTriballyOwnedFirm', 'tribally_owned_business'),
           ('isVerySmallBusiness', 'emerging_small_business'),
           ('isVeteranOwned', 'veteran_owned_business'),
           ('isWomenOwned', 'woman_owned_business'),
           ('isWomenOwnedSmallBusiness', 'women_owned_small_business')))
    # vendorSocioEconomicIndicators > minorityOwned sub-level
    _fill(_node('vendorSocioEconomicIndicators', 'minorityOwned'),
          (('isAsianPacificAmericanOwnedBusiness', 'asian_pacific_american_own'),
           ('isBlackAmericanOwnedBusiness', 'black_american_owned_busin'),
           ('isHispanicAmericanOwnedBusiness', 'hispanic_american_owned_bu'),
           ('isMinorityOwned', 'minority_owned_business'),
           ('isNativeAmericanOwnedBusiness', 'native_american_owned_busi'),
           ('isOtherMinorityOwned', 'other_minority_owned_busin'),
           ('isSubContinentAsianAmericanOwnedBusiness', 'subcontinent_asian_asian_i')))
    return obj
def generic_values(data, obj):
    """ Copy the genericTags section of the XML feed into obj.

        Args:
            data: dict parsed from the genericTags XML level
            obj: dict of derived columns being built up

        Returns:
            obj with the solicitation date filled in (None when missing/malformed)
    """
    # genericString01 is repurposed by the feed to carry the solicitation date
    try:
        solicitation_date = extract_text(data['genericStrings']['genericString01'])
    except (KeyError, TypeError):
        solicitation_date = None
    obj['solicitation_date'] = solicitation_date
    return obj
def calculate_ppop_fields(obj, sess, county_by_name, county_by_code, state_code_list, country_list):
    """ Calculate values that aren't in any feed (or haven't been provided properly) for place of performance.

        Mutates obj in place; the order of the derivations below matters because the
        country-code rewrite feeds the state/county/zip derivations that follow it.

        Args:
            obj: dict of derived columns being built up (read and written in place)
            sess: the database connection (used for county-by-zip lookups)
            county_by_name: dict of county codes, keyed by state then county name
            county_by_code: dict of county names, keyed by state then county code
            state_code_list: dict of state names, keyed by state code
            country_list: dict of country names, keyed by country code
    """
    # only do any of these calculation if the country code is in the list of US territories
    if obj['place_of_perform_country_c'] in country_code_map:
        # If it's in the list but not USA, find its state code in the list and put that in the state code spot, get
        # the state name, then replace country code and country description with USA and UNITED STATES respectively
        if obj['place_of_perform_country_c'] != 'USA':
            obj['place_of_performance_state'] = country_code_map[obj['place_of_perform_country_c']]
            if obj['place_of_performance_state'] in state_code_list:
                obj['place_of_perfor_state_desc'] = state_code_list[obj['place_of_performance_state']]
            obj['place_of_perform_country_c'] = 'USA'
            obj['place_of_perf_country_desc'] = 'UNITED STATES'
        # derive state name if we don't have it
        if obj['place_of_performance_state'] and not obj['place_of_perfor_state_desc']\
                and obj['place_of_performance_state'] in state_code_list:
            obj['place_of_perfor_state_desc'] = state_code_list[obj['place_of_performance_state']]
        # calculate place of performance county code from state + county name
        if obj['place_of_perform_county_na'] and obj['place_of_performance_state']:
            state = obj['place_of_performance_state']
            county_name = obj['place_of_perform_county_na']
            # make sure they gave us a valid state and then check if it's in our lookup
            if state in county_by_name and county_name in county_by_name[state]:
                obj['place_of_perform_county_co'] = county_by_name[state][county_name]
        # if accessing the county code by state code and county name didn't work, try by zip4a if we have it
        if not obj['place_of_perform_county_co'] and obj['place_of_performance_zip4a']:
            obj['place_of_perform_county_co'] = get_county_by_zip(sess, obj['place_of_performance_zip4a'])
        # if we didn't have a county name but got the county code, we can grab the name
        if not obj['place_of_perform_county_na'] and obj['place_of_performance_state'] in county_by_code\
                and obj['place_of_perform_county_co'] in county_by_code[obj['place_of_performance_state']]:
            obj['place_of_perform_county_na'] =\
                county_by_code[obj['place_of_performance_state']][obj['place_of_perform_county_co']]
        # if we have content in the zip code and it's in a valid US format, split it into 5 and 4 digit
        if obj['place_of_performance_zip4a'] and is_valid_zip(obj['place_of_performance_zip4a']):
            obj['place_of_performance_zip5'] = obj['place_of_performance_zip4a'][:5]
            if len(obj['place_of_performance_zip4a']) > 5:
                obj['place_of_perform_zip_last4'] = obj['place_of_performance_zip4a'][-4:]
    # if there is any country code (checked outside function) but not a country name, try to get the country name
    if not obj['place_of_perf_country_desc'] and obj['place_of_perform_country_c'] in country_list:
        obj['place_of_perf_country_desc'] = country_list[obj['place_of_perform_country_c']]
def calculate_legal_entity_fields(obj, sess, county_by_code, state_code_list, country_list):
    """ Calculate values that aren't in any feed (or haven't been provided properly) for legal entity.

        Mutates obj in place; mirrors the place-of-performance derivations, and the
        order matters because the country-code rewrite feeds the later lookups.

        Args:
            obj: dict of derived columns being built up (read and written in place)
            sess: the database connection (used for county-by-zip lookups)
            county_by_code: dict of county names, keyed by state then county code
            state_code_list: dict of state names, keyed by state code
            country_list: dict of country names, keyed by country code
    """
    # do legal entity derivations only if legal entity country code is in a US territory of any kind
    if obj['legal_entity_country_code'] in country_code_map:
        # if it's in the list but not USA, find its state code in the list and put that in the state code spot, get
        # the state name, then replace country code and country description with USA and UNITED STATES respectively
        if obj['legal_entity_country_code'] != 'USA':
            obj['legal_entity_state_code'] = country_code_map[obj['legal_entity_country_code']]
            if obj['legal_entity_state_code'] in state_code_list:
                obj['legal_entity_state_descrip'] = state_code_list[obj['legal_entity_state_code']]
            obj['legal_entity_country_code'] = 'USA'
            obj['legal_entity_country_name'] = 'UNITED STATES'
        # derive state name if we don't have it
        if obj['legal_entity_state_code'] and not obj['legal_entity_state_descrip']\
                and obj['legal_entity_state_code'] in state_code_list:
            obj['legal_entity_state_descrip'] = state_code_list[obj['legal_entity_state_code']]
        # calculate legal entity county code and split zip when possible
        if obj['legal_entity_zip4'] and is_valid_zip(obj['legal_entity_zip4']):
            obj['legal_entity_county_code'] = get_county_by_zip(sess, obj['legal_entity_zip4'])
            # if we have a county code and a state code, we can try to get the county name
            if obj['legal_entity_county_code'] and obj['legal_entity_state_code']:
                county_code = obj['legal_entity_county_code']
                state = obj['legal_entity_state_code']
                # make sure they gave us a valid state and then check if it's in our lookup
                if state in county_by_code and county_code in county_by_code[state]:
                    obj['legal_entity_county_name'] = county_by_code[state][county_code]
            obj['legal_entity_zip5'] = obj['legal_entity_zip4'][:5]
            if len(obj['legal_entity_zip4']) > 5:
                obj['legal_entity_zip_last4'] = obj['legal_entity_zip4'][-4:]
    # if there is any country code (checked outside function) but not a country name, try to get the country name
    if not obj['legal_entity_country_name'] and obj['legal_entity_country_code'] in country_list:
        obj['legal_entity_country_name'] = country_list[obj['legal_entity_country_code']]
def calculate_remaining_fields(obj, sess, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list,
                               exec_comp_dict, atom_type):
    """ Calculate values that aren't in any feed but can be calculated.

        Args:
            obj: a dictionary containing the details we need to derive from and to
            sess: the database connection
            sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
            county_by_name: a dictionary containing all county codes, keyed by state and county name
            county_by_code: a dictionary containing all county names, keyed by state and county code
            state_code_list: a dictionary containing all state names, keyed by state code
            country_list: a dictionary containing all country names, keyed by country code
            exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
            atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'

        Returns:
            the object originally passed in with newly-calculated values added
    """
    # we want to null out all the calculated columns in case this is an update to the records
    obj['awarding_agency_code'] = None
    obj['awarding_agency_name'] = None
    obj['funding_agency_code'] = None
    obj['funding_agency_name'] = None
    obj['place_of_perform_county_co'] = None
    obj['legal_entity_county_code'] = None
    obj['legal_entity_county_name'] = None
    obj['detached_award_proc_unique'] = None
    # calculate awarding agency codes/names based on awarding sub tier agency codes
    if obj['awarding_sub_tier_agency_c']:
        try:
            sub_tier_agency = sub_tier_list[obj['awarding_sub_tier_agency_c']]
            use_frec = sub_tier_agency.is_frec
            agency_data = sub_tier_agency.frec if use_frec else sub_tier_agency.cgac
            obj['awarding_agency_code'] = agency_data.frec_code if use_frec else agency_data.cgac_code
            obj['awarding_agency_name'] = agency_data.agency_name
        except KeyError:
            logger.info('WARNING: MissingSubtierCGAC: The awarding sub-tier cgac_code: %s does not exist in cgac table.'
                        ' The FPDS-provided awarding sub-tier agency name (if given) for this cgac_code is %s. '
                        'The award has been loaded with awarding_agency_code 999.',
                        obj['awarding_sub_tier_agency_c'], obj['awarding_sub_tier_agency_n'])
            obj['awarding_agency_code'] = '999'
            obj['awarding_agency_name'] = None
    # calculate funding agency codes/names based on funding sub tier agency codes
    if obj['funding_sub_tier_agency_co']:
        try:
            sub_tier_agency = sub_tier_list[obj['funding_sub_tier_agency_co']]
            use_frec = sub_tier_agency.is_frec
            agency_data = sub_tier_agency.frec if use_frec else sub_tier_agency.cgac
            obj['funding_agency_code'] = agency_data.frec_code if use_frec else agency_data.cgac_code
            obj['funding_agency_name'] = agency_data.agency_name
        except KeyError:
            logger.info('WARNING: MissingSubtierCGAC: The funding sub-tier cgac_code: %s does not exist in cgac table. '
                        'The FPDS-provided funding sub-tier agency name (if given) for this cgac_code is %s. '
                        'The award has been loaded with funding_agency_code 999.',
                        obj['funding_sub_tier_agency_co'], obj['funding_sub_tier_agency_na'])
            obj['funding_agency_code'] = '999'
            obj['funding_agency_name'] = None
    # do place of performance calculations only if we have SOME country code
    if obj['place_of_perform_country_c']:
        calculate_ppop_fields(obj, sess, county_by_name, county_by_code, state_code_list, country_list)
    # do legal entity calculations only if we have SOME country code
    if obj['legal_entity_country_code']:
        calculate_legal_entity_fields(obj, sess, county_by_code, state_code_list, country_list)
    # calculate business categories
    obj['business_categories'] = get_business_categories(row=obj, data_type='fpds')
    # Calculate executive compensation data for the entry.
    # (membership test directly on the dict; `.keys()` was redundant)
    if obj['awardee_or_recipient_uniqu'] and obj['awardee_or_recipient_uniqu'] in exec_comp_dict:
        exec_comp = exec_comp_dict[obj['awardee_or_recipient_uniqu']]
        for i in range(1, 6):
            obj['high_comp_officer{}_full_na'.format(i)] = exec_comp['officer{}_name'.format(i)]
            obj['high_comp_officer{}_amount'.format(i)] = exec_comp['officer{}_amt'.format(i)]
    else:
        # Need to make sure they're null in case this is updating and the DUNS has changed somehow
        for i in range(1, 6):
            obj['high_comp_officer{}_full_na'.format(i)] = None
            obj['high_comp_officer{}_amount'.format(i)] = None
    # calculate unique award key
    if atom_type == 'award':
        unique_award_string_list = ['CONT_AWD']
        key_list = ['piid', 'agency_id', 'parent_award_id', 'referenced_idv_agency_iden']
    else:
        unique_award_string_list = ['CONT_IDV']
        key_list = ['piid', 'agency_id']
    for item in key_list:
        # Get the value in the object or, if the key doesn't exist or value is None, set it to "-none-"
        unique_award_string_list.append(obj.get(item) or '-none-')
    obj['unique_award_key'] = '_'.join(unique_award_string_list).upper()
    # calculate unique key
    key_list = ['agency_id', 'referenced_idv_agency_iden', 'piid', 'award_modification_amendme', 'parent_award_id',
                'transaction_number']
    idv_list = ['agency_id', 'piid', 'award_modification_amendme']
    unique_string = ""
    for item in key_list:
        if len(unique_string) > 0:
            unique_string += "_"
        if atom_type == 'award' or item in idv_list:
            # Get the value in the object or, if the key doesn't exist or value is None, set it to "-none-"
            unique_string += obj.get(item) or '-none-'
        else:
            unique_string += '-none-'
    # The order of the unique key is agency_id, referenced_idv_agency_iden, piid, award_modification_amendme,
    # parent_award_id, transaction_number
    obj['detached_award_proc_unique'] = unique_string
    return obj
def process_data(data, sess, atom_type, sub_tier_list, county_by_name, county_by_code, state_code_list, country_list,
                 exec_comp_dict):
    """ Process the data coming in.

        Args:
            data: an object containing the data gathered from the feed
            sess: the database connection
            atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
            sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
            county_by_name: a dictionary containing all county codes, keyed by state and county name
            county_by_code: a dictionary containing all county names, keyed by state and county code
            state_code_list: a dictionary containing all state names, keyed by state code
            country_list: a dictionary containing all country names, keyed by country code
            exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number

        Returns:
            An object containing the processed and calculated data.
    """
    obj = {}

    # setdefault guarantees each section key exists (as an empty dict) before it is handed to its helper,
    # replacing the previous try/except KeyError existence checks with identical behavior
    if atom_type == "award":
        obj = award_id_values(data.setdefault('awardID', {}), obj)
    else:
        # transaction_number is a part of the unique identifier, set it to None
        obj['transaction_number'] = None
        obj = contract_id_values(data.setdefault('contractID', {}), obj)

    obj = competition_values(data.setdefault('competition', {}), obj)
    obj = contract_data_values(data.setdefault('contractData', {}), obj, atom_type)
    obj = dollar_values_values(data.setdefault('dollarValues', {}), obj)
    obj = total_dollar_values_values(data.setdefault('totalDollarValues', {}), obj)

    if atom_type == "award":
        obj = place_of_performance_values(data.setdefault('placeOfPerformance', {}), obj)
    else:
        # these values need to be filled so the existence check when calculating county data doesn't freak out
        obj['place_of_perform_county_na'] = None
        obj['place_of_performance_state'] = None
        obj['place_of_perfor_state_desc'] = None
        obj['place_of_performance_zip4a'] = None
        obj['place_of_perform_country_c'] = None
        obj['place_of_perf_country_desc'] = None

    obj = legislative_mandates_values(data.setdefault('legislativeMandates', {}), obj)

    # TypeError is also possible here because subcontractPlan may be a plain string rather than a dict
    try:
        obj['subcontracting_plan'] = extract_text(data['preferencePrograms']['subcontractPlan'])
    except (KeyError, TypeError):
        obj['subcontracting_plan'] = None

    try:
        obj['subcontracting_plan_desc'] = extract_text(data['preferencePrograms']['subcontractPlan']['@description'])
    except (KeyError, TypeError):
        obj['subcontracting_plan_desc'] = None

    obj = product_or_service_information_values(data.setdefault('productOrServiceInformation', {}), obj)
    obj = purchaser_information_values(data.setdefault('purchaserInformation', {}), obj)
    obj = relevant_contract_dates_values(data.setdefault('relevantContractDates', {}), obj)
    obj = vendor_values(data.setdefault('vendor', {}), obj)
    obj = generic_values(data.setdefault('genericTags', {}), obj)

    obj = calculate_remaining_fields(obj, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
                                     country_list, exec_comp_dict, atom_type)

    try:
        obj['last_modified'] = extract_text(data['transactionInformation']['lastModifiedDate'])
    except (KeyError, TypeError):
        obj['last_modified'] = None

    try:
        obj['initial_report_date'] = extract_text(data['transactionInformation']['createdDate'])
    except (KeyError, TypeError):
        obj['initial_report_date'] = None

    obj['pulled_from'] = atom_type

    # clear out potentially excel-breaking whitespace from specific fields
    free_fields = ["award_description", "vendor_doing_as_business_n", "legal_entity_address_line1",
                   "legal_entity_address_line2", "legal_entity_address_line3", "ultimate_parent_legal_enti",
                   "awardee_or_recipient_legal", "other_statutory_authority"]
    for field in free_fields:
        if obj[field]:
            # raw string fixes the previously invalid '\s' escape sequence (DeprecationWarning on modern Python)
            obj[field] = re.sub(r'\s', ' ', obj[field])
    return obj
def process_delete_data(data, atom_type):
    """ Process the delete feed data coming in.

        Args:
            data: an object containing a single entry's content from the delete feed
            atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'

        Returns:
            The unique key string for the entry. The order of unique constraints in the string is:
            agency_id, referenced_idv_agency_iden, piid, award_modification_amendme, parent_award_id,
            transaction_number. Any piece that is missing (or whose parent isn't a dict) becomes "-none-".
    """
    def safe_extract(*keys):
        # Walk the nested dict along keys; missing keys or non-dict intermediate values yield "-none-"
        try:
            value = data
            for key in keys:
                value = value[key]
            return extract_text(value)
        except (KeyError, TypeError):
            return "-none-"

    if atom_type == "award":
        parts = [safe_extract('awardID', 'awardContractID', 'agencyID'),
                 safe_extract('awardID', 'referencedIDVID', 'agencyID'),
                 safe_extract('awardID', 'awardContractID', 'PIID'),
                 safe_extract('awardID', 'awardContractID', 'modNumber'),
                 safe_extract('awardID', 'referencedIDVID', 'PIID'),
                 safe_extract('awardID', 'awardContractID', 'transactionNumber')]
    else:
        parts = [safe_extract('contractID', 'IDVID', 'agencyID'),
                 # referenced_idv_agency_iden not used in IDV identifier, just set it to "-none-"
                 '-none-',
                 safe_extract('contractID', 'IDVID', 'PIID'),
                 safe_extract('contractID', 'IDVID', 'modNumber'),
                 # parent_award_id not used in IDV identifier and transaction_number not in IDV feed
                 '-none-',
                 '-none-']
    return '_'.join(parts)
def create_processed_data_list(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code,
                               state_code_list, country_list, exec_comp_dict):
    """ Create a list of processed data

        Args:
            data: an object containing the data gathered from the feed
            sess: the database connection
            contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
            sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
            county_by_name: a dictionary containing all county codes, keyed by state and county name
            county_by_code: a dictionary containing all county names, keyed by state and county code
            state_code_list: a dictionary containing all state names, keyed by state code
            country_list: a dictionary containing all country names, keyed by country code
            exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number

        Returns:
            A list containing the processed and calculated data.
    """
    # run every feed entry through process_data in one pass and collect the results
    return [process_data(entry['content'][contract_type], sess, atom_type=contract_type, sub_tier_list=sub_tier_list,
                         county_by_name=county_by_name, county_by_code=county_by_code,
                         state_code_list=state_code_list, country_list=country_list, exec_comp_dict=exec_comp_dict)
            for entry in data]
def add_processed_data_list(data, sess):
    """ Bulk-insert processed FPDS rows; on a duplicate-key failure, fall back to per-row upserts.

        Args:
            data: a list of dicts, each holding the column values for one DetachedAwardProcurement row
            sess: the database connection
    """
    try:
        # fast path: one bulk save for the whole batch
        sess.bulk_save_objects([DetachedAwardProcurement(**row) for row in data])
        sess.commit()
    except IntegrityError:
        # at least one row already exists; redo the batch one row at a time as upserts
        sess.rollback()
        logger.error("Attempted to insert duplicate FPDS data. Inserting each row in batch individually.")
        for row in data:
            upsert = insert(DetachedAwardProcurement).values(**row).\
                on_conflict_do_update(index_elements=['detached_award_proc_unique'], set_=row)
            sess.execute(upsert)
        sess.commit()
def process_and_add(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
                    country_list, exec_comp_dict, now, threaded=False):
    """ Start the processing for data and add it to the DB.

        Args:
            data: an object containing the data gathered from the feed
            contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
            sess: the database connection
            sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
            county_by_name: a dictionary containing all county codes, keyed by state and county name
            county_by_code: a dictionary containing all county names, keyed by state and county code
            state_code_list: a dictionary containing all state names, keyed by state code
            country_list: a dictionary containing all country names, keyed by country code
            exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
            now: a timestamp indicating the time to set the updated_at to
            threaded: a boolean indicating whether the process is running as a thread or not
    """
    # The processing step is identical in both modes, so run it once per entry and branch only on the
    # DB-write strategy (previously the whole loop, including the process_data call, was duplicated).
    for value in data:
        tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type,
                               sub_tier_list=sub_tier_list, county_by_name=county_by_name,
                               county_by_code=county_by_code, state_code_list=state_code_list,
                               country_list=country_list, exec_comp_dict=exec_comp_dict)
        if threaded:
            # threaded mode always upserts and stamps updated_at up front
            tmp_obj['updated_at'] = now
            insert_statement = insert(DetachedAwardProcurement).values(**tmp_obj).\
                on_conflict_do_update(index_elements=['detached_award_proc_unique'], set_=tmp_obj)
            sess.execute(insert_statement)
        else:
            # non-threaded mode tries a plain insert first; on a duplicate it updates the existing row,
            # stamping updated_at only in that case
            try:
                statement = insert(DetachedAwardProcurement).values(**tmp_obj)
                sess.execute(statement)
                sess.commit()
            except IntegrityError:
                sess.rollback()
                tmp_obj['updated_at'] = now
                sess.query(DetachedAwardProcurement).\
                    filter_by(detached_award_proc_unique=tmp_obj['detached_award_proc_unique']).\
                    update(tmp_obj, synchronize_session=False)
                sess.commit()
def get_with_exception_hand(url_string, expect_entries=True):
    """ Retrieve data from FPDS, allow for multiple retries and timeouts.

        Args:
            url_string: the FPDS feed URL to request
            expect_entries: when True, a response whose XML has no feed entries is treated as a
                transient failure and retried

        Returns:
            The successful requests response object.
    """
    sleep_schedule = [5, 30, 60, 180, 300, 360, 420, 480, 540, 600]
    timeout = 60
    attempt = 0
    while True:
        try:
            resp = requests.get(url_string, timeout=timeout)
            if expect_entries:
                # we should always expect entries, otherwise we shouldn't be calling it
                resp_dict = xmltodict.parse(resp.text, process_namespaces=True, namespaces=FPDS_NAMESPACES)
                len(list_data(resp_dict['feed']['entry']))
            return resp
        except (ConnectionResetError, ReadTimeoutError, ConnectionError, ReadTimeout, KeyError) as e:
            if attempt >= len(sleep_schedule):
                logger.info('Connection to FPDS feed lost, maximum retry attempts exceeded.')
                raise e
            # back off per the schedule and allow the next request a longer timeout
            timeout += 60
            logger.info('Connection exception. Sleeping {}s and then retrying with a max wait of {}s...'
                        .format(sleep_schedule[attempt], timeout))
            time.sleep(sleep_schedule[attempt])
            attempt += 1
def get_total_expected_records(base_url):
    """ Retrieve the total number of expected records based on the last paginated URL.

        Args:
            base_url: the feed URL (without pagination) to count records for

        Returns:
            The total number of records the feed is expected to yield.
    """
    # grab a single page so we can inspect the pagination links
    first_page = get_with_exception_hand(base_url, expect_entries=False)
    first_page_xml = xmltodict.parse(first_page.text, process_namespaces=True, namespaces=FPDS_NAMESPACES)

    try:
        links = list_data(first_page_xml['feed']['link'])
    except KeyError:
        links = []

    # look for the URL flagged as the last page
    last_page_url = None
    for link in links:
        if link['@rel'] == 'last':
            last_page_url = link['@href']

    # no "last" link means everything fit on the first page; count its entries directly
    if not last_page_url:
        try:
            return len(list_data(first_page_xml['feed']['entry']))
        except KeyError:
            return 0

    # the start offset encoded in the last page's URL plus the entries on that page gives the total
    start_offset = int(last_page_url.split('&start=')[-1])
    last_page = get_with_exception_hand(last_page_url)
    last_page_xml = xmltodict.parse(last_page.text, process_namespaces=True, namespaces=FPDS_NAMESPACES)
    try:
        last_entries = list_data(last_page_xml['feed']['entry'])
    except KeyError:
        raise Exception("Initial count failed, no entries in last page of request.")
    return start_offset + len(last_entries)
def get_data(contract_type, award_type, now, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
             country_list, exec_comp_dict, last_run=None, threaded=False, start_date=None, end_date=None, metrics=None,
             specific_params=None):
    """ Get the data from the atom feed based on contract/award type and the last time the script was run.

        Args:
            contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
            award_type: a string indicating what the award type of the feed being checked is
            now: a timestamp indicating the time to set the updated_at to
            sess: the database connection
            sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
            county_by_name: a dictionary containing all county codes, keyed by state and county name
            county_by_code: a dictionary containing all county names, keyed by state and county code
            state_code_list: a dictionary containing all state names, keyed by state code
            country_list: a dictionary containing all country names, keyed by country code
            exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
            last_run: a date indicating the last time the pull was run
            threaded: a boolean indicating whether the process is running as a thread or not
            start_date: a date indicating the first date to pull from (must be provided with end_date)
            end_date: a date indicating the last date to pull from (must be provided with start_date)
            metrics: a dictionary to gather metrics for the script in
            specific_params: a string containing a specific set of params to run the query with (used for outside
                scripts that need to run a data load)
    """
    if not metrics:
        metrics = {}
    data = []
    yesterday = now - datetime.timedelta(days=1)
    utcnow = datetime.datetime.utcnow()
    # If a specific set of params was provided, use that
    if specific_params:
        params = specific_params
    # if a date that the script was last successfully run is not provided, get all data
    elif not last_run:
        params = 'SIGNED_DATE:[2016/10/01,' + yesterday.strftime('%Y/%m/%d') + '] '
        metrics['start_date'] = '2016/10/01'
        metrics['end_date'] = yesterday.strftime('%Y/%m/%d')
    # if a date that the script was last successfully run is provided, get data since that date
    else:
        last_run_date = last_run - relativedelta(days=1)
        params = 'LAST_MOD_DATE:[' + last_run_date.strftime('%Y/%m/%d') + ',' + yesterday.strftime('%Y/%m/%d') + '] '
        metrics['start_date'] = last_run_date.strftime('%Y/%m/%d')
        metrics['end_date'] = yesterday.strftime('%Y/%m/%d')
    # an explicit date range overrides whatever range was derived above
    if start_date and end_date:
        params = 'LAST_MOD_DATE:[' + start_date + ',' + end_date + '] '
        metrics['start_date'] = start_date
        metrics['end_date'] = end_date
    base_url = feed_url + params + 'CONTRACT_TYPE:"' + contract_type.upper() + '" AWARD_TYPE:"' + award_type + '"'
    logger.info('Starting get feed: %s', base_url)
    # retrieve the total count of expected records for this pull
    total_expected_records = get_total_expected_records(base_url)
    logger.info('{} record(s) expected from this feed'.format(total_expected_records))
    entries_processed = 0
    while True:
        # pull in the next MAX_ENTRIES * REQUESTS_AT_ONCE until we get anything less than the MAX_ENTRIES
        async def atom_async_get(entries_already_processed, total_expected_records):
            # Fan out the paginated GETs concurrently through the default executor and collect the raw
            # response bodies in page order.
            response_list = []
            loop = asyncio.get_event_loop()
            requests_at_once = MAX_REQUESTS_AT_ONCE
            if total_expected_records - entries_already_processed < (MAX_REQUESTS_AT_ONCE * MAX_ENTRIES):
                # adding +1 to ensure that they're not adding anything since we got the expected count
                requests_at_once = math.ceil((total_expected_records - entries_already_processed) / MAX_ENTRIES) + 1
            futures = [
                loop.run_in_executor(
                    None,
                    get_with_exception_hand,
                    base_url + "&start=" + str(entries_already_processed + (start_offset * MAX_ENTRIES)),
                    # expect_entries argument: only pages that start below the expected total must have entries
                    total_expected_records > entries_already_processed + (start_offset * MAX_ENTRIES)
                )
                for start_offset in range(requests_at_once)
            ]
            for response in await asyncio.gather(*futures):
                response_list.append(response.text)
                pass
            return response_list
        # End async get requests def
        loop = asyncio.get_event_loop()
        full_response = loop.run_until_complete(atom_async_get(entries_processed, total_expected_records))
        for next_resp in full_response:
            response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES)
            try:
                entries_per_response = list_data(response_dict['feed']['entry'])
            except KeyError:
                # a page past the end of the feed has no entries; skip it
                continue
            if last_run or specific_params:
                # delta loads keep the raw entries; process_and_add processes them at write time below
                for entry in entries_per_response:
                    data.append(entry)
                    entries_processed += 1
            else:
                # historical loads process the entries immediately into row dicts
                data.extend(create_processed_data_list(entries_per_response, contract_type, sess, sub_tier_list,
                                                       county_by_name, county_by_code, state_code_list, country_list,
                                                       exec_comp_dict))
                entries_processed += len(entries_per_response)
        if entries_processed > total_expected_records:
            # Find entries that don't have FPDS content and print them all
            for next_resp in full_response:
                response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES)
                try:
                    list_data(response_dict['feed']['entry'])
                except KeyError:
                    logger.info(response_dict)
                    continue
            raise Exception("Total number of expected records has changed\nExpected: {}\nRetrieved so far: {}"
                            .format(total_expected_records, len(data)))
        if data:
            # Log which one we're on so we can keep track of how far we are, insert into DB ever 1k lines
            logger.info("Retrieved %s lines of get %s: %s feed, writing next %s to DB",
                        entries_processed, contract_type, award_type, len(data))
            if last_run or specific_params:
                process_and_add(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code,
                                state_code_list, country_list, exec_comp_dict, utcnow, threaded)
            else:
                add_processed_data_list(data, sess)
            logger.info("Successfully inserted %s lines of get %s: %s feed, continuing feed retrieval",
                        len(data), contract_type, award_type)
        # if we got less than the full set of records, we can stop calling the feed
        if len(data) < (MAX_ENTRIES * MAX_REQUESTS_AT_ONCE):
            # ensure we loaded the number of records we expected to, otherwise we'll need to reload
            if entries_processed != total_expected_records:
                raise Exception("Records retrieved != Total expected records\nExpected: {}\nRetrieved: {}"
                                .format(total_expected_records, entries_processed))
            else:
                if 'records_received' not in metrics:
                    metrics['records_received'] = total_expected_records
                else:
                    metrics['records_received'] += total_expected_records
            break
        else:
            # clear the written batch and keep paging
            data = []
    logger.info("Total entries in %s: %s feed: %s", contract_type, award_type, entries_processed)
    logger.info("Processed %s: %s data", contract_type, award_type)
def get_delete_data(contract_type, now, sess, last_run, start_date=None, end_date=None, metrics=None):
    """ Get data from the delete feed and remove the matching records from the DB.

        Args:
            contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
            now: a timestamp used for the deletion-record file name
            sess: the database connection
            last_run: a date indicating the last time the pull was run
            start_date: a date indicating the first date to pull from (must be provided with end_date)
            end_date: a date indicating the last date to pull from (must be provided with start_date)
            metrics: a dictionary to gather metrics for the script in
    """
    if not metrics:
        metrics = {}
    data = []
    yesterday = now - datetime.timedelta(days=1)
    last_run_date = last_run - relativedelta(days=1)
    params = 'LAST_MOD_DATE:[' + last_run_date.strftime('%Y/%m/%d') + ',' + yesterday.strftime('%Y/%m/%d') + '] '
    if start_date and end_date:
        params = 'LAST_MOD_DATE:[' + start_date + ',' + end_date + '] '
        # If we just call deletes, we have to set the date. If we don't provide dates, some other part has to have run
        # already so this is the only place it needs to get set.
        if not metrics['start_date']:
            metrics['start_date'] = start_date
        if not metrics['end_date']:
            metrics['end_date'] = end_date
    base_url = delete_url + params + 'CONTRACT_TYPE:"' + contract_type.upper() + '"'
    logger.info('Starting delete feed: %s', base_url)

    # retrieve the total count of expected records for this pull
    total_expected_records = get_total_expected_records(base_url)
    logger.info('{} record(s) expected from this feed'.format(total_expected_records))

    processed_deletions = 0
    while True:
        # Use the shared retry helper. The previous inline version had no retry loop: after a caught
        # connection exception it slept once and then fell through to parse a potentially unbound response.
        resp = get_with_exception_hand(base_url + '&start=' + str(processed_deletions), expect_entries=False)
        resp_data = xmltodict.parse(resp.text, process_namespaces=True, namespaces=FPDS_NAMESPACES)

        # only list the data if there's data to list
        try:
            listed_data = list_data(resp_data['feed']['entry'])
        except KeyError:
            listed_data = []

        if processed_deletions > total_expected_records:
            # processed_deletions is an int; the old message called len() on it and raised a TypeError
            raise Exception("Total number of expected records has changed\nExpected: {}\nRetrieved so far: {}"
                            .format(total_expected_records, processed_deletions))

        for ld in listed_data:
            data.append(ld)
            processed_deletions += 1
            # Every 100 lines, log which one we're on so we can keep track of how far we are
            if processed_deletions % 100 == 0:
                logger.info("On line %s of %s delete feed", str(processed_deletions), contract_type)

        # if we got less than the full set of records (the feed pages by 10) we can stop calling the feed
        if len(listed_data) < 10:
            # ensure we loaded the number of records we expected to, otherwise we'll need to reload
            if processed_deletions != total_expected_records:
                # report the running total, not the size of the final page
                raise Exception("Records retrieved != Total expected records\nExpected: {}\nRetrieved: {}"
                                .format(total_expected_records, processed_deletions))
            else:
                if 'deletes_received' not in metrics:
                    metrics['deletes_received'] = total_expected_records
                else:
                    metrics['deletes_received'] += total_expected_records
            break
    logger.info("Total entries in %s delete feed: %s", contract_type, str(processed_deletions))

    delete_list = []
    delete_dict = {}
    for value in data:
        # get last modified date
        last_modified = value['content'][contract_type]['transactionInformation']['lastModifiedDate']
        unique_string = process_delete_data(value['content'][contract_type], atom_type=contract_type)
        existing_item = sess.query(DetachedAwardProcurement).\
            filter_by(detached_award_proc_unique=unique_string).one_or_none()
        if existing_item:
            # only add to delete list if the last modified date is later than the existing entry's last modified date
            if last_modified > existing_item.last_modified:
                delete_list.append(existing_item.detached_award_procurement_id)
                delete_dict[existing_item.detached_award_procurement_id] = existing_item.detached_award_proc_unique

    # only need to delete values if there's something to delete
    if delete_list:
        if 'records_deleted' not in metrics:
            metrics['records_deleted'] = len(delete_list)
        else:
            metrics['records_deleted'] += len(delete_list)
        sess.query(DetachedAwardProcurement).\
            filter(DetachedAwardProcurement.detached_award_procurement_id.in_(delete_list)).\
            delete(synchronize_session=False)

    # writing the file of deleted IDs (to S3 when configured, otherwise locally)
    seconds = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds())
    file_name = now.strftime('%m-%d-%Y') + "_delete_records_" + contract_type + "_" + str(seconds) + ".csv"
    metrics['deleted_{}_records_file'.format(contract_type).lower()] = file_name
    headers = ["detached_award_procurement_id", "detached_award_proc_unique"]
    if CONFIG_BROKER["use_aws"]:
        s3client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
        # add headers
        contents = bytes((",".join(headers) + "\n").encode())
        for key, value in delete_dict.items():
            contents += bytes('{},{}\n'.format(key, value).encode())
        s3client.put_object(Bucket=CONFIG_BROKER['fpds_delete_bucket'], Key=file_name, Body=contents)
    else:
        with CsvLocalWriter(file_name, headers) as writer:
            for key, value in delete_dict.items():
                writer.write([key, value])
            writer.finish_batch()
def create_lookups(sess):
    """ Create the lookups used for FPDS derivations.

        Args:
            sess: connection to database

        Returns:
            Dictionaries of sub tier agencies by code, country names by code, county names by state code + county
            code, county codes by state code + county name, state name by code, and executive compensation data by
            DUNS number
    """
    # get and create list of sub tier agencies
    sub_tier_list = {sub_tier.sub_tier_agency_code: sub_tier for sub_tier in sess.query(SubTierAgency).all()}

    # get and create list of country code -> country name mappings.
    country_list = {country.country_code: country.country_name for country in sess.query(CountryCode).all()}

    # get and create list of state code -> state name mappings. Prime the county lists with state codes
    county_by_name = {}
    county_by_code = {}
    state_code_list = {}
    state_codes = sess.query(States.state_code, func.upper(States.state_name).label('state_name')).all()
    for state_code in state_codes:
        county_by_name[state_code.state_code] = {}
        county_by_code[state_code.state_code] = {}
        state_code_list[state_code.state_code] = state_code.state_name

    # Fill the county lists with data (code -> name mappings and name -> code mappings)
    county_codes = sess.query(CountyCode.county_number, CountyCode.state_code,
                              func.upper(CountyCode.county_name).label('county_name')).all()
    for county_code in county_codes:
        # we don't want any "(CA)" endings, so strip those
        county_name = county_code.county_name.replace(' (CA)', '').strip()
        # we want all the counties in our by-code lookup because we'd be using this table anyway for derivations
        county_by_code[county_code.state_code][county_code.county_number] = county_name
        # if the county name has only letters/spaces then we want it in our by-name lookup, the rest have the
        # potential to be different from the FPDS feed
        # (raw string fixes the previously invalid '\s' escape sequence)
        if re.match(r'^[A-Z\s]+$', county_code.county_name):
            county_by_name[county_code.state_code][county_name] = county_code.county_number

    # get and create list of duns -> exec comp data mappings
    exec_comp_dict = {}
    duns_list = sess.query(DUNS).filter(DUNS.high_comp_officer1_full_na.isnot(None)).all()
    for duns in duns_list:
        exec_comp_dict[duns.awardee_or_recipient_uniqu] = \
            {'officer1_name': duns.high_comp_officer1_full_na, 'officer1_amt': duns.high_comp_officer1_amount,
             'officer2_name': duns.high_comp_officer2_full_na, 'officer2_amt': duns.high_comp_officer2_amount,
             'officer3_name': duns.high_comp_officer3_full_na, 'officer3_amt': duns.high_comp_officer3_amount,
             'officer4_name': duns.high_comp_officer4_full_na, 'officer4_amt': duns.high_comp_officer4_amount,
             'officer5_name': duns.high_comp_officer5_full_na, 'officer5_amt': duns.high_comp_officer5_amount}
    # free the potentially large query result before returning
    del duns_list

    return sub_tier_list, country_list, state_code_list, county_by_name, county_by_code, exec_comp_dict
def main():
    """Command-line entry point for the FPDS Atom feed pull.

    Modes:
      -a/--all     historical load; must be combined with exactly one of
                   -d (Delivery Order feed only) or -o (all other feeds).
      -l/--latest  incremental load since the stored FPDSUpdate date,
                   optionally bounded by -da/--dates (YYYY/mm/dd pair).
      -del/--delete  run only the delete feed for "idv", "award", or "both"
                   over an explicit date range.

    Raises:
        ValueError: on invalid flag combinations or a missing last-update
            date for an incremental run.

    Side effects: reads/writes the database session, updates FPDSUpdate,
    and dumps run metrics to pull_fpds_data_metrics.json.
    """
    sess = GlobalDB.db().session
    now = datetime.datetime.now()

    parser = argparse.ArgumentParser(description='Pull data from the FPDS Atom Feed.')
    parser.add_argument('-a', '--all', help='Clear out the database and get historical data', action='store_true')
    parser.add_argument('-l', '--latest', help='Get by last_mod_date stored in DB', action='store_true')
    parser.add_argument('-d', '--delivery', help='Used in conjunction with -a to indicate delivery order feed',
                        action='store_true')
    parser.add_argument('-o', '--other',
                        help='Used in conjunction with -a to indicate all feeds other than delivery order',
                        action='store_true')
    parser.add_argument('-da', '--dates', help='Used in conjunction with -l to specify dates to gather updates from.'
                                               'Should have 2 arguments, first and last day, formatted YYYY/mm/dd',
                        nargs=2, type=str)
    parser.add_argument('-del', '--delete', help='Used to only run the delete feed. First argument must be "both", '
                                                 '"idv", or "award". The second and third arguments must be the first '
                                                 'and last day to run the feeds for, formatted YYYY/mm/dd',
                        nargs=3, type=str)
    args = parser.parse_args()

    # Award types served by the "award" feed vs. the "IDV" feed.
    award_types_award = ["BPA Call", "Definitive Contract", "Purchase Order", "Delivery Order"]
    award_types_idv = ["GWAC", "BOA", "BPA", "FSS", "IDC"]

    # Accumulator for run statistics; written out as JSON at the end.
    metrics_json = {
        'script_name': 'pull_fpds_data.py',
        'start_time': str(now),
        'records_received': 0,
        'deletes_received': 0,
        'records_deleted': 0,
        'deleted_award_records_file': '',
        'deleted_idv_records_file': '',
        'start_date': '',
        'end_date': ''
    }

    sub_tier_list, country_list, state_code_list, county_by_name, county_by_code, exec_comp_dict = create_lookups(sess)

    if args.all:
        # Historical pull: exactly one of -d / -o must accompany -a.
        if (not args.delivery and not args.other) or (args.delivery and args.other):
            logger.error("When using the -a flag, please include either -d or -o "
                         "(but not both) to indicate which feeds to read in")
            raise ValueError("When using the -a flag, please include either -d or -o "
                             "(but not both) to indicate which feeds to read in")
        logger.info("Starting at: %s", str(datetime.datetime.now()))

        if args.other:
            # Every feed except Delivery Order.
            for award_type in award_types_idv:
                get_data("IDV", award_type, now, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
                         country_list, exec_comp_dict, metrics=metrics_json)
            for award_type in award_types_award:
                if award_type != "Delivery Order":
                    get_data("award", award_type, now, sess, sub_tier_list, county_by_name, county_by_code,
                             state_code_list, country_list, exec_comp_dict, metrics=metrics_json)
        elif args.delivery:
            get_data("award", "Delivery Order", now, sess, sub_tier_list, county_by_name, county_by_code,
                     state_code_list, country_list, exec_comp_dict, metrics=metrics_json)

        # Record (or refresh) the timestamp of this successful full pull.
        last_update = sess.query(FPDSUpdate).one_or_none()

        if last_update:
            sess.query(FPDSUpdate).update({"update_date": now}, synchronize_session=False)
        else:
            sess.add(FPDSUpdate(update_date=now))

        sess.commit()
        logger.info("Ending at: %s", str(datetime.datetime.now()))
    elif args.latest:
        logger.info("Starting at: %s", str(datetime.datetime.now()))

        last_update_obj = sess.query(FPDSUpdate).one_or_none()

        # update_date can't be null because it's being used as the PK for the table, so it can only exist or
        # there are no rows in the table. If there are no rows, act like it's an "add all"
        if not last_update_obj:
            logger.error(
                "No last_update date present, please run the script with the -a flag to generate an initial dataset")
            raise ValueError(
                "No last_update date present, please run the script with the -a flag to generate an initial dataset")
        last_update = last_update_obj.update_date

        # An explicit -da window overrides the stored last-update date.
        start_date = None
        end_date = None
        if args.dates:
            start_date = args.dates[0]
            end_date = args.dates[1]

        for award_type in award_types_idv:
            get_data("IDV", award_type, now, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
                     country_list, exec_comp_dict, last_update, start_date=start_date, end_date=end_date,
                     metrics=metrics_json)
        for award_type in award_types_award:
            get_data("award", award_type, now, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
                     country_list, exec_comp_dict, last_update, start_date=start_date, end_date=end_date,
                     metrics=metrics_json)

        # We also need to process the delete feed
        get_delete_data("IDV", now, sess, last_update, start_date, end_date, metrics=metrics_json)
        get_delete_data("award", now, sess, last_update, start_date, end_date, metrics=metrics_json)

        # Only advance the stored timestamp for a default-window run; an
        # explicit date range is a partial backfill, not a full catch-up.
        if not start_date and not end_date:
            sess.query(FPDSUpdate).update({"update_date": now}, synchronize_session=False)
        sess.commit()
        logger.info("Ending at: %s", str(datetime.datetime.now()))
    elif args.delete:
        del_type = args.delete[0]
        if del_type == 'award':
            del_awards = True
            del_idvs = False
        elif del_type == 'idv':
            del_awards = False
            del_idvs = True
        elif del_type == 'both':
            del_awards = True
            del_idvs = True
        else:
            logger.error("Delete argument must be \"idv\", \"award\", or \"both\"")
            raise ValueError("Delete argument must be \"idv\", \"award\", or \"both\"")

        if del_idvs:
            get_delete_data("IDV", now, sess, now, args.delete[1], args.delete[2], metrics=metrics_json)
        if del_awards:
            get_delete_data("award", now, sess, now, args.delete[1], args.delete[2], metrics=metrics_json)
        sess.commit()

    metrics_json['duration'] = str(datetime.datetime.now() - now)

    with open('pull_fpds_data_metrics.json', 'w+') as metrics_file:
        json.dump(metrics_json, metrics_file)
# TODO add a correct start date for "all" so we don't get ALL the data or too little of the data
# TODO fine-tune indexing
if __name__ == '__main__':
    # Run inside the Flask app context so DB config and logging are wired up.
    with create_app().app_context():
        configure_logging()
        main()
| dataactcore/scripts/pull_fpds_data.py | 89,512 | Get values from the awardID level of the xml
calculate values that aren't in any feed (or haven't been provided properly) for legal entity
calculate values that aren't in any feed (or haven't been provided properly) for place of performance
Calculate values that aren't in any feed but can be calculated.
Args:
obj: a dictionary containing the details we need to derive from and to
sess: the database connection
sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
county_by_name: a dictionary containing all county codes, keyed by state and county name
county_by_code: a dictionary containing all county names, keyed by state and county code
state_code_list: a dictionary containing all state names, keyed by state code
country_list: a dictionary containing all country names, keyed by country code
exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
Returns:
the object originally passed in with newly-calculated values added
Get values from the competition level of the xml
Get values from the contractData level of the xml
Get values from the contractID level of the xml
Create the lookups used for FPDS derivations.
Args:
sess: connection to database
Returns:
Dictionaries of sub tier agencies by code, country names by code, county names by state code + county
code, county codes by state code + county name, state name by code, and executive compensation data by
DUNS number
Create a list of processed data
Args:
data: an object containing the data gathered from the feed
sess: the database connection
contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
county_by_name: a dictionary containing all county codes, keyed by state and county name
county_by_code: a dictionary containing all county names, keyed by state and county code
state_code_list: a dictionary containing all state names, keyed by state code
country_list: a dictionary containing all country names, keyed by country code
exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
Returns:
A list containing the processed and calculated data.
Get values from the dollarValues level of the xml
Get values from the genericTags level of the xml
Get the data from the atom feed based on contract/award type and the last time the script was run.
Args:
contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
award_type: a string indicating what the award type of the feed being checked is
now: a timestamp indicating the time to set the updated_at to
sess: the database connection
sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
county_by_name: a dictionary containing all county codes, keyed by state and county name
county_by_code: a dictionary containing all county names, keyed by state and county code
state_code_list: a dictionary containing all state names, keyed by state code
country_list: a dictionary containing all country names, keyed by country code
exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
last_run: a date indicating the last time the pull was run
threaded: a boolean indicating whether the process is running as a thread or not
start_date: a date indicating the first date to pull from (must be provided with end_date)
end_date: a date indicating the last date to pull from (must be provided with start_date)
metrics: a dictionary to gather metrics for the script in
specific_params: a string containing a specific set of params to run the query with (used for outside
scripts that need to run a data load)
Get data from the delete feed
Retrieve the total number of expected records based on the last paginated URL
Retrieve data from FPDS, allow for multiple retries and timeouts
Get values from the legislativeMandates level of the xml
Get values from the placeOfPerformance level of the xml
Start the processing for data and add it to the DB.
Args:
data: an object containing the data gathered from the feed
contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
sess: the database connection
sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
county_by_name: a dictionary containing all county codes, keyed by state and county name
county_by_code: a dictionary containing all county names, keyed by state and county code
state_code_list: a dictionary containing all state names, keyed by state code
country_list: a dictionary containing all country names, keyed by country code
exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
now: a timestamp indicating the time to set the updated_at to
threaded: a boolean indicating whether the process is running as a thread or not
Process the data coming in.
Args:
data: an object containing the data gathered from the feed
sess: the database connection
atom_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
county_by_name: a dictionary containing all county codes, keyed by state and county name
county_by_code: a dictionary containing all county names, keyed by state and county code
state_code_list: a dictionary containing all state names, keyed by state code
country_list: a dictionary containing all country names, keyed by country code
exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
Returns:
An object containing the processed and calculated data.
process the delete feed data coming in
Get values from the productOrServiceInformation level of the xml
Get values from the purchaserInformation level of the xml
Get values from the relevantContractDates level of the xml
Get values from the totalDollarValues level of the xml
Get values from the vendorSiteDetails level of the xml (sub-level of vendor)
Get values from the vendor level of the xml
noqa noqa Used for asyncio get requests against the ATOM feed make a list so it's consistent If it's now a string, we want to strip it if the zip code is not a valid US zip, toss the entire zip if we have a 9 digit code, grab the first match for 9 digit zips if it's not 9 digits or we found no results from the 9 digit we received if we found results at any point, return the county code from it get agencyID name get agencyID name get descriptions for things in the value map get descriptions for things in the value map if there is only one dict, convert it to a list of one dict get descriptions for things in the value map placeOfPerformanceName placeOfPerformanceName within placeOfPerformance, the principalPlaceOfPerformance sub-level get descriptions for things in the value map get descriptions for things in the value map get country of origin name get descriptions for things in the value map name values associated with certain values in purchaserInformation base vendor level get descriptions for things in the value map vendorHeader sub-level make sure key exists before passing it vendorSiteDetails sub-level (there are a lot so it gets its own function) base vendorSiteDetails level typeOfEducationalEntity sub-level typeOfGovernmentEntity sub-level vendorBusinessTypes sub-level vendorBusinessTypes > businessOrOrganizationType sub-level vendorBusinessTypes > federalGovernment sub-level vendorBusinessTypes > localGovernment sub-level vendorCertifications sub-level entityIdentifiers sub-level entityIdentifiers > vendorDUNSInformation sub-level entityIdentifiers > vendorUEIInformation sub-level vendorLineOfBusiness sub-level vendorLocation sub-level differentiating between US and foreign states need to set this even if we're not going to be having a code because we need to access it later if it is in the USA, grab the description for the state getting the name associated with the country code vendorOrganizationFactors sub-level vendorOrganizationFactors > profitStructure 
sub-level vendorRelationshipWithFederalGovernment sub-level vendorSocioEconomicIndicators sub-level vendorSocioEconomicIndicators > minorityOwned sub-level only do any of these calculation if the country code is in the list of US territories If it's in the list but not USA, find its state code in the list and put that in the state code spot, get the state name, then replace country code and country description with USA and UNITED STATES respectively derive state name if we don't have it calculate place of performance county code make sure they gave us a valid state and then check if it's in our lookup if accessing the county code by state code and county name didn't work, try by zip4a if we have it if we didn't have a county name but got the county code, we can grab the name if we have content in the zip code and it's in a valid US format, split it into 5 and 4 digit if there is any country code (checked outside function) but not a country name, try to get the country name do legal entity derivations only if legal entity country code is in a US territory of any kind if it's in the list but not USA, find its state code in the list and put that in the state code spot, get the state name, then replace country code and country description with USA and UNITED STATES respectively derive state name if we don't have it calculate legal entity county code and split zip when possible if we have a county code and a state code, we can try to get the county name make sure they gave us a valid state and then check if it's in our lookup if there is any country code (checked outside function) but not a country name, try to get the country name we want to null out all the calculated columns in case this is an update to the records calculate awarding agency codes/names based on awarding sub tier agency codes calculate funding agency codes/names based on funding sub tier agency codes do place of performance calculations only if we have SOME country code do legal entity calculations 
only if we have SOME country code calculate business categories Calculate executive compensation data for the entry. Need to make sure they're null in case this is updating and the DUNS has changed somehow calculate unique award key Get the value in the object or, if the key doesn't exist or value is None, set it to "-none-" calculate unique key Get the value in the object or, if the key doesn't exist or value is None, set it to "-none-" The order of the unique key is agency_id, referenced_idv_agency_iden, piid, award_modification_amendme, parent_award_id, transaction_number make sure key exists before passing it transaction_number is a part of the unique identifier, set it to None make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it these values need to be filled so the existence check when calculating county data doesn't freak out make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it make sure key exists before passing it clear out potentially excel-breaking whitespace from specific fields order of unique constraints in string: agency_id, referenced_idv_agency_iden, piid, award_modification_amendme, parent_award_id, transaction_number get all values that make up unique key referenced_idv_agency_iden not used in IDV identifier, just set it to "-none-" parent_award_id not used in IDV identifier and transaction_number not in IDV feed, just set them to "-none-" we should always expect entries, otherwise we shouldn't be calling it get a single call so we can find the last page retrieve all URLs retrieve the "last" URL from the list retrieve the count from the URL of the last page retrieve the page from the final_request_url retrieve the last page of 
data If a specific set of params was provided, use that if a date that the script was last successfully run is not provided, get all data if a date that the script was last successfully run is provided, get data since that date retrieve the total count of expected records for this pull pull in the next MAX_ENTRIES * REQUESTS_AT_ONCE until we get anything less than the MAX_ENTRIES adding +1 to ensure that they're not adding anything since we got the expected count End async get requests def Find entries that don't have FPDS content and print them all Log which one we're on so we can keep track of how far we are, insert into DB ever 1k lines if we got less than the full set of records, we can stop calling the feed ensure we loaded the number of records we expected to, otherwise we'll need to reload If we just call deletes, we have to set the date. If we don't provide dates, some other part has to have run already so this is the only place it needs to get set. retrieve the total count of expected records for this pull only list the data if there's data to list Every 100 lines, log which one we're on so we can keep track of how far we are if we got less than the full set of records we can stop calling the feed ensure we loaded the number of records we expected to, otherwise we'll need to reload get last modified date only add to delete list if the last modified date is later than the existing entry's last modified date only need to delete values if there's something to delete writing the file add headers get and create list of sub tier agencies get and create list of country code -> country name mappings. get and create list of state code -> state name mappings. 
Prime the county lists with state codes Fill the county lists with data (code -> name mappings and name -> code mappings) we don't want any "(CA)" endings, so strip those we want all the counties in our by-code lookup because we'd be using this table anyway for derivations if the county name has only letters/spaces then we want it in our by-name lookup, the rest have the potential to be different from the FPDS feed get and create list of duns -> exec comp data mappings update_date can't be null because it's being used as the PK for the table, so it can only exist or there are no rows in the table. If there are no rows, act like it's an "add all" We also need to process the delete feed TODO add a correct start date for "all" so we don't get ALL the data or too little of the data TODO fine-tune indexing | 15,142 | en | 0.850632 |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .AuditModel import AuditModel
from django.db import models
class ScreenIntakeMethodCode(AuditModel):
    """
    Refers to the type of intake mechanism for a well screen, i.e. Screen, Open Bottom, Uncased Hole.
    """
    # Short code identifying the intake method; primary key, fixed at creation.
    screen_intake_code = models.CharField(primary_key=True, max_length=10, editable=False)
    # Human-readable name of the intake method.
    description = models.CharField(max_length=100)
    # Controls the position of this entry when codes are listed.
    display_order = models.PositiveIntegerField()
    # Optional activity window for the code; both ends may be unset.
    effective_date = models.DateTimeField(blank=True, null=True)
    expiry_date = models.DateTimeField(blank=True, null=True)

    class Meta:
        db_table = 'screen_intake_method_code'
        ordering = ['display_order', 'description']

    def __str__(self):
        return self.description
| app/gwells/models/ScreenIntakeMethodCode.py | 1,289 | Refers to the type of intake mechanism for a well screen, i.e. Screen, Open Bottom, Uncased Hole.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. | 621 | en | 0.885207 |
from __future__ import with_statement
from distutils.version import StrictVersion
from itertools import chain
from select import select
import os
import socket
import sys
import threading
import warnings
try:
import ssl
ssl_available = True
except ImportError:
ssl_available = False
from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs,
unquote)
from redis.exceptions import (
RedisError,
ConnectionError,
TimeoutError,
BusyLoadingError,
ResponseError,
InvalidResponse,
AuthenticationError,
NoScriptError,
ExecAbortError,
ReadOnlyError
)
from redis.utils import HIREDIS_AVAILABLE
if HIREDIS_AVAILABLE:
    import hiredis

    hiredis_version = StrictVersion(hiredis.__version__)
    # Feature-detect hiredis capabilities: callable error factories arrived
    # in 0.1.3, byte-buffer feeding in 0.1.4.
    HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
        hiredis_version >= StrictVersion('0.1.3')
    HIREDIS_SUPPORTS_BYTE_BUFFER = \
        hiredis_version >= StrictVersion('0.1.4')

    if not HIREDIS_SUPPORTS_BYTE_BUFFER:
        msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
               "hiredis %s. Please consider upgrading." % hiredis.__version__)
        warnings.warn(msg)

    HIREDIS_USE_BYTE_BUFFER = True
    # only use byte buffer if hiredis supports it and the Python version
    # is >= 2.7
    if not HIREDIS_SUPPORTS_BYTE_BUFFER or (
            sys.version_info[0] == 2 and sys.version_info[1] < 7):
        HIREDIS_USE_BYTE_BUFFER = False

# RESP protocol framing bytes (array/bulk markers and line terminator).
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')

SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
    """
    Wrapper marking a value as a literal fragment of a Redis command (the
    command name or a hard-coded argument), so the encoding rules applied
    to user-supplied values are skipped for it.
    """

    def __init__(self, value):
        # Unwrap nested Tokens so ``value`` always holds the raw literal.
        while isinstance(value, Token):
            value = value.value
        self.value = value

    def __repr__(self):
        return self.value

    def __str__(self):
        return self.value
class BaseParser(object):
    # Maps the error-code prefix of a Redis error reply to the exception
    # class used to represent it.
    EXCEPTION_CLASSES = {
        'ERR': ResponseError,
        'EXECABORT': ExecAbortError,
        'LOADING': BusyLoadingError,
        'NOSCRIPT': NoScriptError,
        'READONLY': ReadOnlyError,
    }

    def parse_error(self, response):
        "Parse an error response"
        code, _, detail = response.partition(' ')
        exc_class = self.EXCEPTION_CLASSES.get(code)
        if exc_class is not None:
            # Known error code: strip the code prefix and wrap the rest.
            return exc_class(detail)
        return ResponseError(response)
class SocketBuffer(object):
    """Buffered reader over a raw socket for assembling Redis replies.

    Incoming bytes accumulate in an in-memory BytesIO; ``read``/``readline``
    hand out CRLF-terminated payloads, and the buffer is purged once fully
    consumed so it does not grow without bound.
    """

    def __init__(self, socket, socket_read_size):
        self._sock = socket
        self.socket_read_size = socket_read_size
        self._buffer = BytesIO()
        # number of bytes written to the buffer from the socket
        self.bytes_written = 0
        # number of bytes read from the buffer
        self.bytes_read = 0

    @property
    def length(self):
        # Bytes buffered but not yet handed out to a caller.
        return self.bytes_written - self.bytes_read

    def _read_from_socket(self, length=None):
        """Pull data from the socket into the buffer.

        When ``length`` is given, keep recv()ing until at least that many
        new bytes have arrived; otherwise perform a single recv().
        Raises TimeoutError or ConnectionError on socket failure.
        """
        socket_read_size = self.socket_read_size
        buf = self._buffer
        buf.seek(self.bytes_written)
        marker = 0

        try:
            while True:
                data = self._sock.recv(socket_read_size)
                # an empty string indicates the server shutdown the socket
                if isinstance(data, bytes) and len(data) == 0:
                    raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
                buf.write(data)
                data_length = len(data)
                self.bytes_written += data_length
                marker += data_length

                if length is not None and length > marker:
                    continue
                break
        except socket.timeout:
            raise TimeoutError("Timeout reading from socket")
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError("Error while reading from socket: %s" %
                                  (e.args,))

    def read(self, length):
        """Return ``length`` payload bytes, consuming the trailing CRLF."""
        length = length + 2  # make sure to read the \r\n terminator
        # make sure we've read enough data from the socket
        if length > self.length:
            self._read_from_socket(length - self.length)

        self._buffer.seek(self.bytes_read)
        data = self._buffer.read(length)
        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        # Drop the CRLF terminator from the returned payload.
        return data[:-2]

    def readline(self):
        """Return the next CRLF-terminated line, without the CRLF."""
        buf = self._buffer
        buf.seek(self.bytes_read)
        data = buf.readline()
        while not data.endswith(SYM_CRLF):
            # there's more data in the socket that we need
            self._read_from_socket()
            buf.seek(self.bytes_read)
            data = buf.readline()

        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def purge(self):
        """Discard all buffered data and reset the read/write counters."""
        self._buffer.seek(0)
        self._buffer.truncate()
        self.bytes_written = 0
        self.bytes_read = 0

    def close(self):
        # Release the buffer and drop the socket reference (socket itself
        # is closed by the owning connection, not here).
        self.purge()
        self._buffer.close()
        self._buffer = None
        self._sock = None
class PythonParser(BaseParser):
    "Plain Python parsing class"

    # Encoding used to decode bulk replies; None means return raw bytes.
    encoding = None

    def __init__(self, socket_read_size):
        self.socket_read_size = socket_read_size
        self._sock = None
        self._buffer = None

    def __del__(self):
        try:
            self.on_disconnect()
        except Exception:
            pass

    def on_connect(self, connection):
        "Called when the socket connects"
        self._sock = connection._sock
        self._buffer = SocketBuffer(self._sock, self.socket_read_size)
        # Only decode bulk replies when the connection asks for it.
        if connection.decode_responses:
            self.encoding = connection.encoding

    def on_disconnect(self):
        "Called when the socket disconnects"
        if self._sock is not None:
            self._sock.close()
            self._sock = None
        if self._buffer is not None:
            self._buffer.close()
            self._buffer = None
        self.encoding = None

    def can_read(self):
        # True when buffered data is already available (no socket read needed).
        return self._buffer and bool(self._buffer.length)

    def read_response(self):
        """Read and parse one RESP reply from the buffered socket.

        Returns the parsed value (string, int, None, or nested list), or a
        ResponseError instance for in-band Redis errors; raises
        ConnectionError / InvalidResponse on protocol-level failures.
        """
        response = self._buffer.readline()
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # The first byte of a RESP reply identifies its type.
        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                # $-1 is the RESP encoding of a null bulk string.
                return None
            response = self._buffer.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                # *-1 is the RESP encoding of a null array.
                return None
            # Recursively parse each element of the array.
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        return response
class HiredisParser(BaseParser):
    "Parser class for connections using Hiredis"

    def __init__(self, socket_read_size):
        if not HIREDIS_AVAILABLE:
            raise RedisError("Hiredis is not installed")
        self.socket_read_size = socket_read_size

        if HIREDIS_USE_BYTE_BUFFER:
            # Reusable recv_into() buffer; avoids one allocation per read.
            self._buffer = bytearray(socket_read_size)

    def __del__(self):
        try:
            self.on_disconnect()
        except Exception:
            pass

    def on_connect(self, connection):
        self._sock = connection._sock
        kwargs = {
            'protocolError': InvalidResponse,
            'replyError': self.parse_error,
        }

        # hiredis < 0.1.3 doesn't support functions that create exceptions
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            kwargs['replyError'] = ResponseError

        if connection.decode_responses:
            kwargs['encoding'] = connection.encoding
        self._reader = hiredis.Reader(**kwargs)
        # False is hiredis's sentinel for "no complete reply available yet".
        self._next_response = False

    def on_disconnect(self):
        self._sock = None
        self._reader = None
        self._next_response = False

    def can_read(self):
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        if self._next_response is False:
            # Try to pull a complete reply out of data already fed to hiredis.
            self._next_response = self._reader.gets()
        return self._next_response is not False

    def read_response(self):
        """Return one parsed reply, reading from the socket as needed."""
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # _next_response might be cached from a can_read() call
        if self._next_response is not False:
            response = self._next_response
            self._next_response = False
            return response

        response = self._reader.gets()
        socket_read_size = self.socket_read_size
        while response is False:
            try:
                if HIREDIS_USE_BYTE_BUFFER:
                    bufflen = self._sock.recv_into(self._buffer)
                    if bufflen == 0:
                        raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
                else:
                    buffer = self._sock.recv(socket_read_size)
                    # an empty string indicates the server shutdown the socket
                    if not isinstance(buffer, bytes) or len(buffer) == 0:
                        raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
            except socket.timeout:
                raise TimeoutError("Timeout reading from socket")
            except socket.error:
                e = sys.exc_info()[1]
                raise ConnectionError("Error while reading from socket: %s" %
                                      (e.args,))
            if HIREDIS_USE_BYTE_BUFFER:
                self._reader.feed(self._buffer, 0, bufflen)
            else:
                self._reader.feed(buffer)
            # proactively, but not conclusively, check if more data is in the
            # buffer. if the data received doesn't end with \r\n, there's more.
            if HIREDIS_USE_BYTE_BUFFER:
                if bufflen > 2 and \
                        self._buffer[bufflen - 2:bufflen] != SYM_CRLF:
                    continue
            else:
                if not buffer.endswith(SYM_CRLF):
                    continue
            response = self._reader.gets()
        # if an older version of hiredis is installed, we need to attempt
        # to convert ResponseErrors to their appropriate types.
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            if isinstance(response, ResponseError):
                response = self.parse_error(response.args[0])
            elif isinstance(response, list) and response and \
                    isinstance(response[0], ResponseError):
                response[0] = self.parse_error(response[0].args[0])
        # if the response is a ConnectionError or the response is a list and
        # the first item is a ConnectionError, raise it as something bad
        # happened
        if isinstance(response, ConnectionError):
            raise response
        elif isinstance(response, list) and response and \
                isinstance(response[0], ConnectionError):
            raise response[0]
        return response
# Prefer the C-accelerated hiredis parser when the library is installed;
# fall back to the pure-Python implementation otherwise.
if HIREDIS_AVAILABLE:
    DefaultParser = HiredisParser
else:
    DefaultParser = PythonParser
class Connection(object):
"Manages TCP communication to and from a Redis server"
description_format = "Connection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, host='localhost', port=6379, db=0, password=None,
socket_timeout=None, socket_connect_timeout=None,
socket_keepalive=False, socket_keepalive_options=None,
retry_on_timeout=False, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.host = host
self.port = int(port)
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'host': self.host,
'port': self.port,
'db': self.db,
}
self._connect_callbacks = []
def __repr__(self):
return self.description_format % self._description_args
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
def connect(self):
"Connects to the Redis server if not already connected"
if self._sock:
return
try:
sock = self._connect()
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except RedisError:
# clean up after any error in on_connect
self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
callback(self)
def _connect(self):
"Create a TCP socket connection"
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
# TCP_NODELAY
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
for k, v in iteritems(self.socket_keepalive_options):
sock.setsockopt(socket.SOL_TCP, k, v)
# set the socket_connect_timeout before we connect
sock.settimeout(self.socket_connect_timeout)
# connect
sock.connect(socket_address)
# set the socket_timeout now that we're connected
sock.settimeout(self.socket_timeout)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting to %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"Initialize the connection, authenticate and select a database"
self._parser.on_connect(self)
# if a password is specified, authenticate
if self.password:
self.send_command('AUTH', self.password)
if nativestr(self.read_response()) != 'OK':
raise AuthenticationError('Invalid Password')
# if a database is specified, switch to it
if self.db:
self.send_command('SELECT', self.db)
if nativestr(self.read_response()) != 'OK':
raise ConnectionError('Invalid Database')
def disconnect(self):
"Disconnects from the Redis server"
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def send_packed_command(self, command):
"Send an already packed command to the Redis server"
if not self._sock:
self.connect()
try:
if isinstance(command, str):
command = [command]
for item in command:
self._sock.sendall(item)
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
except socket.error:
e = sys.exc_info()[1]
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
raise ConnectionError("Error %s while writing to socket. %s." %
(_errno, errmsg))
except:
self.disconnect()
raise
def send_command(self, *args):
"Pack and send a command to the Redis server"
self.send_packed_command(self.pack_command(*args))
def can_read(self, timeout=0):
"Poll the socket to see if there's data that can be read."
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read() or \
bool(select([sock], [], [], timeout)[0])
def read_response(self):
"Read the response from a previously sent command"
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
return response
def encode(self, value):
"Return a bytestring representation of the value"
if isinstance(value, Token):
return b(value.value)
elif isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
value = b(str(value))
elif isinstance(value, float):
value = b(repr(value))
elif not isinstance(value, basestring):
value = str(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def pack_command(self, *args):
"Pack a series of arguments into the Redis protocol"
output = []
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The Redis server expects these
# arguments to be sent separately, so split the first argument
# manually. All of these arguements get wrapped in the Token class
# to prevent them from being encoded.
command = args[0]
if ' ' in command:
args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
else:
args = (Token(command),) + args[1:]
buff = SYM_EMPTY.join(
(SYM_STAR, b(str(len(args))), SYM_CRLF))
for arg in imap(self.encode, args):
# to avoid large string mallocs, chunk the command into the
# output list if we're sending large values
if len(buff) > 6000 or len(arg) > 6000:
buff = SYM_EMPTY.join(
(buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
output.append(buff)
output.append(arg)
buff = SYM_CRLF
else:
buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
SYM_CRLF, arg, SYM_CRLF))
output.append(buff)
return output
def pack_commands(self, commands):
"Pack multiple commands into the Redis protocol"
output = []
pieces = []
buffer_length = 0
for cmd in commands:
for chunk in self.pack_command(*cmd):
pieces.append(chunk)
buffer_length += len(chunk)
if buffer_length > 6000:
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
class SSLConnection(Connection):
description_format = "SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>"
def __init__(self, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, **kwargs):
if not ssl_available:
raise RedisError("Python wasn't built with SSL support")
super(SSLConnection, self).__init__(**kwargs)
self.keyfile = ssl_keyfile
self.certfile = ssl_certfile
if ssl_cert_reqs is None:
ssl_cert_reqs = ssl.CERT_NONE
elif isinstance(ssl_cert_reqs, basestring):
CERT_REQS = {
'none': ssl.CERT_NONE,
'optional': ssl.CERT_OPTIONAL,
'required': ssl.CERT_REQUIRED
}
if ssl_cert_reqs not in CERT_REQS:
raise RedisError(
"Invalid SSL Certificate Requirements Flag: %s" %
ssl_cert_reqs)
ssl_cert_reqs = CERT_REQS[ssl_cert_reqs]
self.cert_reqs = ssl_cert_reqs
self.ca_certs = ssl_ca_certs
def _connect(self):
"Wrap the socket with SSL support"
sock = super(SSLConnection, self)._connect()
sock = ssl.wrap_socket(sock,
cert_reqs=self.cert_reqs,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.ca_certs)
return sock
class UnixDomainSocketConnection(Connection):
description_format = "UnixDomainSocketConnection<path=%(path)s,db=%(db)s>"
def __init__(self, path='', db=0, password=None,
socket_timeout=None, encoding='utf-8',
encoding_errors='strict', decode_responses=False,
retry_on_timeout=False,
parser_class=DefaultParser, socket_read_size=65536):
self.pid = os.getpid()
self.path = path
self.db = db
self.password = password
self.socket_timeout = socket_timeout
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._sock = None
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'path': self.path,
'db': self.db,
}
self._connect_callbacks = []
def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock
def _error_message(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "Error connecting to unix socket: %s. %s." % \
(self.path, exception.args[0])
else:
return "Error %s connecting to unix socket: %s. %s." % \
(exception.args[0], self.path, exception.args[1])
class ConnectionPool(object):
"Generic connection pool"
@classmethod
def from_url(cls, url, db=None, decode_components=False, **kwargs):
"""
Return a connection pool configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
redis:// creates a normal TCP socket connection
rediss:// creates a SSL wrapped TCP socket connection
unix:// creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
url_string = url
url = urlparse(url)
qs = ''
# in python2.6, custom URL schemes don't recognize querystring values
# they're left as part of the url.path.
if '?' in url.path and not url.query:
# chop the querystring including the ? off the end of the url
# and reparse it.
qs = url.path.split('?', 1)[1]
url = urlparse(url_string[:-(len(qs) + 1)])
else:
qs = url.query
url_options = {}
for name, value in iteritems(parse_qs(qs)):
if value and len(value) > 0:
url_options[name] = value[0]
if decode_components:
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
password = url.password
path = url.path
hostname = url.hostname
# We only support redis:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
else:
url_options.update({
'host': hostname,
'port': int(url.port or 6379),
'password': password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
# last shot at the db value
url_options['db'] = int(url_options.get('db', db or 0))
# update the arguments from the URL values
kwargs.update(url_options)
# backwards compatability
if 'charset' in kwargs:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
kwargs['encoding'] = kwargs.pop('charset')
if 'errors' in kwargs:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
kwargs['encoding_errors'] = kwargs.pop('errors')
return cls(**kwargs)
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
"""
Create a connection pool. If max_connections is set, then this
object raises redis.ConnectionError when the pool's limit is reached.
By default, TCP connections are created connection_class is specified.
Use redis.UnixDomainSocketConnection for unix sockets.
Any additional keyword arguments are passed to the constructor of
connection_class.
"""
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, (int, long)) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
self.reset()
def __repr__(self):
return "%s<%s>" % (
type(self).__name__,
self.connection_class.description_format % self.connection_kwargs,
)
def reset(self):
self.pid = os.getpid()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
self._check_lock = threading.Lock()
def _checkpid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
# another thread already did the work while we waited
# on the lock.
return
self.disconnect()
self.reset()
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
self._checkpid()
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"Releases the connection back to the pool"
self._checkpid()
if connection.pid != self.pid:
return
self._in_use_connections.remove(connection)
self._available_connections.append(connection)
def disconnect(self):
"Disconnects all connections in the pool"
all_conns = chain(self._available_connections,
self._in_use_connections)
for connection in all_conns:
connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from redis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
It performs the same function as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,
it maintains a pool of reusable connections that can be shared by
multiple redis clients (safely across threads if required).
The difference is that, in the event that a client tries to get a
connection from the pool when all of connections are in use, rather than
raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation does), it
makes the client wait ("blocks") for a specified number of seconds until
a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
# Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
# Raise a ``ConnectionError`` after five seconds if a connection is
# not available.
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(self, max_connections=50, timeout=20,
connection_class=Connection, queue_class=LifoQueue,
**connection_kwargs):
self.queue_class = queue_class
self.timeout = timeout
super(BlockingConnectionPool, self).__init__(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
connection = self.pool.get(block=True, timeout=self.timeout)
except Empty:
# Note that this is not caught by the redis client and will be
# raised unless handled by application code. If you want never to
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Releases the connection back to the pool."
# Make sure we haven't changed process.
self._checkpid()
if connection.pid != self.pid:
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except Full:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
def disconnect(self):
"Disconnects all connections in the pool."
for connection in self._connections:
connection.disconnect()
| redis/connection.py | 37,064 | Thread-safe blocking connection pool::
>>> from redis.client import Redis
>>> client = Redis(connection_pool=BlockingConnectionPool())
It performs the same function as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,
it maintains a pool of reusable connections that can be shared by
multiple redis clients (safely across threads if required).
The difference is that, in the event that a client tries to get a
connection from the pool when all of connections are in use, rather than
raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default
``:py:class: ~redis.connection.ConnectionPool`` implementation does), it
makes the client wait ("blocks") for a specified number of seconds until
a connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
# Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
# Raise a ``ConnectionError`` after five seconds if a connection is
# not available.
>>> pool = BlockingConnectionPool(timeout=5)
Manages TCP communication to and from a Redis server
Generic connection pool
Parser class for connections using Hiredis
Plain Python parsing class
Literal strings in Redis commands, such as the command names and any
hard-coded arguments are wrapped in this class so we know not to apply
and encoding rules on them.
Create a connection pool. If max_connections is set, then this
object raises redis.ConnectionError when the pool's limit is reached.
By default, TCP connections are created connection_class is specified.
Use redis.UnixDomainSocketConnection for unix sockets.
Any additional keyword arguments are passed to the constructor of
connection_class.
Create a TCP socket connection
Wrap the socket with SSL support
Create a Unix domain socket connection
Poll the socket to see if there's data that can be read.
Connects to the Redis server if not already connected
Disconnects from the Redis server
Disconnects all connections in the pool
Disconnects all connections in the pool.
Return a bytestring representation of the value
Return a connection pool configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
redis:// creates a normal TCP socket connection
rediss:// creates a SSL wrapped TCP socket connection
unix:// creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
The ``decode_components`` argument allows this function to work with
percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
escapes will be replaced by their single-character equivalents after
the URL has been parsed. This only applies to the ``hostname``,
``path``, and ``password`` components.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
Get a connection from the pool
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
Create a new connection
Make a fresh connection.
Called when the socket connects
Initialize the connection, authenticate and select a database
Called when the socket disconnects
Pack a series of arguments into the Redis protocol
Pack multiple commands into the Redis protocol
Parse an error response
Read the response from a previously sent command
Releases the connection back to the pool
Releases the connection back to the pool.
Pack and send a command to the Redis server
Send an already packed command to the Redis server
only use byte buffer if hiredis supports it and the Python version is >= 2.7 number of bytes written to the buffer from the socket number of bytes read from the buffer an empty string indicates the server shutdown the socket make sure to read the \r\n terminator make sure we've read enough data from the socket purge the buffer when we've consumed it all so it doesn't grow forever there's more data in the socket that we need purge the buffer when we've consumed it all so it doesn't grow forever server returned an error if the error is a ConnectionError, raise immediately so the user is notified otherwise, we're dealing with a ResponseError that might belong inside a pipeline response. the connection's read_response() and/or the pipeline's execute() will raise this error if necessary, so just return the exception instance here. single value int value bulk response multi-bulk response hiredis < 0.1.3 doesn't support functions that create exceptions _next_response might be cached from a can_read() call an empty string indicates the server shutdown the socket proactively, but not conclusively, check if more data is in the buffer. if the data received doesn't end with \r\n, there's more. if an older version of hiredis is installed, we need to attempt to convert ResponseErrors to their appropriate types. if the response is a ConnectionError or the response is a list and the first item is a ConnectionError, raise it as something bad happened clean up after any error in on_connect run any user callbacks. 
right now the only internal callback is for pubsub channel/pattern resubscription we want to mimic what socket.create_connection does to support ipv4/ipv6, but we want to set options prior to calling socket.connect() TCP_NODELAY TCP_KEEPALIVE set the socket_connect_timeout before we connect connect set the socket_timeout now that we're connected args for socket.error can either be (errno, "message") or just "message" if a password is specified, authenticate if a database is specified, switch to it the client might have included 1 or more literal arguments in the command name, e.g., 'CONFIG GET'. The Redis server expects these arguments to be sent separately, so split the first argument manually. All of these arguements get wrapped in the Token class to prevent them from being encoded. to avoid large string mallocs, chunk the command into the output list if we're sending large values args for socket.error can either be (errno, "message") or just "message" in python2.6, custom URL schemes don't recognize querystring values they're left as part of the url.path. chop the querystring including the ? off the end of the url and reparse it. We only support redis:// and unix:// schemes. If there's a path argument, use it as the db argument if a querystring value wasn't specified last shot at the db value update the arguments from the URL values backwards compatability another thread already did the work while we waited on the lock. Create and fill up a thread safe queue with ``None`` values. Keep a list of actual connection instances so that we can disconnect them later. Make sure we haven't changed process. Try and get a connection from the pool. If one isn't available within self.timeout then raise a ``ConnectionError``. Note that this is not caught by the redis client and will be raised unless handled by application code. If you want never to If the ``connection`` is actually ``None`` then that's a cue to make a new connection to add to the pool. 
Make sure we haven't changed process. Put the connection back into the pool. perhaps the pool has been reset() after a fork? regardless, we don't want this connection | 8,278 | en | 0.83408 |
#!/usr/bin/python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
    """Raised by run() when a command exits nonzero and fail_hard is set."""

    def __init__(self, value):
        # Failure description, typically the command line that failed.
        self.value = value

    def __str__(self):
        return repr(self.value)
def run(command, **kwargs):
    """Run a shell command, substituting ${VARS} in *command* from os.environ.

    Keyword arguments are passed through to subprocess.Popen.  The extra
    keyword 'fail_hard' (default True) makes a nonzero exit status raise
    RunError; otherwise the exit status is returned.  stdout/stderr default
    to /dev/null.  Set TRACE in the environment to echo each command.
    """
    fail_hard = kwargs.pop("fail_hard", True)
    # output to /dev/null by default:
    kwargs.setdefault("stdout", open('/dev/null', 'w'))
    kwargs.setdefault("stderr", open('/dev/null', 'w'))
    command = Template(command).substitute(os.environ)
    if "TRACE" in os.environ:
        if 'cwd' in kwargs:
            print("[cwd=%s] %s"%(kwargs['cwd'], command))
        else:
            print(command)
    # Bug fix: if Popen() itself is interrupted, 'process' was never bound
    # and the old code's process.terminate() raised UnboundLocalError
    # instead of propagating the KeyboardInterrupt cleanly.
    process = None
    try:
        process = subprocess.Popen(command.split(' '), **kwargs)
        process.wait()
    except KeyboardInterrupt:
        if process is not None:
            process.terminate()
        raise
    if process.returncode != 0 and fail_hard:
        raise RunError("Failed: "+command)
    return process.returncode
def checkout_pull(clone_url, commit, out):
    """Prepare the chroot and build tree, then merge *commit* from *clone_url*
    onto upstream master.

    Git/build output is written to the *out* file object.  Returns True on a
    clean merge, False if the merge failed.
    """
    build_dir = os.environ["BUILD_DIR"]
    # Refresh the throwaway chroot from the pristine master copy, reinstall
    # the test scripts, and clone a fresh upstream tree into BUILD_DIR.
    run("umount ${CHROOT_COPY}/proc", fail_hard=False)
    for setup_cmd in ("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}",
                      "rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}",
                      "cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}",
                      "rm -rf ${BUILD_DIR}",
                      "mkdir -p ${BUILD_DIR}",
                      "git clone ${CLONE_URL} ${BUILD_DIR}"):
        run(setup_cmd)
    # Fetch the pull's branch and attempt the merge onto upstream master.
    run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
    run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
    merge_status = run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out)
    if merge_status != 0:
        return False
    run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
    run("mount --bind /proc ${CHROOT_COPY}/proc")
    return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
    """Post a test-result comment on the pull request at *commentUrl*.

    success   -- True if the build/test run passed
    inMerge   -- True if the failure was a merge failure (only meaningful
                 when success is False)
    needTests -- True if the run passed but the pull adds no test cases
    linkUrl   -- URL of the binaries/test log for this run

    Any previous comments from this bot (recognized by containing
    common_message) are deleted before the new comment is posted.
    """
    common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
    # Remove old BitcoinPullTester comments (I'm being lazy and not paginating here)
    # NOTE(review): '.json' as an attribute only works on pre-1.0 requests;
    # requests >= 1.0 made it a method ('.json()') -- confirm the pinned
    # requests version before upgrading.
    recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
                                  auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
    for comment in recentcomments:
        if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
            requests.delete(comment["url"],
                            auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
    if success == True:
        if needTests:
            message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
        else:
            message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
        post_data = { "body" : message + common_message}
    elif inMerge:
        post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
    else:
        # Bug fix: the original message read "It chanages changes build
        # scripts" -- duplicated/garbled wording corrected to "It changes".
        post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It changes build scripts in a way that made them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_message}
    requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def _mark_tested(commit):
    # Append the commit hash to the tested-commits database so later runs
    # skip it.  'with' guarantees the handle is flushed and closed.
    with open(os.environ["TESTED_DB"], "a") as db:
        db.write(commit + "\n")

def testpull(number, comment_url, clone_url, commit):
    """Check out, build and test pull request *number* at *commit*, publish
    the results directory, comment on GitHub, and record the commit as
    tested.

    Exit status 42 from the test script means "passed but needs test cases";
    any other nonzero status is a build/test failure.
    """
    print("Testing pull %d: %s : %s"%(number, clone_url,commit))
    # Bug fix: local was named 'dir', shadowing the builtin.
    results_dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
    print(" output to %s"%results_dir)  # typo "ouput" fixed
    if os.path.exists(results_dir):
        os.system("rm -r " + results_dir)
    os.makedirs(results_dir)
    # Point the "current" symlink at this run so it can be watched live.
    currentdir = os.environ["RESULTS_DIR"] + "/current"
    os.system("rm -r " + currentdir)
    os.system("ln -s " + results_dir + " " + currentdir)
    out = open(results_dir + "test.log", 'w+')
    resultsurl = os.environ["RESULTS_URL"] + commit
    # Bug fix: the log handle was never closed; close it on every path.
    try:
        if checkout_pull(clone_url, commit, out) != True:
            print("Failed to test pull - sending comment to: " + comment_url)
            commentOn(comment_url, False, True, False, resultsurl)
            _mark_tested(commit)
            return
        run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
        run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
        run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
        script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
        script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/BitcoindComparisonTool_jar/BitcoindComparisonTool.jar 0 6 ${OUT_DIR}"
        returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
                         fail_hard=False, stdout=out, stderr=out)
        # Publish the build output and tree alongside the test log.
        run("mv ${CHROOT_COPY}/${OUT_DIR} " + results_dir)
        run("mv ${BUILD_DIR} " + results_dir)
        if returncode == 42:
            print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
            commentOn(comment_url, True, False, True, resultsurl)
        elif returncode != 0:
            print("Failed to test pull - sending comment to: " + comment_url)
            commentOn(comment_url, False, False, False, resultsurl)
        else:
            print("Successfully tested pull - sending comment to: " + comment_url)
            commentOn(comment_url, True, False, False, resultsurl)
        _mark_tested(commit)
    finally:
        out.close()
def environ_default(setting, value):
    """Set ``os.environ[setting]`` to ``value`` only if it is not already set.

    Lets the caller override any of the path/configuration defaults below by
    exporting the variable before running the script.
    """
    # `x not in y` is the PEP 8 idiom (was `not setting in os.environ`).
    if setting not in os.environ:
        os.environ[setting] = value
# ---------------------------------------------------------------------------
# Main script
# ---------------------------------------------------------------------------

# Building/testing happens inside a chroot and files get chown'ed, so this
# must run with root privileges.
if getpass.getuser() != "root":
    print("Run me as root!")
    sys.exit(1)

# Results are posted back to GitHub as pull-request comments, so API
# credentials are mandatory.
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
    print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
    sys.exit(1)

# Fill in a default for every path/setting not already in the environment.
environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitcoin")
# BUILD_DIR is derived, not defaulted: the build path as seen from outside
# the chroot.
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitcoin/bitcoin")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))  # seconds: 2 hours

print("Optional usage: pull-tester.py 2112")

# Commits already tested, one SHA per line; lets re-runs skip duplicates.
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()

if len(sys.argv) > 1:
    # A pull number was given on the command line: test only that pull.
    # NOTE(review): `.json` is accessed as a property (requests < 1.0 API)
    # rather than called as `.json()` -- confirm the pinned requests version.
    pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
                        auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
    testpull(pull["number"], pull["_links"]["comments"]["href"],
             pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
else:
    # No argument: walk every open pull request, page by page, until GitHub
    # returns an empty page.
    for page in range(1,100):
        result = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls?state=open&page=%d"%(page,),
                              auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
        if len(result) == 0: break;
        for pull in result:
            # Skip anything whose head commit we have already tested.
            if pull["head"]["sha"] in tested:
                print("Pull %d already tested"%(pull["number"],))
                continue
            testpull(pull["number"], pull["_links"]["comments"]["href"],
                     pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
| qa/pull-tester/pull-tester.py | 9,137 | !/usr/bin/python Copyright (c) 2013 The Bitcoin Core developers Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. output to /dev/null by default: Init Merge onto upstream/master Remove old BitcoinPullTester comments (I'm being lazy and not paginating here) | 343 | en | 0.686894 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide Bokeh model "building block" classes.
One of the central design principles of Bokeh is that, regardless of
how the plot creation code is spelled in Python (or other languages),
the result is an object graph that encompasses all the visual and
data aspects of the scene. Furthermore, this *scene graph* is to be
serialized, and it is this serialized graph that the client library
BokehJS uses to render the plot. The low-level objects that comprise
a Bokeh scene graph are called :ref:`Models <bokeh.model>`.
'''
# This file is excluded from flake8 checking in setup.cfg
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.property.dataspec import expr, field, value # Legacy API
from ..model import Model
from .annotations import * # lgtm [py/polluting-import]
from .axes import *
from .callbacks import *
from .canvas import *
from .expressions import *
from .filters import *
from .formatters import *
from .glyphs import *
from .graphs import *
from .grids import *
from .labeling import *
from .layouts import *
from .map_plots import *
from .mappers import *
from .plots import *
from .ranges import *
from .renderers import *
from .scales import *
from .selections import *
from .sources import *
from .text import *
from .textures import *
from .tickers import *
from .tiles import *
from .tools import *
from .transforms import *
from .widgets import * # lgtm [py/polluting-import]
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# __all__ = include all explicit transitive imports above
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bokeh/models/__init__.py | 3,078 | Provide Bokeh model "building block" classes.
One of the central design principals of Bokeh is that, regardless of
how the plot creation code is spelled in Python (or other languages),
the result is an object graph that encompasses all the visual and
data aspects of the scene. Furthermore, this *scene graph* is to be
serialized, and it is this serialized graph that the client library
BokehJS uses to render the plot. The low-level objects that comprise
a Bokeh scene graph are called :ref:`Models <bokeh.model>`.
----------------------------------------------------------------------------- Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors. All rights reserved. The full license is in the file LICENSE.txt, distributed with this software.----------------------------------------------------------------------------- This file is excluded from flake8 checking in setup.cfg----------------------------------------------------------------------------- Boilerplate----------------------------------------------------------------------------- isort:skip----------------------------------------------------------------------------- Imports----------------------------------------------------------------------------- Bokeh imports Legacy API lgtm [py/polluting-import] lgtm [py/polluting-import]----------------------------------------------------------------------------- Globals and constants----------------------------------------------------------------------------- __all__ = include all explicit transitive imports above----------------------------------------------------------------------------- General API---------------------------------------------------------------------------------------------------------------------------------------------------------- Dev API---------------------------------------------------------------------------------------------------------------------------------------------------------- Private API---------------------------------------------------------------------------------------------------------------------------------------------------------- Code----------------------------------------------------------------------------- | 2,196 | en | 0.459367 |
"""
Django settings for Django_app project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'oy*@!577+db+r(d$6d1x*ftp*5v-0#+3cac^0f7-+c0%6xhg$$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'MeetMeHere.apps.MeetmehereConfig',
'channels',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Django_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# WSGI_APPLICATION = 'Django_app.wsgi.application'
ASGI_APPLICATION = 'Django_app.routing.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.contrib.gis.db.backends.postgis',
# 'NAME': 'finalYearProject',
# 'USER': 'postgres',
# 'PASSWORD': 'admin',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'mmh_db',
'USER': 'app_user',
'PASSWORD': 'Martins.24',
'HOST': '172.18.0.3',
}
}
# channels layer config
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [("172.18.0.2", 6379)],
},
},
}
# CHANNEL_LAYERS = {
# "default": {
# "BACKEND": "channels_redis.core.RedisChannelLayer",
# "CONFIG": {
# "hosts": [("localhost", 6379)],
# },
# },
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# rest framework schemas
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
]
}
# model used for authentication
AUTH_USER_MODEL = 'MeetMeHere.User'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
# GDAL_LIBRARY_PATH = 'D:\\Programms\\OSGeo4W\\bin\\gdal202.dll'
| Django_app/Django_app/settings.py | 4,529 | Django settings for Django_app project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition WSGI_APPLICATION = 'Django_app.wsgi.application' Database https://docs.djangoproject.com/en/1.11/ref/settings/databases DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', 'NAME': 'finalYearProject', 'USER': 'postgres', 'PASSWORD': 'admin', } } channels layer config CHANNEL_LAYERS = { "default": { "BACKEND": "channels_redis.core.RedisChannelLayer", "CONFIG": { "hosts": [("localhost", 6379)], }, }, } Password validation https://docs.djangoproject.com/en/1.11/ref/settings/auth-password-validators rest framework schemas model used for authentication Internationalization https://docs.djangoproject.com/en/1.11/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.11/howto/static-files/ GDAL_LIBRARY_PATH = 'D:\\Programms\\OSGeo4W\\bin\\gdal202.dll' | 1,558 | en | 0.494838 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Milan Ondrasovic <milan.ondrasovic@gmail.com>
import random
from typing import Sequence, Tuple, Dict, cast
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tracking import TrackedDetection
ColorT = Tuple[int, int, int]
PointT = Tuple[int, int]
def labeled_rectangle(
        image: np.ndarray, start_point: PointT, end_point: PointT, label: str,
        rect_color: ColorT, label_color: ColorT, alpha: float = 0.85):
    """Draw a semi-transparent rectangle with a filled label box on *image*.

    The covered region is blended toward white (original pixels keep weight
    ``alpha``), the label is drawn twice (colored outline plus a thinner
    white core), and the rectangle border is drawn last. Mutates *image*
    in place.
    """
    (left, top), (right, bottom) = start_point, end_point

    # Lighten the region under the rectangle so the annotation stands out.
    region = image[top:bottom, left:right]
    white = np.ones_like(region) * 255
    image[top:bottom, left:right] = cv.addWeighted(region, alpha, white, 1 - alpha, 0)

    font = cv.FONT_HERSHEY_COMPLEX_SMALL
    scale = 1
    thickness = 3
    (label_width, label_height), baseline = cv.getTextSize(
        label, font, scale, thickness)

    # Filled background box sized to the rendered label text.
    label_box_end = (
        start_point[0] + label_width, start_point[1] + label_height + baseline)
    cv.rectangle(image, start_point, label_box_end, rect_color, -1)

    # TODO Somehow calculate the shift.
    text_origin = (start_point[0] + 1, start_point[1] + label_height + 3)
    cv.putText(
        image, label, text_origin, font, scale, label_color,
        thickness, cv.LINE_AA)
    cv.putText(
        image, label, text_origin, font, scale, (255, 255, 255),
        max(1, thickness - 2), cv.LINE_AA)

    cv.rectangle(image, start_point, end_point, rect_color, 2, cv.LINE_AA)
class TrackingVisualizer:
    """Draws tracked detections, giving each track a stable palette color."""

    def __init__(self, n_colors: int) -> None:
        assert n_colors > 0
        # Palette shared by all tracks; assignments are remembered per track.
        self.colors: Sequence[ColorT] = self.init_colors(n_colors, True)
        self.track_color: Dict[int, ColorT] = {}

    def draw_tracks(
            self, image: np.ndarray,
            tracks: Sequence[TrackedDetection]) -> None:
        """Render every track as a labeled rectangle on *image* (in place)."""
        for track in tracks:
            labeled_rectangle(
                image, track.box.top_left, track.box.bottom_right,
                str(track.track_id), self._get_annotation_color(track),
                self._get_text_color())

    def _get_text_color(self) -> ColorT:
        # Near-black text reads well on the colored label background.
        return (16, 16, 16)

    def _get_annotation_color(self, track: TrackedDetection) -> ColorT:
        """Return the track's color, assigning the next palette entry once."""
        try:
            return self.track_color[track.track_id]
        except KeyError:
            pass
        new_color = self.colors[len(self.track_color) % len(self.colors)]
        self.track_color[track.track_id] = new_color
        return cast(ColorT, new_color)

    @staticmethod
    def init_colors(n_colors: int, randomize: bool = False) -> Sequence[ColorT]:
        """Build *n_colors* RGB byte tuples sampled from 'Spectral'."""
        # NOTE(review): plt.cm.get_cmap(name, lut) is deprecated in recent
        # matplotlib releases -- confirm the pinned matplotlib version.
        color_map = plt.cm.get_cmap('Spectral', n_colors)
        colors = []
        for idx in range(n_colors):
            channels = color_map(idx)[:3]
            colors.append(tuple(int(round(c * 255)) for c in channels))
        if randomize:
            random.shuffle(colors)
        return cast(Sequence[ColorT], colors)
| vehicle_tracker/visual.py | 3,047 | !/usr/bin/env python -*- coding: utf-8 -*- Author: Milan Ondrasovic <milan.ondrasovic@gmail.com> TODO Somehow calculate the shift. | 130 | en | 0.388984 |
""" Parses the `run.dat` input file for MechDriver that specifices all
of calculations to run for a given session of the code.
Specifcally, looks for and parses several subsections:
(1) `input` block: various input
(2) `pes' block: idxs denoting what PESs in mech file to run
(3) `spc` block: idxs denoting what species in .csv file to run
(4) `els tasks` block: set of tasks for ESDriver
(5) `therm tasks` block: set of tasks for ThermDriver
(6) `ktp tasks` block: set of tasks for kTPDriver
(7) `trans tasks` block: set of tasks for TransDriver
(8) `proc tasks` block: set of tasks for ProcDriver
Function parses the strings and converts them into formatted dictionaries
that are passed to the sub-drivers of the code:
ESDriver, ThermoDriver, kTPDriver, TransDriver, ProcDriver
These dictionaries are built in three stages:
(1) filled with user-specified options
(2) default values not defined by the user are added, and
(3) assessed that all keywordws and values are supported by the code.
"""
import sys
import automol
import ioformat
from mechlib.amech_io.printer import error_message
from mechlib.amech_io.parser._keywrd import defaults_from_val_dct
from mechlib.amech_io.parser._keywrd import defaults_from_key_val_dcts
from mechlib.amech_io.parser._keywrd import check_dct1
from mechlib.amech_io.parser._keywrd import check_thy_lvls
# DICTIONARIES OF DEFAULTS #
# Run Keywords

# Input keys that must be present in the `input` block.
RUN_INP_REQ = [
    'inp_mech', 'out_mech', 'inp_spc', 'out_spc', 'run_prefix', 'save_prefix']
# key: (allowed types, allowed values, default).
# BUGFIX: ('chemkin') was a plain string, not a 1-tuple -- the missing
# trailing comma turned the allowed-values membership check into a
# substring test. Now ('chemkin',), consistent with ('csv',).
RUN_INP_VAL_DCT = {
    'inp_mech': ((str,), ('chemkin',), 'chemkin'),
    'inp_spc': ((str,), ('csv',), 'csv'),
    'out_mech': ((str,), ('chemkin',), 'chemkin'),
    'out_spc': ((str,), ('csv',), 'csv'),
    'print_mech': ((bool,), (True, False), False),
    'print_debug': ((bool,), (True, False), False),
    'run_prefix': ((str,), (), None),
    'save_prefix': ((str,), (), None)
}

# HANDLE TASK KEYS

# Commonly useful task keyword lists
BASE = ('runlvl', 'inplvl', 'retryfail', 'overwrite')
MREF = ('var_splvl1', 'var_splvl2', 'var_scnlvl')
TRANS = ('bath', 'njobs', 'nsamp', 'conf')
PRNT = ('geolvl', 'proplvl', 'cnf_range', 'sort')

# Supported object types for task (useful if task requestes 'all')
SUPP_OBJS = ('spc', 'ts')

# Determines what objects and keywords are allowed for tasks for ES,Trans,Print
# Need way to set required tsks
# Tasks: (allowed obj, allowed_keywords)
TSK_KEY_DCT = {
    # Electronic Structure Driver Tasks
    'init_geom': (('spc',), BASE),
    'find_ts': (('spc', 'ts'), BASE + MREF + ('nobarrier',)),  # 're_id')),
    'conf_pucker': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
    'conf_samp': (('spc', 'ts'), BASE + ('cnf_range', 'sort', 'resave',)),
    'conf_energy': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
    'conf_grad': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
    'conf_hess': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
    'conf_vpt2': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
    'conf_prop': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
    'conf_opt': (('spc', 'ts'), BASE + ('cnf_range', 'sort',)),
    'hr_scan': (('spc', 'ts'), BASE + ('tors_model', 'resamp_min',
                                       'cnf_range', 'sort',)),
    'hr_grad': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
    'hr_hess': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
    'hr_energy': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
    'hr_vpt2': (('spc', 'ts'), BASE + ('tors_model', 'cnf_range', 'sort',)),
    'hr_reopt': (('spc', 'ts'), BASE + ('tors_model', 'hrthresh',
                                        'cnf_range', 'sort',)),
    'tau_samp': (('spc', 'ts'), BASE + ('resave',)),
    'tau_energy': (('spc', 'ts'), BASE),
    'tau_grad': (('spc', 'ts'), BASE),
    'tau_hess': (('spc', 'ts'), BASE + ('hessmax',)),
    'rpath_scan': (('ts',), BASE + ('rxncoord',)),
    'rpath_energy': (('ts',), BASE + ('rxncoord',)),
    'rpath_grad': (('ts',), BASE + ('rxncoord',)),
    'rpath_hess': (('ts',), BASE + ('rxncoord',)),
    # Transport Driver Tasks
    'onedmin': (('spc',), (BASE + TRANS)),
    'write_transport': (('spc',), (BASE + TRANS)),
    # Process Driver Tasks
    'freqs': (('spc', 'ts', 'vdw'), PRNT + ('scale',)),
    'energy': (('spc', 'ts'), PRNT),
    'geo': (('spc', 'ts'), PRNT),
    'molden': (('spc', 'ts'), PRNT),
    'zmatrix': (('spc', 'ts'), PRNT),
    'torsions': (('spc', 'ts'), PRNT),
    'enthalpy': (('spc', 'ts'), PRNT),
    'pf': (('spc', 'ts'), PRNT),
    'messpf_inp': (('spc', 'ts'), PRNT),
    'coeffs': (('spc', 'ts'), ()),
    # KTP/Therm
    'write_mess': ((), ('kin_model', 'spc_model', 'overwrite',
                        'use_well_extension', 'float_precision',
                        'cnf_range', 'sort')),
    'run_mess': ((), ('kin_model', 'spc_model', 'nprocs',
                      'cnf_range', 'sort')),
    'run_fits': ((), ('kin_model', 'cnf_range', 'sort')),
}

# tsk: (object types, (allowed values), default)  # use functions for weird
# maybe the required checks use if None given?
TSK_VAL_DCT = {
    # Common
    'runlvl': ((str,), (), None),
    'inplvl': ((str,), (), None),
    'var_splvl1': ((str,), (), None),
    'var_splvl2': ((str,), (), None),
    'var_scnlvl': ((str,), (), None),
    'resave': ((bool,), (True, False), False),
    'retryfail': ((bool,), (True, False), True),
    'overwrite': ((bool,), (True, False), False),
    # ES
    'cnf_range': ((str,), (), 'min'),  # change to econfs, nconfs
    'sort': ((str,), (), None),
    'hessmax': ((int,), (), 1000),
    'tors_model': ((str,),
                   ('1dhr', '1dhrf', '1dhrfa', 'mdhr', 'mdhrv'), '1dhr'),
    'resamp_min': ((bool,), (True, False), False),
    'hrthresh': ((float,), (), -0.2),
    'potthresh': ((float,), (), 0.3),
    'rxncoord': ((str,), ('irc', 'auto'), 'auto'),
    'nobarrier': ((str,), ('pst', 'rpvtst', 'vrctst'), None),
    're_id': ((bool,), (True, False), False),
    # Trans
    'njobs': ((int,), (), 1),
    'nsamp': ((int,), (), 1),
    'conf': ((str,), ('sphere', 'min'), 'sphere'),
    # Proc
    'geolvl': ((str,), (), None),
    'proplvl': ((str,), (), None),
    'nconfs': ((str,), (), 'min'),
    'econfs': ((str,), (), 'min'),
    'scale': ((str,), (), None),
    # KTP/Therm
    'kin_model': ((str,), (), None),
    'spc_model': ((str,), (), None),
    'nprocs': ((int,), (), 10),
    'use_well_extension': ((bool,), (), False),
    'linked_pes': ((tuple,), (), None),
    'float_precision': ((str,), ('double', 'quadruple'), 'double'),
}
# Have nconfs and econfs keywords and combine them to figure out which to use?
# INPUT PARSERS #
# Input Section
def input_dictionary(run_str):
    """ Parses the `input` block and builds a
        dictionary of keywords and their corresponding values.

        :param run_str: input string of the run.dat block
        :type run_str: str
        :rtype: dict[str: obj]
    """
    # Pull the block out of the string and parse its key = value lines.
    block = ioformat.ptt.end_block(run_str, 'input', footer='input')
    user_dct = ioformat.ptt.keyword_dct_from_block(block)

    # Layer the user's settings on top of the defaults, then validate.
    full_dct = automol.util.dict_.right_update(
        defaults_from_val_dct(RUN_INP_VAL_DCT), user_dct)
    check_dct1(full_dct, RUN_INP_VAL_DCT, RUN_INP_REQ, 'Run-Input')

    return full_dct
# Chemistry objects
def chem_idxs(run_str):
    """ Parses the `pes` block of the run.dat file and
        builds a dictionary of the PESs and corresponding channels the
        user wishes to run.

        Parses the `spc` block of the run.dat file and
        builds a dictionary of the species the
        user wishes to run.

        May break if idx is given on two lines of string.

        :param run_str: string of the run.dat input file
        :type run_str: str
        :returns: ({pes_idx: list of channel_idxs}, {1: list of species idxs})
        :rtype: dict[str: tuple]
    """
    # PES/channel indices (1-indexed in the file, 0-indexed internally)
    pes_block = ioformat.ptt.end_block(run_str, 'pes', footer='pes')
    pes_idx_dct = None
    if pes_block is not None:
        pes_idx_dct = {}
        for line in pes_block.strip().splitlines():
            pes_part, chn_part = line.split(':')
            chn_idxs = tuple(
                num - 1 for num in ioformat.ptt.idx_lst_from_line(chn_part))
            for num in ioformat.ptt.idx_lst_from_line(pes_part):
                pes_idx_dct[num - 1] = chn_idxs

    # Species indices (also shifted to 0-indexing, under the single key 1)
    spc_block = ioformat.ptt.end_block(run_str, 'spc', footer='spc')
    spc_idx_dct = None
    if spc_block is not None:
        all_idxs = ()
        for line in spc_block.splitlines():
            all_idxs += ioformat.ptt.idx_lst_from_line(line)
        spc_idx_dct = {1: tuple(num - 1 for num in all_idxs)}

    # Nothing to run at all is a fatal input error.
    if pes_idx_dct is None and spc_idx_dct is None:
        error_message('No pes or spc section given in run.dat file. Quitting')
        sys.exit()

    return pes_idx_dct, spc_idx_dct
# Driver Task Lists
def extract_task(tsk, tsk_lst):
    """ Searches for a task in the task lst and if found:
        the corresponding keywords and values will be returned

        Function only works if task is present in the list one time.

        :param tsk: task to extract information for
        :type tsk: str
        :param tsk_lst: list of tasks to run for some driver
        :type tsk_lst: tuple(tuple(str/dict))
        :rtype: tuple(str/dict)
    """
    # Return the first entry containing the task name, or None if absent.
    matches = (entry for entry in tsk_lst if tsk in entry)
    return next(matches, None)
def tasks(run_str, thy_dct):
    """ Parse every driver task block from the run.dat string and return
        the finished task dictionary, keyed by driver name.
    """
    # (output key, block header/footer in run.dat, leading words per line)
    block_specs = (
        ('es', 'els', 3),
        ('thermo', 'thermo', 2),
        ('ktp', 'ktp', 2),
        ('trans', 'trans', 3),
        ('proc', 'proc', 3),
    )

    # Phase 1: pull each driver's block out of the input string and split
    # it into raw task lists.
    raw_lsts = {}
    for key, header, num in block_specs:
        block = ioformat.ptt.end_block(run_str, header, footer=header)
        raw_lsts[key] = _tsk_lst(block, num)

    # Phase 2: fill in default keyword values for every task.
    tsk_dct = {key: _tsk_defaults(lst) for key, lst in raw_lsts.items()}

    # Phase 3: validate all keywords and theory levels.
    for lst in tsk_dct.values():
        _check_tsks(lst, thy_dct)

    return tsk_dct
def _tsk_lst(tsk_str, num):
    """ Set the sequence of electronic structure tasks for a given
        species or PESs

        :param tsk_str: raw text of one driver block (or None if absent)
        :param num: leading words per task line (2 or 3)
    """
    if tsk_str is None:
        return None
    # Normalize whitespace, then parse each line into [words..., kwarg_dct].
    cleaned = ioformat.remove_whitespace_from_string(tsk_str)
    return [_split_line(line, num) for line in cleaned.splitlines()]
def _expand_tsks(tsks_lst):
""" Loops over the driver task list and checks if each task is a
macro-task that should be expanded into sub-tasks.
Right now, it splits all obj tasks into spc and ts
:param tsk_lst: list of tasks to run for some driver
:type tsk_lst: tuple(tuple(str/dict))
:rtype: tuple(str/dict)
"""
mod_tsks_lst = []
for tsk_lst in tsks_lst:
[obj, tsk, dct] = tsk_lst
objs = ['spc', 'ts'] if obj == 'all' else [obj]
for obj in objs:
mod_tsks_lst.append([obj, tsk, dct])
return mod_tsks_lst
def _tsk_defaults(tsk_lst):
    """ Fill out the keyword dictionaries for various task lists with
        default values

        Each entry looks like [obj?, tsk, keyword_dct]; the task name is
        always the word immediately before the keyword dict.
    """
    if tsk_lst is None:
        return None
    filled = []
    for entry in tsk_lst:
        *head, keyword_dct = entry
        tsk = head[-1]
        # User-specified keywords win over the per-task defaults.
        merged = automol.util.dict_.right_update(
            defaults_from_key_val_dcts(tsk, TSK_KEY_DCT, TSK_VAL_DCT),
            keyword_dct)
        filled.append(head + [merged])
    return filled
def _check_tsks(tsk_lsts, thy_dct):
    """ Loop over all of the tasks, add default keywords and parameters
        and assesses if all the input is valid

        Exits the program (via sys.exit) on the first invalid object type;
        keyword/theory-level validation is delegated to check_dct1 and
        check_thy_lvls.
    """
    if tsk_lsts is not None:
        for tsk_lst in tsk_lsts:
            # Unpack the task: everything before the trailing keyword dict
            _tsk = tsk_lst[:-1]
            if len(_tsk) == 2:
                # Case(1): spc task keywords (ESDriver)
                obj, tsk = _tsk[0], _tsk[1]
            else:
                # Case(2): task keywords (ThermoDriver, kTPDriver)
                obj, tsk = None, _tsk[0]
            key_dct = tsk_lst[-1]

            # Check if the obj is allowed (only object-style tasks have one)
            if obj is not None:
                # Have to make lst to handle case where obj == 'all'
                obj_lst = SUPP_OBJS if obj == 'all' else (obj,)
                for _obj in obj_lst:
                    # TSK_KEY_DCT maps task -> (allowed objects, keywords)
                    if _obj not in TSK_KEY_DCT[tsk][0]:
                        error_message(f'obj {obj}, not allowed for {tsk}')
                        sys.exit()

            # Check if keyword values are allowed
            check_dct1(key_dct, TSK_VAL_DCT, (), 'Task')

            # Check keywords with thylvls as values use lvls defined in thy dct
            check_thy_lvls(key_dct, thy_dct)
def _split_line(line, num):
    """ Split one task line into its leading words plus a keyword dict.

        :param line: single task line from a driver block
        :type line: str
        :param num: number of leading (non-keyword) words on the line;
            3 for object tasks ('obj tsk key=val ...'), 2 for plain tasks
        :type num: int
        :rtype: list
        :raises ValueError: if num is not 2 or 3
    """
    words = line.split()
    if num == 3:
        tsk, key_lst = words[:2], words[2:]
    elif num == 2:
        tsk, key_lst = words[:1], words[1:]
    else:
        # The original fell through to an UnboundLocalError here; fail loudly.
        raise ValueError(f'num must be 2 or 3, got {num}')
    key_dct = ioformat.ptt.keyword_dct_from_block('\n'.join(key_lst))
    return tsk + [key_dct]  # could convert to empty dct instead of None
# Check a bunch of stuff
def check_inputs(tsk_dct, pes_dct, pes_mod_dct, spc_mod_dct):
    """ Verify that the model/mechanism input required by the requested
        drivers was actually provided; exit with a message otherwise.
    """
    # kTP and Thermo both need kinetic and species models from models.dat.
    needs_models = tsk_dct['ktp'] or tsk_dct['thermo']
    if needs_models and pes_mod_dct is None:
        error_message(
            'kTPDriver or Thermo Requested. \n'
            ' However no kin model provided in models.dat\n'
            ' Exiting MechDriver...')
        sys.exit()
    if needs_models and spc_mod_dct is None:
        error_message(
            'kTPDriver or Thermo Requested. \n'
            ' However no spc model provided in models.dat\n'
            ' Exiting MechDriver...')
        sys.exit()
    # kTP additionally needs reaction channels from the mechanism file.
    if tsk_dct['ktp'] and pes_dct is None:
        error_message(
            'kTPDriver Requested. \n'
            ' However no reaction channels provided in mechanism.dat\n'
            ' Exiting MechDriver...')
        sys.exit()
| mechlib/amech_io/parser/run.py | 15,601 | Loop over all of the tasks, add default keywords and parameters
and assesses if all the input is valid
Loops over the driver task list and checks if each task is a
macro-task that should be expanded into sub-tasks.
Right now, it splits all obj tasks into spc and ts
:param tsk_lst: list of tasks to run for some driver
:type tsk_lst: tuple(tuple(str/dict))
:rtype: tuple(str/dict)
Split a line
Fill out the keyword dictionaries for various task lists with
default values
Set the sequence of electronic structure tasks for a given
species or PESs
Check if inputs placed that is required
Parses the `pes` block of the run.dat file and
builds a dictionary of the PESs and corresponding channels the
user wishes to run.
Parses the `spc` block of the run.dat file and
builds a dictionary of the species the
user wishes to run.
May break if idx is given on two lines of string.
:param run_str: string of the run.dat input file
:type run_str: str
:returns: ({pes_idx: list of channel_idxs}, {1: list of species idxs})
:rtype: dict[str: tuple]
Searches for a task in the task lst and if found:
the corresponding keywords and values will be returned
Function only works if task is present in the list one time.
:param tsk: task to extract information for
:type tsk: str
:param tsk_lst: list of tasks to run for some driver
:type tsk_lst: tuple(tuple(str/dict))
:rtype: tuple(str/dict)
Parses the `input` block and builds a
dictionary of keywords and their corresponding values.
:param run_str: input string of the run.dat block
:type run_str: str
:rtype: dict[str: obj]
runstr
Parses the `run.dat` input file for MechDriver that specifices all
of calculations to run for a given session of the code.
Specifcally, looks for and parses several subsections:
(1) `input` block: various input
(2) `pes' block: idxs denoting what PESs in mech file to run
(3) `spc` block: idxs denoting what species in .csv file to run
(4) `els tasks` block: set of tasks for ESDriver
(5) `therm tasks` block: set of tasks for ThermDriver
(6) `ktp tasks` block: set of tasks for kTPDriver
(7) `trans tasks` block: set of tasks for TransDriver
(8) `proc tasks` block: set of tasks for ProcDriver
Function parses the strings and converts them into formatted dictionaries
that are passed to the sub-drivers of the code:
ESDriver, ThermoDriver, kTPDriver, TransDriver, ProcDriver
These dictionaries are built in three stages:
(1) filled with user-specified options
(2) default values not defined by the user are added, and
(3) assessed that all keywordws and values are supported by the code.
DICTIONARIES OF DEFAULTS Run Keywords HANDLE TASK KEYS Commonly useful task keyword lists Supported object types for task (useful if task requestes 'all') Determines what objects and keywords are allowed for tasks for ES,Trans,Print Need way to set required tsks Tasks: (allowed obj, allowed_keywords) Electronic Structure Driver Tasks 're_id')), Transport Driver Tasks Process Driver Tasks KTP/Therm tsk: (object types, (allowed values), default) use functions for weird maybe the required checks use if None given? Common ES change to econfs, nconfs Trans Proc KTP/Therm Have nconfs and econfs keywords and combine them to figure out which to use? INPUT PARSERS Input Section Read the input block Add defaults to the dictionary Check the dictionary Chemistry objects PES idxs to run SPC idxs to run Kill code if no idxs given Driver Task Lists just looks in all pars Read blocks and build user determined task lists` print('els\n', es_block) print('therm\n', therm_block) print('trans\n', trans_block) print('proc\n', proc_block) Add defaults to each task as needed Assess each dictionary for correctness Build the task lists from the string mod_tsks = _expand_tsks(tsks) if num == 3 else tsks Unpack the task Case(1): spc task keywords (ESDriver) Case(2): task keywords (ThermoDriver, kTPDriver) Check if the obj is allowed Have to make lst to handle case where obj == 'all' Check if keyword values are allowed Check keywords with thylvls as values use lvls defined in thy dct could convert to empty dct instead of None Check a bunch of stuff Check if a mechanism has been provided where required | 4,246 | en | 0.778952 |
import logging
import pytest
import pydantic
import requests
from pymarc import Record, Field
from airflow.models import Variable
from pytest_mock import MockerFixture
from plugins.folio.helpers import (
archive_artifacts,
move_marc_files_check_tsv,
post_to_okapi,
process_marc,
_move_001_to_035,
transform_move_tsvs,
process_records,
setup_data_logging,
)
# Mock xcom messages dict
messages = {}
# Mock xcom
def mock_xcom_push(*args, **kwargs):
key = kwargs["key"]
value = kwargs["value"]
messages[key] = value
class MockTaskInstance(pydantic.BaseModel):
xcom_push = mock_xcom_push
@pytest.fixture
def mock_file_system(tmp_path):
airflow_path = tmp_path / "opt/airflow/"
# Mock source and target dirs
source_dir = airflow_path / "symphony"
source_dir.mkdir(parents=True)
sample_marc = source_dir / "sample.mrc"
sample_marc.write_text("sample")
target_dir = airflow_path / "migration/data/instances/"
target_dir.mkdir(parents=True)
# Mock Results and Archive Directories
results_dir = airflow_path / "migration/results"
results_dir.mkdir(parents=True)
archive_dir = airflow_path / "migration/archive"
archive_dir.mkdir(parents=True)
# mock tmp dir
tmp = tmp_path / "tmp/"
tmp.mkdir(parents=True)
return [
airflow_path,
source_dir,
target_dir,
results_dir,
archive_dir,
tmp
]
def test_move_marc_files(mock_file_system):
task_instance = MockTaskInstance()
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
move_marc_files_check_tsv(
task_instance=task_instance, airflow=airflow_path, source="symphony"
) # noqa
assert not (source_dir / "sample.mrc").exists()
assert messages["marc_only"]
def test_move_tsv_files(mock_file_system):
task_instance = MockTaskInstance()
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
sample_csv = source_dir / "sample.tsv"
sample_csv.write_text("sample")
move_marc_files_check_tsv(
task_instance=task_instance, airflow=airflow_path, source="symphony"
) # noqa
assert messages["marc_only"] is False
@pytest.fixture
def mock_dag_run(mocker: MockerFixture):
dag_run = mocker.stub(name="dag_run")
dag_run.run_id = "manual_2022-02-24"
return dag_run
def test_archive_artifacts(mock_dag_run, mock_file_system):
dag = mock_dag_run
airflow_path = mock_file_system[0]
results_dir = mock_file_system[3]
archive_dir = mock_file_system[4]
tmp_dir = mock_file_system[5]
# Create mock Instance JSON file
instance_filename = f"folio_instances_{dag.run_id}_bibs-transformer.json"
instance_file = results_dir / instance_filename
instance_file.write_text("""{ "id":"abcded2345"}""")
tmp_filename = "temp_file.json"
tmp_file = tmp_dir / tmp_filename
tmp_file.write_text("""{ "key":"vaaluue"}""")
target_file = archive_dir / instance_filename
archive_artifacts(dag_run=dag, airflow=airflow_path, tmp_dir=tmp_dir)
assert not instance_file.exists()
assert not tmp_file.exists()
assert target_file.exists()
@pytest.fixture
def mock_okapi_variable(monkeypatch):
def mock_get(key):
return "https://okapi-folio.dev.edu"
monkeypatch.setattr(Variable, "get", mock_get)
@pytest.fixture
def mock_records():
return [
{"id": "de09e01a-6d75-4007-b700-c83a475999b1"},
{"id": "123326dd-9924-498f-9ca3-4fa00dda6c90"},
]
@pytest.fixture
def mock_okapi_success(monkeypatch, mocker: MockerFixture):
def mock_post(*args, **kwargs):
post_response = mocker.stub(name="post_result")
post_response.status_code = 201
return post_response
monkeypatch.setattr(requests, "post", mock_post)
@pytest.mark.output_capturing
def test_post_to_okapi(
mock_okapi_success, mock_okapi_variable, mock_dag_run, mock_records, caplog
):
post_to_okapi(
token="2345asdf",
dag_run=mock_dag_run(),
records=mock_records,
endpoint="/instance-storage/batch/synchronous",
payload_key="instances",
)
assert "Result status code 201 for 2 records" in caplog.text
@pytest.fixture
def mock_okapi_failure(monkeypatch, mocker: MockerFixture):
def mock_post(*args, **kwargs):
post_response = mocker.stub(name="post_result")
post_response.status_code = 422
post_response.text = """{
"errors" : [ {
"message" : "value already exists in table holdings_record: hld100000000027"
} ]
}""" # noqa
return post_response
monkeypatch.setattr(requests, "post", mock_post)
def test_post_to_okapi_failures(
mock_okapi_failure,
mock_okapi_variable,
mock_dag_run,
mock_records,
mock_file_system,
):
airflow_path = mock_file_system[0]
migration_results = mock_file_system[3]
post_to_okapi(
token="2345asdf",
dag_run=mock_dag_run,
records=mock_records,
endpoint="/instance-storage/batch/synchronous",
payload_key="instances",
airflow=airflow_path,
)
error_file = (
migration_results / "errors-instance-storage-422-manual_2022-02-24.json" # noqa
)
assert error_file.exists()
def test_process_marc():
assert process_marc
@pytest.fixture
def mock_marc_record():
record = Record()
field_245 = Field(
tag="245",
indicators=["0", "1"],
subfields=[
"a",
"The pragmatic programmer : ",
"b",
"from journeyman to master /",
"c",
"Andrew Hunt, David Thomas.",
],
)
field_001_1 = Field(tag="001", data="a123456789")
field_001_2 = Field(tag="001", data="gls_0987654321")
record.add_field(field_001_1, field_001_2, field_245)
return record
def test_move_001_to_035(mock_marc_record):
record = mock_marc_record
_move_001_to_035(record)
assert record.get_fields("035")[0].get_subfields("a")[0] == "gls_0987654321" # noqa
def test_missing_001_to_034(mock_marc_record):
record = mock_marc_record
record.remove_fields('001')
_move_001_to_035(record)
assert record.get_fields("035") == []
def test_transform_move_tsvs(mock_file_system):
airflow_path = mock_file_system[0]
source_dir = mock_file_system[1]
# mock sample csv and tsv
symphony_tsv = source_dir / "sample.tsv"
symphony_tsv.write_text(
"CATKEY\tCALL_NUMBER_TYPE\tBARCODE\n123456\tLC 12345\t45677 ")
tsv_directory = airflow_path / "migration/data/items"
tsv_directory.mkdir(parents=True)
sample_tsv = tsv_directory / "sample.tsv"
column_transforms = [("CATKEY", lambda x: f"a{x}"),
("BARCODE", lambda x: x.strip())]
transform_move_tsvs(
airflow=airflow_path,
column_transforms=column_transforms,
source="symphony",
)
f = open(sample_tsv, "r")
assert f.readlines()[1] == "a123456\tLC 12345\t45677\n"
f.close()
def test_process_records(mock_dag_run, mock_file_system):
airflow_path = mock_file_system[0]
tmp = mock_file_system[5]
results_dir = mock_file_system[3]
# mock results file
results_file = results_dir / "folio_instances-manual_2022-02-24.json"
results_file.write_text(
"""{"id": "de09e01a-6d75-4007-b700-c83a475999b1"}
{"id": "123326dd-9924-498f-9ca3-4fa00dda6c90"}"""
)
num_records = process_records(
prefix="folio_instances",
out_filename="instances",
jobs=1,
dag_run=mock_dag_run,
airflow=str(airflow_path),
tmp=str(tmp),
)
assert num_records == 2
@pytest.fixture
def mock_logger_file_handler(monkeypatch, mocker: MockerFixture):
def mock_file_handler(*args, **kwargs):
file_handler = mocker.stub(name="file_handler")
file_handler.addFilter = lambda x: x
file_handler.setFormatter = lambda x: x
file_handler.setLevel = lambda x: x
return file_handler
monkeypatch.setattr(logging, "FileHandler", mock_file_handler)
class MockFolderStructure(pydantic.BaseModel):
data_issue_file_path = "data-issues-1345.tsv"
class MockTransform(pydantic.BaseModel):
_log = None
folder_structure = MockFolderStructure()
def test_setup_data_logging(mock_logger_file_handler):
transformer = MockTransform()
assert hasattr(logging.Logger, "data_issues") is False
assert len(logging.getLogger().handlers) == 5
setup_data_logging(transformer)
assert hasattr(logging.Logger, "data_issues")
assert len(logging.getLogger().handlers) == 6
# Removes handler otherwise fails subsequent tests
file_handler = logging.getLogger().handlers[-1]
logging.getLogger().removeHandler(file_handler)
| plugins/tests/test_helpers.py | 8,884 | Mock xcom messages dict Mock xcom Mock source and target dirs Mock Results and Archive Directories mock tmp dir noqa noqa Create mock Instance JSON file noqa noqa noqa mock sample csv and tsv mock results file Removes handler otherwise fails subsequent tests | 258 | en | 0.719138 |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import json
import os
import codecs
import cPickle
from unittest.case import skipIf
from commoncode.testcase import FileBasedTesting
from textcode.analysis import DEFAULT_GAP
from textcode.analysis import NO_GAP
from textcode.analysis import InvalidGapError
from textcode.analysis import UnbalancedTemplateError
from textcode.analysis import Token
from textcode.analysis import word_splitter
from textcode.analysis import unigram_splitter
from textcode.analysis import unigram_tokenizer
from textcode.analysis import position_processor
from textcode.analysis import template_splitter
from textcode.analysis import template_processor
from textcode.analysis import ngram_to_token
from textcode.analysis import ngram_tokenizer
from textcode.analysis import tokens_ngram_processor
from textcode.analysis import doc_subset
from textcode.analysis import unicode_text_lines
from textcode.analysis import text_lines
#############################################################################
#
# Code style note: lines are not wrapped to PEP8 line length on purpose
# to keep the tests more readable
#
#############################################################################
class TestDocsubset(FileBasedTesting):
    """Tests for doc_subset: extracting a region of text from an iterable of
    lines, using a Token's line/char positions as the region boundaries."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def test_doc_subset_single_line(self):
        # Region fully contained in one line: chars 8..21 of line 1.
        lines = '''A simple test
        with multiple
        lines
        of text
        '''.splitlines()
        position = Token(start=0, end=0, start_line=1, start_char=8, end_line=1, end_char=21)
        extracted = '\n'.join(doc_subset(iter(lines), position))
        assert extracted == '''with multiple'''

    def test_doc_subset_multilines(self):
        # Region spanning a whole single line of a two-line document.
        lines = '''0123456789\n0123456789\n'''.splitlines()
        position = Token(start=0, end=0, start_line=0, start_char=0, end_line=0, end_char=10)
        extracted = ''.join(doc_subset(iter(lines), position))
        assert extracted == '0123456789'

    def test_doc_subset(self):
        # Region crossing a line boundary: from char 8 of line 1 to char 11
        # of line 2.
        lines = iter('''A simple test
        with multiple
        lines
        of text
        '''.splitlines())
        position = Token(start=3, end=54, start_line=1, start_char=8, end_line=2, end_char=11)
        extracted = u'\n'.join(doc_subset(iter(lines), position))
        assert extracted == u'''with multiple
        lin'''
class TestAnalysis(FileBasedTesting):
    """Tests for text_lines input flexibility."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def test_text_lines_from_list_or_location_yield_same_results(self):
        # text_lines accepts either a file path or a pre-read list of
        # strings: both forms must yield identical lines.
        location = self.get_test_loc('analysis/bsd-new')
        with open(location, 'rb') as inp:
            strings = inp.read().splitlines(True)
        assert list(text_lines(location=location)) == list(text_lines(location=strings))
class TestUnigrams(FileBasedTesting):
    """Tests for unigram splitting, position assignment and tokenization.

    Covers both the plain word_splitter and the template-aware
    template_splitter, plus the position_processor and unigram_tokenizer
    pipelines.
    """
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
    def test_unigrams_word_splitter_handles_empty_string(self):
        # An empty line yields no tokens.
        text = iter([''])
        result = list(unigram_splitter(text, splitter=word_splitter))
        assert [] == result
    def test_unigrams_word_splitter_handles_blank_lines(self):
        # Whitespace-only lines yield no tokens.
        text = iter([u' ', u'', u'\t '])
        result = list(unigram_splitter(text, splitter=word_splitter))
        assert [] == result
    def test_unigrams_word_splitter_can_split(self):
        # Tokens carry line and char positions; values are lowercased
        # (GHI -> ghi).
        text = iter(u'abc def \n GHI'.splitlines())
        result = list(unigram_splitter(text, splitter=word_splitter))
        expected = [
            Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
            Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
            Token(start_line=1, end_line=1, start_char=1, end_char=4, value=u'ghi'),
        ]
        assert expected == result
    def test_unigrams_word_splitter_handles_empty_iterable(self):
        # No input lines at all is also fine.
        text = iter([])
        result = list(unigram_splitter(text, splitter=word_splitter))
        assert [] == result
    def test_unigrams_template_splitter_handles_empty_string(self):
        text = iter([''])
        result = list(unigram_splitter(text, splitter=template_splitter))
        assert [] == result
    def test_unigrams_template_splitter_handles_blank_lines(self):
        text = iter([' ', '', '\t '])
        result = list(unigram_splitter(text, splitter=template_splitter))
        assert [] == result
    def test_unigrams_template_splitter_handles_empty_iterable(self):
        text = iter([])
        result = list(unigram_splitter(text, splitter=template_splitter))
        assert [] == result
    def test_unigrams_template_splitter_can_split(self):
        # On plain (non-templated) text the template splitter behaves like
        # the word splitter.
        text = iter(u'abc def \n GHI'.splitlines())
        result = list(unigram_splitter(text, splitter=template_splitter))
        assert [u'abc', u'def', u'ghi'] == [x.value for x in result]
    def test_unigrams_template_splitter_can_split_templates(self):
        # The template markers {{ and }} are kept as separate tokens around
        # the template content.
        text = u'abc def \n {{temp}} GHI'.splitlines()
        result = list(unigram_splitter(text, splitter=template_splitter))
        expected = [
            Token(start_line=0, end_line=0, start_char=0, end_char=3, value=u'abc'),
            Token(start_line=0, end_line=0, start_char=4, end_char=7, value=u'def'),
            Token(start_line=1, end_line=1, start_char=1, end_char=3, value=u'{{'),
            Token(start_line=1, end_line=1, start_char=3, end_char=7, value=u'temp'),
            Token(start_line=1, end_line=1, start_char=7, end_char=9, value=u'}}'),
            Token(start_line=1, end_line=1, start_char=10, end_char=13, value=u'ghi'),
        ]
        assert expected == result
    def test_position_processor(self):
        # position_processor assigns sequential start/end token positions.
        tokens = [
            Token(value=u'abc'),
            Token(value=u'def'),
            Token(value=u'temp'),
            Token(value=u'ghi'),
        ]
        expected = [
            Token(value=u'abc', start=0, end=0),
            Token(value=u'def', start=1, end=1),
            Token(value=u'temp', start=2, end=2),
            Token(value=u'ghi', start=3, end=3),
        ]
        result = list(position_processor(tokens))
        assert expected == result
    def test_unigram_tokenizer(self):
        # End-to-end tokenization of a license-like text: punctuation is
        # dropped and values are lowercased; 39 tokens expected in order.
        inp = u'''Redistribution and use in source and binary forms, with or
        without modification, are permitted provided that the following
        conditions are met:
        Redistributions of source code must retain the above
        copyright notice, this list of conditions and the following
        disclaimer.'''
        tst = list(unigram_tokenizer(inp.splitlines(True)))
        assert 39 == len(tst)
        expected = u'''redistribution and use in source and binary forms with or
        without modification are permitted provided that the following
        conditions are met redistributions of source code must retain the above
        copyright notice this list of conditions and the following
        disclaimer'''.split()
        result = [t.value for t in tst]
        assert expected == result
class TestTemplates(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def template_parsing(self, lines):
if isinstance(lines, basestring):
lines = lines.splitlines()
unigrams = unigram_splitter(lines, splitter=template_splitter)
return list(template_processor(unigrams))
def test_process_template_handles_empty_templates_using_default_gap(self):
lines = [u'ab{{}}cd']
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=6, end_char=8, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_gap(self):
lines = u'ab{{10 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=21, end_char=23, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_raise_invalid_gap_exception(self):
lines = u'ab{{151 nexb Company}}cd'
self.assertRaises(InvalidGapError, self.template_parsing, lines)
def test_process_template_recognizes_template_with_maxgap(self):
lines = u'ab{{150 nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=150),
Token(start_line=0, end_line=0, start_char=22, end_char=24, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap(self):
lines = u'ab{{10}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=8, end_char=10, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_recognizes_template_with_only_gap_and_spaces(self):
lines = u'ab{{ 10 }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=10),
Token(start_line=0, end_line=0, start_char=16, end_char=18, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified(self):
lines = u'ab{{nexb Company}}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_set_default_gap_if_none_is_specified_ignoring_spaces(self):
lines = u'ab{{ \sdsdnexb Companysd }}cd'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=28, end_char=30, value=u'cd', gap=NO_GAP)
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap(self):
lines = u'ab{{nexb Company}}cd {{second}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=31, end_char=33, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_can_process_multiple_templatized_regions_with_default_gap_and_custom_gaps(self):
lines = u'ab{{nexb Company}}cd{{12 second}}ef{{12 second}}gh'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=18, end_char=20, value=u'cd', gap=12),
Token(start_line=0, end_line=0, start_char=33, end_char=35, value=u'ef', gap=12),
Token(start_line=0, end_line=0, start_char=48, end_char=50, value=u'gh', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates(self):
lines = u'ab{{c}}d}}ef'
expected = [
Token(start_line=0, end_line=0, start_char=0, end_char=2, value=u'ab', gap=DEFAULT_GAP),
Token(start_line=0, end_line=0, start_char=7, end_char=8, value=u'd', gap=NO_GAP),
Token(start_line=0, end_line=0, start_char=10, end_char=12, value=u'ef', gap=NO_GAP),
]
assert expected == self.template_parsing(lines)
def test_process_template_handles_empty_lines(self):
lines = u'\n\n'
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_handles_None(self):
lines = None
expected = []
assert expected == self.template_parsing(lines)
def test_process_template_can_parse_simple_line(self):
lines = u'Licensed by {{12 nexB}} to you '
expected = u'licensed by to you'
result = u' '.join(x.value for x in self.template_parsing(lines))
assert expected == result
def test_process_template_does_not_throw_exception_for_illegal_pystache_templates(self):
lines = u'''Permission to use, copy, modify, and {{ /or : the
lines exist without or }} distribute this software...'''
self.template_parsing(lines)
    def test_process_template_handles_unicode_text_correctly(self):
        """Tokenize a real-world non-ASCII text file (German GPL-style notice
        with umlauts and eszett) and check every resulting Token position,
        value and gap. The two gap=DEFAULT_GAP entries mark where templated
        regions occur in the source text."""
        expected = [
            Token(start_line=0, end_line=0, start_char=1, end_char=4, value=u'ist', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=5, end_char=10, value=u'freie', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=11, end_char=19, value=u'software', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=21, end_char=24, value=u'sie', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=25, end_char=31, value=u'k\xf6nnen', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=32, end_char=34, value=u'es', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=35, end_char=40, value=u'unter', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=41, end_char=44, value=u'den', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=45, end_char=56, value=u'bedingungen', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=57, end_char=60, value=u'der', gap=NO_GAP),
            Token(start_line=0, end_line=0, start_char=61, end_char=64, value=u'gnu', gap=NO_GAP),
            Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
            Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
            Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
            Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
            Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
            Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
            Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
            Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
            Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
            Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
            Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
            Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
            Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
            Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
            # gap=DEFAULT_GAP: a templated region followed this token.
            Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
            Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
            Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
            Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
            Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
            Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
            Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
            Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
            Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
            Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
            Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
            Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
            Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
            Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
            Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
            Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
            Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
            Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
            Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
            # gap=DEFAULT_GAP: a templated region followed this token.
            Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
            Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
            Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
            Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
            Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
        ]
        # Read the fixture as UTF-8 so non-ASCII characters round-trip
        # correctly through the tokenizer.
        test_file = self.get_test_loc('analysis/unicode/12180.atxt')
        with codecs.open(test_file, encoding='utf-8') as test:
            lines = test.read().splitlines()
        result = list(self.template_parsing(lines))
        assert expected == result
def test_process_template_can_handle_long_text(self):
    """Template-parse a long German GPL notice (non-ASCII text) and check
    every emitted Token, including the DEFAULT_GAP markers left where the
    {{...}} template regions were.
    """
    # NOTE(review): positions are (line, char) offsets into the .txt fixture;
    # gap=DEFAULT_GAP flags the token preceding an unsized template region.
    expected = [
        Token(start_line=0, end_line=0, start_char=14, end_char=17, value=u'ist', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=18, end_char=23, value=u'freie', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=24, end_char=32, value=u'software', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=34, end_char=37, value=u'sie', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=38, end_char=44, value=u'k\xf6nnen', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=45, end_char=47, value=u'es', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=48, end_char=53, value=u'unter', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=54, end_char=57, value=u'den', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=58, end_char=69, value=u'bedingungen', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=70, end_char=73, value=u'der', gap=NO_GAP),
        Token(start_line=0, end_line=0, start_char=74, end_char=77, value=u'gnu', gap=NO_GAP),
        Token(start_line=1, end_line=1, start_char=1, end_char=8, value=u'general', gap=NO_GAP),
        Token(start_line=1, end_line=1, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=17, end_char=20, value=u'wie', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=21, end_char=24, value=u'von', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=25, end_char=28, value=u'der', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=29, end_char=33, value=u'free', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=34, end_char=42, value=u'software', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=43, end_char=53, value=u'foundation', gap=NO_GAP),
        Token(start_line=2, end_line=2, start_char=54, end_char=68, value=u'ver\xf6ffentlicht', gap=NO_GAP),
        Token(start_line=3, end_line=3, start_char=1, end_char=12, value=u'weitergeben', gap=NO_GAP),
        Token(start_line=3, end_line=3, start_char=13, end_char=16, value=u'und', gap=NO_GAP),
        Token(start_line=3, end_line=3, start_char=17, end_char=21, value=u'oder', gap=NO_GAP),
        Token(start_line=3, end_line=3, start_char=23, end_char=24, value=u'n', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=1, end_char=13, value=u'modifizieren', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=15, end_char=23, value=u'entweder', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=24, end_char=29, value=u'gem\xe4\xdf', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=30, end_char=37, value=u'version', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=38, end_char=39, value=u'3', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=40, end_char=43, value=u'der', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=44, end_char=50, value=u'lizenz', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=51, end_char=55, value=u'oder', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=57, end_char=61, value=u'nach', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=62, end_char=67, value=u'ihrer', gap=NO_GAP),
        Token(start_line=4, end_line=4, start_char=68, end_char=74, value=u'option', gap=NO_GAP),
        Token(start_line=5, end_line=5, start_char=1, end_char=6, value=u'jeder', gap=NO_GAP),
        Token(start_line=5, end_line=5, start_char=7, end_char=15, value=u'sp\xe4teren', gap=NO_GAP),
        Token(start_line=5, end_line=5, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
        Token(start_line=6, end_line=6, start_char=1, end_char=8, value=u'version', gap=NO_GAP),
        Token(start_line=6, end_line=6, start_char=10, end_char=11, value=u'n', gap=NO_GAP),
        Token(start_line=7, end_line=7, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=1, end_char=4, value=u'die', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=5, end_char=21, value=u'ver\xf6ffentlichung', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=22, end_char=25, value=u'von', gap=DEFAULT_GAP),
        Token(start_line=8, end_line=8, start_char=38, end_char=45, value=u'erfolgt', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=46, end_char=48, value=u'in', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=49, end_char=52, value=u'der', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=53, end_char=61, value=u'hoffnung', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=63, end_char=66, value=u'da\xdf', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=67, end_char=69, value=u'es', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=70, end_char=75, value=u'ihnen', gap=NO_GAP),
        Token(start_line=8, end_line=8, start_char=76, end_char=79, value=u'von', gap=NO_GAP),
        Token(start_line=9, end_line=9, start_char=1, end_char=7, value=u'nutzen', gap=NO_GAP),
        Token(start_line=9, end_line=9, start_char=9, end_char=10, value=u'n', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=1, end_char=5, value=u'sein', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=6, end_char=10, value=u'wird', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=12, end_char=16, value=u'aber', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=17, end_char=21, value=u'ohne', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=22, end_char=32, value=u'irgendeine', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=33, end_char=41, value=u'garantie', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=43, end_char=48, value=u'sogar', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=49, end_char=53, value=u'ohne', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=54, end_char=57, value=u'die', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=58, end_char=67, value=u'implizite', gap=NO_GAP),
        Token(start_line=10, end_line=10, start_char=68, end_char=76, value=u'garantie', gap=NO_GAP),
        Token(start_line=11, end_line=11, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
        Token(start_line=11, end_line=11, start_char=5, end_char=15, value=u'marktreife', gap=NO_GAP),
        Token(start_line=11, end_line=11, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=1, end_char=5, value=u'oder', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=6, end_char=9, value=u'der', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=10, end_char=24, value=u'verwendbarkeit', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=25, end_char=28, value=u'f\xfcr', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=29, end_char=34, value=u'einen', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=35, end_char=45, value=u'bestimmten', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=46, end_char=51, value=u'zweck', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=53, end_char=60, value=u'details', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=61, end_char=67, value=u'finden', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=68, end_char=71, value=u'sie', gap=NO_GAP),
        Token(start_line=12, end_line=12, start_char=72, end_char=74, value=u'in', gap=NO_GAP),
        Token(start_line=13, end_line=13, start_char=1, end_char=4, value=u'der', gap=NO_GAP),
        Token(start_line=13, end_line=13, start_char=5, end_char=8, value=u'gnu', gap=NO_GAP),
        Token(start_line=13, end_line=13, start_char=9, end_char=16, value=u'general', gap=NO_GAP),
        Token(start_line=13, end_line=13, start_char=18, end_char=19, value=u'n', gap=NO_GAP),
        Token(start_line=14, end_line=14, start_char=1, end_char=7, value=u'public', gap=NO_GAP),
        Token(start_line=14, end_line=14, start_char=8, end_char=15, value=u'license', gap=NO_GAP),
        Token(start_line=14, end_line=14, start_char=17, end_char=18, value=u'n', gap=NO_GAP),
        Token(start_line=15, end_line=15, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=1, end_char=4, value=u'sie', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=5, end_char=12, value=u'sollten', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=13, end_char=16, value=u'ein', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=17, end_char=25, value=u'exemplar', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=26, end_char=29, value=u'der', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=30, end_char=33, value=u'gnu', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=34, end_char=41, value=u'general', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=42, end_char=48, value=u'public', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=49, end_char=56, value=u'license', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=57, end_char=65, value=u'zusammen', gap=NO_GAP),
        Token(start_line=16, end_line=16, start_char=66, end_char=69, value=u'mit', gap=DEFAULT_GAP),
        Token(start_line=17, end_line=17, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=1, end_char=9, value=u'erhalten', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=10, end_char=15, value=u'haben', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=17, end_char=22, value=u'falls', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=23, end_char=28, value=u'nicht', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=30, end_char=39, value=u'schreiben', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=40, end_char=43, value=u'sie', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=44, end_char=46, value=u'an', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=47, end_char=50, value=u'die', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=51, end_char=55, value=u'free', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=56, end_char=64, value=u'software', gap=NO_GAP),
        Token(start_line=18, end_line=18, start_char=65, end_char=75, value=u'foundation', gap=NO_GAP),
        Token(start_line=19, end_line=19, start_char=2, end_char=3, value=u'n', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=1, end_char=4, value=u'inc', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=7, end_char=9, value=u'51', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=10, end_char=18, value=u'franklin', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=19, end_char=21, value=u'st', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=23, end_char=28, value=u'fifth', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=29, end_char=34, value=u'floor', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=36, end_char=42, value=u'boston', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=44, end_char=46, value=u'ma', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=47, end_char=52, value=u'02110', gap=NO_GAP),
        Token(start_line=20, end_line=20, start_char=54, end_char=57, value=u'usa', gap=NO_GAP),
    ]
    test_file = self.get_test_loc('analysis/unicode/12180.txt')
    # the whole open file object (an iterable of lines) is fed to the parser
    with codecs.open(test_file, encoding='utf-8') as test:
        result = list(self.template_parsing(test))
    assert expected == result
def test_process_template_does_not_crash_on_unicode_rules_text_1(self):
    """Smoke test: template-parsing this unicode rule text must not raise."""
    rule_file = self.get_test_loc('analysis/unicode/12290.txt')
    with codecs.open(rule_file, encoding='utf-8') as rule_text:
        # fully consume the token stream; passing means no exception
        list(self.template_parsing(rule_text))
def test_process_template_does_not_crash_on_unicode_rules_text_2(self):
    """Smoke test: template-parsing this unicode rule text must not raise."""
    rule_file = self.get_test_loc('analysis/unicode/12319.txt')
    with codecs.open(rule_file, encoding='utf-8') as rule_text:
        # fully consume the token stream; passing means no exception
        list(self.template_parsing(rule_text))
def test_process_template_does_not_crash_on_unicode_rules_text_3(self):
    """Smoke test: template-parsing this unicode rule text must not raise."""
    rule_file = self.get_test_loc('analysis/unicode/12405.txt')
    with codecs.open(rule_file, encoding='utf-8') as rule_text:
        # fully consume the token stream; passing means no exception
        list(self.template_parsing(rule_text))
def test_process_template_does_not_crash_on_unicode_rules_text_4(self):
    """Smoke test: template-parsing this unicode rule text must not raise."""
    rule_file = self.get_test_loc('analysis/unicode/12407.txt')
    with codecs.open(rule_file, encoding='utf-8') as rule_text:
        # fully consume the token stream; passing means no exception
        list(self.template_parsing(rule_text))
def test_process_template_does_not_crash_on_unicode_rules_text_5(self):
    """Smoke test: template-parsing this unicode rule text must not raise."""
    rule_file = self.get_test_loc('analysis/unicode/12420.txt')
    with codecs.open(rule_file, encoding='utf-8') as rule_text:
        # fully consume the token stream; passing means no exception
        list(self.template_parsing(rule_text))
def test_process_template_detects_non_well_formed_templatized_regions(self):
    """An opening {{ with no closing }} must raise UnbalancedTemplateError."""
    malformed = u'abcd{{ef'
    self.assertRaises(UnbalancedTemplateError, self.template_parsing, malformed)
def test_process_template_handles_combination_of_well_formed_and_ill_formed_templates_2(self):
    """Mixing balanced and unbalanced {{ }} markers must still raise."""
    malformed = u'}}{{{{abc}}ddd}}{{'
    self.assertRaises(UnbalancedTemplateError, self.template_parsing, malformed)
def test_process_template_can_parse_ill_formed_template(self):
    """An ill-formed template still parses: check the gap values and the
    full token dump against a JSON fixture.
    """
    template_loc = self.get_test_loc('analysis/ill_formed_template/text.txt')
    tokens = list(self.template_parsing(unicode_text_lines(template_loc)))
    # the templated regions must yield exactly these gaps, in this order
    assert [30, 10, 60, 70, 20] == [tok.gap for tok in tokens if tok.gap]
    expected_loc = self.get_test_loc('analysis/ill_formed_template/expected_grams.json')
    token_dicts = [tok._asdict() for tok in tokens]
    regen = False  # flip to True to regenerate the JSON fixture
    if regen:
        with codecs.open(expected_loc, 'w', encoding='utf-8') as out:
            json.dump(token_dicts, out, indent=2)
    with codecs.open(expected_loc, encoding='utf-8') as inp:
        expected = json.load(inp)
    assert expected == token_dicts
def test_token_positions_are_kept_same_for_unigrams_and_ngrams_with_template(self):
    """Token start/end positions are identical across unigram and ngram
    tokenizers, with and without template processing.
    """
    text = u'some text is some text {{ }} in all cases\n \n'
    expected_span = (0, 7,)

    def assert_span(token_stream):
        # first token start and last token end must match the expected span
        toks = list(token_stream)
        assert expected_span == (toks[0].start, toks[-1].end,)

    assert_span(unigram_tokenizer(iter([text]), template=False))
    assert_span(unigram_tokenizer(iter([text]), template=True))
    assert_span(ngram_tokenizer(iter([text]), ngram_len=3, template=False))
    assert_span(ngram_tokenizer(iter([text]), ngram_len=3, template=True))
def test_plain_unigrams_from_templated_unigrams(self):
    """The template processor drops the {{3 ...}} region, carries its gap on
    the preceding token, and leaves the other unigrams untouched.
    """
    source = [u'My old tailor {{3 John Doe}} is quite very rich']
    raw_unigrams = unigram_splitter(source, splitter=template_splitter)
    result = list(template_processor(raw_unigrams))
    expected = [
        Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
        Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=0, value=u'old'),
        Token(start=0, start_line=0, start_char=7, end_line=0, end_char=13, end=0, gap=3, value=u'tailor'),
        Token(start=0, start_line=0, start_char=29, end_line=0, end_char=31, end=0, gap=0, value=u'is'),
        Token(start=0, start_line=0, start_char=32, end_line=0, end_char=37, end=0, gap=0, value=u'quite'),
        Token(start=0, start_line=0, start_char=38, end_line=0, end_char=42, end=0, gap=0, value=u'very'),
        Token(start=0, start_line=0, start_char=43, end_line=0, end_char=47, end=0, gap=0, value=u'rich'),
    ]
    assert expected == result
class TestLegacyNgrams(FileBasedTesting):
    """Tests for the legacy, value-only ngram processing (no Token positions)."""
    # directory holding on-disk test fixtures
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def test_plain_ngrams_processor(self):
        """Build 6-grams of plain token values from a Java snippet using a
        local reference implementation of an ngram window.
        """
        from collections import deque

        def ngram_processor(items, ngram_len):
            """
            Given a sequence or iterable of arbitrary items, return an iterator of
            item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable
            items.
            For example::
            >>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))
            [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
            """
            # sliding window of at most ngram_len items
            ngram = deque()
            current_len = 0
            for item in items:
                if current_len == ngram_len:
                    # window full: emit it, then slide left by one
                    yield tuple(ngram)
                    ngram.popleft()
                    current_len -= 1
                ngram.append(item)
                current_len += 1
            # emit the final (possibly shorter) window
            yield tuple(ngram)

        text = (
            u'''/*COMMENT
COMMENT COMMENT
- COMMENT
*/
public static boolean activateSearchResultView() {
String defaultPerspectiveId= SearchUI.getDefaultPerspectiveId();
if (defaultPerspectiveId != null) {
IWorkbenchWindow window= SearchPlugin.getActiveWorkbenchWindow();
if (window != null && window.getShell() != null && !window.getShell().isDisposed()) {
try {
PlatformUI.getWorkbench().showPerspective(defaultPerspectiveId, window);
} catch (WorkbenchException ex) {
// show view in current perspective
}
}
}''')
        # each tuple is a 6-gram of lowercased token values, in stream order
        expected = [
            (u'comment', u'comment', u'comment', u'comment', u'public', u'static'),
            (u'comment', u'comment', u'comment', u'public', u'static', u'boolean'),
            (u'comment', u'comment', u'public', u'static', u'boolean',
             u'activatesearchresultview'),
            (u'comment', u'public', u'static', u'boolean',
             u'activatesearchresultview', u'string'),
            (u'public', u'static', u'boolean', u'activatesearchresultview',
             u'string', u'defaultperspectiveid'),
            (u'static', u'boolean', u'activatesearchresultview', u'string',
             u'defaultperspectiveid', u'searchui'),
            (u'boolean', u'activatesearchresultview', u'string',
             u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid'),
            (u'activatesearchresultview', u'string', u'defaultperspectiveid',
             u'searchui', u'getdefaultperspectiveid', u'if'),
            (u'string', u'defaultperspectiveid', u'searchui',
             u'getdefaultperspectiveid', u'if', u'defaultperspectiveid'),
            (u'defaultperspectiveid', u'searchui', u'getdefaultperspectiveid',
             u'if', u'defaultperspectiveid', u'null'),
            (u'searchui', u'getdefaultperspectiveid', u'if',
             u'defaultperspectiveid', u'null', u'iworkbenchwindow'),
            (u'getdefaultperspectiveid', u'if', u'defaultperspectiveid', u'null',
             u'iworkbenchwindow', u'window'),
            (u'if', u'defaultperspectiveid', u'null', u'iworkbenchwindow',
             u'window', u'searchplugin'),
            (u'defaultperspectiveid', u'null', u'iworkbenchwindow', u'window',
             u'searchplugin', u'getactiveworkbenchwindow'),
            (u'null', u'iworkbenchwindow', u'window', u'searchplugin',
             u'getactiveworkbenchwindow', u'if'),
            (u'iworkbenchwindow', u'window', u'searchplugin',
             u'getactiveworkbenchwindow', u'if', u'window'),
            (u'window', u'searchplugin', u'getactiveworkbenchwindow', u'if',
             u'window', u'null'),
            (u'searchplugin', u'getactiveworkbenchwindow', u'if', u'window',
             u'null', u'window'),
            (u'getactiveworkbenchwindow', u'if', u'window', u'null', u'window',
             u'getshell'),
            (u'if', u'window', u'null', u'window', u'getshell', u'null'),
            (u'window', u'null', u'window', u'getshell', u'null', u'window'),
            (u'null', u'window', u'getshell', u'null', u'window', u'getshell'),
            (u'window', u'getshell', u'null', u'window', u'getshell', u'isdisposed'),
            (u'getshell', u'null', u'window', u'getshell', u'isdisposed', u'try'),
            (u'null', u'window', u'getshell', u'isdisposed', u'try', u'platformui'),
            (u'window', u'getshell', u'isdisposed', u'try', u'platformui',
             u'getworkbench'),
            (u'getshell', u'isdisposed', u'try', u'platformui', u'getworkbench',
             u'showperspective'),
            (u'isdisposed', u'try', u'platformui', u'getworkbench',
             u'showperspective', u'defaultperspectiveid'),
            (u'try', u'platformui', u'getworkbench', u'showperspective',
             u'defaultperspectiveid', u'window'),
            (u'platformui', u'getworkbench', u'showperspective',
             u'defaultperspectiveid', u'window', u'catch'),
            (u'getworkbench', u'showperspective', u'defaultperspectiveid',
             u'window', u'catch', u'workbenchexception'),
            (u'showperspective', u'defaultperspectiveid', u'window', u'catch',
             u'workbenchexception', u'ex'),
            (u'defaultperspectiveid', u'window', u'catch', u'workbenchexception',
             u'ex', u'show'),
            (u'window', u'catch', u'workbenchexception', u'ex', u'show', u'view'),
            (u'catch', u'workbenchexception', u'ex', u'show', u'view', u'in'),
            (u'workbenchexception', u'ex', u'show', u'view', u'in', u'current'),
            (u'ex', u'show', u'view', u'in', u'current', u'perspective'),
        ]
        # strip positions: feed only the token values into the ngram window
        unigrams = (x.value for x
                    in unigram_splitter(text.splitlines()))
        result = list(ngram_processor(unigrams, ngram_len=6))
        assert expected == result
class TestNgrams(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_tokens_ngram_processor_bigrams_from_unigrams(self):
    """ngram_len=2 over a two-line text yields overlapping Token bigrams,
    including one pair that straddles the line break.
    """
    words = unigram_splitter(u'this is some text \n on multiple lines'.splitlines())
    bigrams = list(tokens_ngram_processor(words, ngram_len=2))
    expected = [
        (Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
         Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
        (Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
         Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
        (Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
         Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
        (Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
         Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
        (Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
         Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
        (Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
         Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines')),
    ]
    assert expected == bigrams
def test_tokens_ngram_processor_n2_with_2_tokens(self):
    """Exactly two tokens with ngram_len=2 produce a single bigram."""
    tokens = list(unigram_splitter(u'this is'.splitlines()))
    result = list(tokens_ngram_processor(iter(tokens), ngram_len=2))
    expected = [
        (Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
         Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
    ]
    assert expected == result
def test_tokens_ngram_processor_n3_with_2_tokens(self):
    """With fewer tokens than ngram_len=3, a single short 2-gram is emitted."""
    tokens = list(unigram_splitter(u'this is'.splitlines()))
    result = list(tokens_ngram_processor(iter(tokens), ngram_len=3))
    expected = [
        (Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
         Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
    ]
    assert expected == result
def test_tokens_ngram_processor_n4_with_2_tokens(self):
    """With fewer tokens than ngram_len=4, a single short 2-gram is emitted."""
    tokens = list(unigram_splitter(u'this is'.splitlines()))
    result = list(tokens_ngram_processor(iter(tokens), ngram_len=4))
    expected = [
        (Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
         Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
    ]
    assert expected == result
def test_tokens_ngram_processor_n10_with_2_tokens(self):
    """With fewer tokens than ngram_len=10, a single short 2-gram is emitted."""
    tokens = list(unigram_splitter(u'this is'.splitlines()))
    result = list(tokens_ngram_processor(iter(tokens), ngram_len=10))
    expected = [
        (Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
         Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is')),
    ]
    assert expected == result
def test_tokens_ngram_processor_n1_with_2_tokens(self):
    """ngram_len=1 degenerates to one 1-tuple per token."""
    tokens = list(unigram_splitter(u'this is'.splitlines()))
    result = list(tokens_ngram_processor(iter(tokens), ngram_len=1))
    expected = [
        (Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),),
        (Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),),
    ]
    assert expected == result
def test_tokens_ngram_processor_3grams_from_unigrams_on_multilines(self):
    """ngram_len=3 over a two-line text yields overlapping trigram tuples
    spanning the line break.
    """
    words = unigram_splitter(u'this is some text \n on multiple lines'.splitlines())
    trigrams = list(tokens_ngram_processor(words, ngram_len=3))
    expected = [
        (Token(start_line=0, start_char=0, end_line=0, end_char=4, value=u'this'),
         Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
         Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some')),
        (Token(start_line=0, start_char=5, end_line=0, end_char=7, value=u'is'),
         Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
         Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text')),
        (Token(start_line=0, start_char=8, end_line=0, end_char=12, value=u'some'),
         Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
         Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on')),
        (Token(start_line=0, start_char=13, end_line=0, end_char=17, value=u'text'),
         Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
         Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple')),
        (Token(start_line=1, start_char=1, end_line=1, end_char=3, value=u'on'),
         Token(start_line=1, start_char=4, end_line=1, end_char=12, value=u'multiple'),
         Token(start_line=1, start_char=13, end_line=1, end_char=18, value=u'lines')),
    ]
    assert expected == trigrams
def test_tokens_ngram_processor_with_template_gaps_basic(self):
    """A template gap splits the ngram stream: tuples never cross a gap."""
    source = [u'My old {{3 John Doe}} is rich']
    tokens = template_processor(unigram_splitter(source, splitter=template_splitter))
    ngrams = list(tokens_ngram_processor(tokens, ngram_len=3))
    expected = [
        (Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=0, value=u'my'),
         Token(start=0, start_line=0, start_char=3, end_line=0, end_char=6, end=0, gap=3, value=u'old'),
         ),
        (Token(start=0, start_line=0, start_char=22, end_line=0, end_char=24, end=0, gap=0, value=u'is'),
         Token(start=0, start_line=0, start_char=25, end_line=0, end_char=29, end=0, gap=0, value=u'rich'),
         ),
    ]
    assert expected == ngrams
def test_tokens_ngram_processor_with_template_gaps_merged(self):
    """ngram_to_token merges each ngram tuple into one Token; the gap from
    the template region is carried on the merged Token that precedes it.
    """
    source = [u'My old tailor {{3 John Doe}} is quite very rich']
    tokens = template_processor(unigram_splitter(source, splitter=template_splitter))
    ngram_len = 3
    merged = list(ngram_to_token(tokens_ngram_processor(tokens, ngram_len=ngram_len)))
    expected = [
        Token(start_line=0, start_char=0, end_line=0, end_char=13, gap=ngram_len, value=(u'my', u'old', u'tailor')),
        Token(start_line=0, start_char=29, end_line=0, end_char=42, gap=0, value=(u'is', u'quite', u'very')),
        Token(start_line=0, start_char=32, end_line=0, end_char=47, gap=0, value=(u'quite', u'very', u'rich')),
    ]
    assert expected == merged
def test_tokens_ngram_processor_with_gaps_merged_short_grams(self):
    """Gaps close together force merged grams shorter than ngram_len."""
    source = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich']
    tokens = template_processor(unigram_splitter(source, splitter=template_splitter))
    ngram_len = 3
    merged = list(ngram_to_token(tokens_ngram_processor(tokens, ngram_len=ngram_len)))
    expected = [
        Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
        Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
        Token(start=0, start_line=0, start_char=48, end_line=0, end_char=57, end=0, gap=0, value=(u'very', u'rich')),
    ]
    assert expected == merged
def test_tokens_ngram_processor_with_gaps_merged_short_and_long_grams(self):
    """Short grams appear around gaps while a gap-free tail still yields
    full-length overlapping grams.
    """
    source = [u'My {{3 tailor Joe}} is quite {{ pleasant and }} very rich really rich']
    tokens = template_processor(unigram_splitter(source, splitter=template_splitter))
    ngram_len = 3
    merged = list(ngram_to_token(tokens_ngram_processor(tokens, ngram_len=ngram_len)))
    expected = [
        Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
        Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
        Token(start=0, start_line=0, start_char=48, end_line=0, end_char=64, end=0, gap=0, value=(u'very', u'rich', u'really')),
        Token(start=0, start_line=0, start_char=53, end_line=0, end_char=69, end=0, gap=0, value=(u'rich', u'really', u'rich')),
    ]
    assert expected == merged
def test_ngram_to_token_processor_with_gaps_at_the_end(self):
    """A trailing template region produces no extra merged Token."""
    source = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
    tokens = template_processor(unigram_splitter(source, splitter=template_splitter))
    ngram_len = 3
    merged = list(ngram_to_token(tokens_ngram_processor(tokens, ngram_len=ngram_len)))
    expected = [
        Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=(u'my',)),
        Token(start=0, start_line=0, start_char=20, end_line=0, end_char=28, end=0, gap=5, value=(u'is', u'quite')),
    ]
    assert expected == merged
def test_tokens_ngram_processor_with_gaps_at_the_end_does_yield_empty_tuples(self):
    """A trailing template region must not leave an all-None tuple at the
    end of the ngram stream.
    """
    source = [u'My {{3 tailor Joe}} is quite {{ pleasant and }}']
    tokens = template_processor(unigram_splitter(source, splitter=template_splitter))
    ngram_len = 3
    ngrams = list(tokens_ngram_processor(tokens, ngram_len=ngram_len))
    # the last ngram is a real (short) tuple, never a tuple of Nones
    assert (None, None, None,) != ngrams[-1]
    expected = [
        (Token(start=0, start_line=0, start_char=0, end_line=0, end_char=2, end=0, gap=3, value=u'my'),),
        (Token(start=0, start_line=0, start_char=20, end_line=0, end_char=22, end=0, gap=0, value=u'is'),
         Token(start=0, start_line=0, start_char=23, end_line=0, end_char=28, end=0, gap=5, value=u'quite'),
         ),
    ]
    assert expected == ngrams
def test_ngrams_tokenizer_does_not_yield_4grams_for_3grams(self):
    """With ngram_len=3 every merged Token holds at most three values, even
    around multiple template regions.
    """
    source = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the names {{}}of its contributors may
                materials provided with the distribution.'''.splitlines()
    ngrams = list(ngram_tokenizer(iter(source), ngram_len=3, template=True))
    expected = [
        Token(start=0, start_line=0, start_char=0, end_line=0, end_char=16, end=2, gap=0, value=(u'neither', u'the', u'name')),
        Token(start=1, start_line=0, start_char=8, end_line=0, end_char=19, end=3, gap=10, value=(u'the', u'name', u'of')),
        Token(start=4, start_line=0, start_char=44, end_line=0, end_char=47, end=4, gap=5, value=(u'nor',)),
        Token(start=5, start_line=0, start_char=52, end_line=0, end_char=61, end=6, gap=5, value=(u'the', u'names')),
        Token(start=7, start_line=0, start_char=66, end_line=0, end_char=85, end=9, gap=0, value=(u'of', u'its', u'contributors')),
        Token(start=8, start_line=0, start_char=69, end_line=0, end_char=89, end=10, gap=0, value=(u'its', u'contributors', u'may')),
        Token(start=9, start_line=0, start_char=73, end_line=1, end_char=25, end=11, gap=0, value=(u'contributors', u'may', u'materials')),
        Token(start=10, start_line=0, start_char=86, end_line=1, end_char=34, end=12, gap=0, value=(u'may', u'materials', u'provided')),
        Token(start=11, start_line=1, start_char=16, end_line=1, end_char=39, end=13, gap=0, value=(u'materials', u'provided', u'with')),
        Token(start=12, start_line=1, start_char=26, end_line=1, end_char=43, end=14, gap=0, value=(u'provided', u'with', u'the')),
        Token(start=13, start_line=1, start_char=35, end_line=1, end_char=56, end=15, gap=0, value=(u'with', u'the', u'distribution')),
    ]
    assert expected == ngrams
def test_tokens_ngram_processor_with_gaps_merged_always_returns_3grams_when_requested(self):
        """tokens_ngram_processor with ngram_len=3 must cap every ngram at 3
        tokens; template gaps truncate ngrams rather than extend them."""
        lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
        names {{}}of its contributors may materials provided with
        the distribution.'''.splitlines()
        unigrams = unigram_splitter(lines, splitter=template_splitter)
        templated = template_processor(unigrams)
        result = list(tokens_ngram_processor(templated, ngram_len=3))
        expected = [
            (Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
             Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name')),
            (Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
             Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
            (Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
            (Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
            (Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
             Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
             Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors')),
            (Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
             Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
            (Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
            (Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
            (Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
            (Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
             Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
            (Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
             Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
        ]
        assert expected == result
def test_tokens_ngram_processor_with_gaps_merged_always_returns_4grams_when_requested(self):
        """Same text as the 3gram case above but with ngram_len=4: ngrams are
        capped at 4 tokens and gaps still truncate them."""
        lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}the
        names {{}}of its contributors may materials provided with
        the distribution.'''.splitlines()
        unigrams = unigram_splitter(lines, splitter=template_splitter)
        templated = template_processor(unigrams)
        result = list(tokens_ngram_processor(templated, ngram_len=4))
        expected = [
            (Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
             Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
             Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
            (Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
            (Token(start=0, start_line=0, start_char=52, end_line=0, end_char=55, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=1, start_char=19, end_line=1, end_char=24, end=0, gap=5, value=u'names')),
            (Token(start=0, start_line=1, start_char=29, end_line=1, end_char=31, end=0, gap=0, value=u'of'),
             Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
             Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may')),
            (Token(start=0, start_line=1, start_char=32, end_line=1, end_char=35, end=0, gap=0, value=u'its'),
             Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials')),
            (Token(start=0, start_line=1, start_char=36, end_line=1, end_char=48, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided')),
            (Token(start=0, start_line=1, start_char=49, end_line=1, end_char=52, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with')),
            (Token(start=0, start_line=1, start_char=53, end_line=1, end_char=62, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
             Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the')),
            (Token(start=0, start_line=1, start_char=63, end_line=1, end_char=71, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=72, end_line=1, end_char=76, end=0, gap=0, value=u'with'),
             Token(start=0, start_line=2, start_char=19, end_line=2, end_char=22, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=2, start_char=23, end_line=2, end_char=35, end=0, gap=0, value=u'distribution'))
        ]
        assert expected == result
def test_tokens_ngram_processor_with_gaps_can_handle_contiguous_template_regions(self):
        """Back-to-back template regions ({{...}} {{...}}) must not confuse
        the ngram processor: each gap still truncates ngrams cleanly."""
        lines = u'''Neither the name of {{10 the ORGANIZATION}} nor {{}}
        {{6 }}of its contributors may materials provided with the
        distribution.'''.splitlines()
        unigrams = unigram_splitter(lines, splitter=template_splitter)
        templated = template_processor(unigrams)
        result = list(tokens_ngram_processor(templated, ngram_len=4))
        expected = [
            (Token(start=0, start_line=0, start_char=0, end_line=0, end_char=7, end=0, gap=0, value=u'neither'),
             Token(start=0, start_line=0, start_char=8, end_line=0, end_char=11, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=0, start_char=12, end_line=0, end_char=16, end=0, gap=0, value=u'name'),
             Token(start=0, start_line=0, start_char=17, end_line=0, end_char=19, end=0, gap=10, value=u'of')),
            (Token(start=0, start_line=0, start_char=44, end_line=0, end_char=47, end=0, gap=5, value=u'nor'),),
            (Token(start=0, start_line=1, start_char=25, end_line=1, end_char=27, end=0, gap=0, value=u'of'),
             Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
             Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may')),
            (Token(start=0, start_line=1, start_char=28, end_line=1, end_char=31, end=0, gap=0, value=u'its'),
             Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials')),
            (Token(start=0, start_line=1, start_char=32, end_line=1, end_char=44, end=0, gap=0, value=u'contributors'),
             Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided')),
            (Token(start=0, start_line=1, start_char=45, end_line=1, end_char=48, end=0, gap=0, value=u'may'),
             Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with')),
            (Token(start=0, start_line=1, start_char=49, end_line=1, end_char=58, end=0, gap=0, value=u'materials'),
             Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
             Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the')),
            (Token(start=0, start_line=1, start_char=59, end_line=1, end_char=67, end=0, gap=0, value=u'provided'),
             Token(start=0, start_line=1, start_char=68, end_line=1, end_char=72, end=0, gap=0, value=u'with'),
             Token(start=0, start_line=1, start_char=73, end_line=1, end_char=76, end=0, gap=0, value=u'the'),
             Token(start=0, start_line=2, start_char=19, end_line=2, end_char=31, end=0, gap=0, value=u'distribution'))
        ]
        assert expected == result
def test_ngram_tokenizer_can_handle_gaps_at_end_of_text(self):
        """A trailing template region must not raise or emit spurious ngrams;
        the final ngram carries the gap value (10) from the template."""
        lines = [u'Neither the name of {{10 the ORGANIZATION}} ']
        ngram_len = 2
        result = list(ngram_tokenizer(lines, ngram_len, template=True))
        expected = [
            Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'neither', u'the')),
            Token(start=1, start_line=0, start_char=8, end_line=0, end_char=16, end=2, gap=0, value=(u'the', u'name')),
            Token(start=2, start_line=0, start_char=12, end_line=0, end_char=19, end=3, gap=10, value=(u'name', u'of'))
        ]
        assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n3(self):
        """With ngram_len=3 and only two words, a single 2-token ngram is
        produced and its offsets round-trip through doc_subset()."""
        lines = [u'X11 License']
        ngram_len = 3
        result = list(ngram_tokenizer(lines, ngram_len))
        assert lines == list(doc_subset(lines, result[0]))
        expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
        assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_n1(self):
        """ngram_len=1 yields one single-token ngram per word with per-word
        character offsets."""
        lines = [u'X11 License']
        ngram_len = 1
        result = list(ngram_tokenizer(lines, ngram_len))
        expected = [
            Token(start=0, start_line=0, start_char=0, end_line=0, end_char=3, end=0, gap=0, value=(u'x11',)),
            Token(start=1, start_line=0, start_char=4, end_line=0, end_char=11, end=1, gap=0, value=(u'license',)),
        ]
        assert expected == result
def test_ngram_tokenizer_returns_correct_offsets_template(self):
        """template=True on text without templates must behave exactly like
        the plain n3 case above."""
        lines = [u'X11 License']
        ngram_len = 3
        result = list(ngram_tokenizer(lines, ngram_len, template=True))
        assert lines == list(doc_subset(lines, result[0]))
        expected = [Token(start=0, start_line=0, start_char=0, end_line=0, end_char=11, end=1, gap=0, value=(u'x11', u'license'))]
        assert expected == result
def test_unicode_text_lines_handles_weird_xml_encodings(self):
        """unicode_text_lines() output for an oddly-encoded XML file must
        match a pickled reference captured from a known-good run."""
        test_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom')
        result = list(unicode_text_lines(test_file))
        expected_file = self.get_test_loc('analysis/weird_encoding/easyconf-0.9.0.pom.expected')
        with open(expected_file, 'rb') as tf:
            expected = cPickle.load(tf)
        assert expected == result
class TestMultigrams(FileBasedTesting):
    """Placeholder suite for multigram helpers; coverage currently lives in
    the doctests of the implementation module."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
    # TODO: add more tests beyond the simple doctests that exist in the code
@skipIf(True, 'Performance tests only')
class TestAnalysisPerformance(FileBasedTesting):
    """Manual micro-benchmarks comparing the project splitters against plain
    regexes and str.split. Always skipped; run by flipping the skipIf flag."""
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
    def test_splitter_perf(self):
        test_file = self.get_test_loc('perf/test.txt')
        # 100x the sample file to get measurable timings.
        text = open(test_file).read() * 100
        # NOTE(review): `unicode` is Python 2-only -- this benchmark will not
        # run on Python 3 as written.
        utext = unicode(text)
        # The setup is rendered to a source string for timeit; %r embeds the
        # benchmark text literally into that source.
        setup1 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws =re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts= re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
        def check_perf(setup):
            # Times each splitter variant 1000x and prints the results.
            from timeit import timeit
            stmt = 'list(w for w in %s(%s))'
            print()
            print('Unicode template')
            print(timeit(stmt % ('unicode_ts', 'utext'), setup=setup, number=1000))
            print('Plain template')
            print(timeit(stmt % ('plain_ts', 'text'), setup=setup, number=1000))
            print('Unicode words')
            print(timeit(stmt % ('unicode_ws', 'utext'), setup=setup, number=1000))
            print('Plain words')
            print(timeit(stmt % ('plain_ws', 'text'), setup=setup, number=1000))
            print('Plain split')
            print(timeit('text.split()', setup=setup, number=1000))
            print('Unicode split')
            print(timeit('utext.split()', setup=setup, number=1000))
            print('Line split')
            print(timeit('text.splitlines(False)', setup=setup, number=1000))
            print('Line split with ends')
            print(timeit('text.splitlines(True)', setup=setup, number=1000))
        check_perf(setup=setup1)
        # NOTE(review): setup2 is byte-identical to setup1, so the second run
        # measures exactly the same thing -- it was likely meant to differ.
        setup2 = '''
import re
from textcode import analysis
unicode_ws = analysis.word_splitter
plain_ws =re.compile(r'[^\W_]+').finditer
unicode_ts = analysis.template_splitter
plain_ts= re.compile(r'(?:[^\W_])+|(?:\{\{)|(?:\}\})').finditer
text = %r
utext = %r''' % (text, utext)
        check_perf(setup=setup2)
| tests/textcode/test_analysis.py | 79,390 | Given a sequence or iterable of arbitrary items, return an iterator of
item ngrams tuples of length ngram_len. Buffers at most ngram_len iterable
items.
For example::
>>> list(ngram_processor([1, 2, 3, 4, 5], ngram_len=3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
Copyright (c) 2015 nexB Inc. and others. All rights reserved. http://nexb.com and https://github.com/nexB/scancode-toolkit/ The ScanCode software is licensed under the Apache License version 2.0. Data generated with ScanCode require an acknowledgment. ScanCode is a trademark of nexB Inc. You may not use this software except in compliance with the License. You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. When you publish or redistribute any data created with ScanCode or any ScanCode derivative work, you must accompany this data with the following acknowledgment: Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. No content created from ScanCode should be considered or used as legal advice. Consult an Attorney for any legal advice. ScanCode is a free software code scanning tool from nexB Inc. and others. Visit https://github.com/nexB/scancode-toolkit/ for support and download. Code style note: lines are not wrapped to PEP8 line length on purpose to keep the tests more readable test when we are passing a location or a list TODO: add more tests beyond the simple doctests that exist in the code | 1,791 | en | 0.840768 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..registration import MultiResolutionAffineRegistration
def test_MultiResolutionAffineRegistration_inputs():
    """Check every declared input trait against its expected CLI metadata.

    Auto-generated by tools/checkspecs.py -- regenerate rather than edit.
    """
    input_map = dict(
        args=dict(argstr='%s', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        fixedImage=dict(
            argstr='%s',
            position=-2,
        ),
        fixedImageMask=dict(argstr='--fixedImageMask %s', ),
        fixedImageROI=dict(argstr='--fixedImageROI %s', ),
        metricTolerance=dict(argstr='--metricTolerance %f', ),
        movingImage=dict(
            argstr='%s',
            position=-1,
        ),
        numIterations=dict(argstr='--numIterations %d', ),
        numLineIterations=dict(argstr='--numLineIterations %d', ),
        resampledImage=dict(
            argstr='--resampledImage %s',
            hash_files=False,
        ),
        saveTransform=dict(
            argstr='--saveTransform %s',
            hash_files=False,
        ),
        stepSize=dict(argstr='--stepSize %f', ),
        stepTolerance=dict(argstr='--stepTolerance %f', ),
    )
    inputs = MultiResolutionAffineRegistration.input_spec()
    # Every metadata key/value pair declared above must match the trait.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_MultiResolutionAffineRegistration_outputs():
    """Check the declared output traits exist (no extra metadata expected).

    Auto-generated by tools/checkspecs.py -- regenerate rather than edit.
    """
    output_map = dict(
        resampledImage=dict(),
        saveTransform=dict(),
    )
    outputs = MultiResolutionAffineRegistration.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py | 1,783 | AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT | 51 | en | 0.653745 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# Bug fix: use __file__ (the path of this conf.py), not __name__ (the module
# name string "conf", whose dirname is '') so the package root one level up
# is actually put on sys.path for autodoc.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# -- Project information -----------------------------------------------------
project = 'bettermoments'
copyright = '2019, Richard Teague'
author = 'Richard Teague'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'sphinx.ext.imgmath',
    # 'nbsphinx',
]
# Mock heavy runtime dependencies so autodoc can import the package without
# installing them.
autodoc_mock_imports = ['astropy', 'scipy', 'argparse', 'numpy']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Read the Docs injects its own theme, so only load sphinx_rtd_theme for
# local builds.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
    import sphinx_rtd_theme
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| docs/conf.py | 2,328 | Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 'nbsphinx', Is this really necessary... Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Readthedocs. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". | 1,518 | en | 0.69746 |
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
import json
from re import compile
def boolean(x):
    """Normalize a boolean-ish value to the CloudFormation strings
    "true"/"false".

    Accepts Python bools, 0/1 and their common string spellings; raises
    ValueError for anything else.
    """
    if x in (True, 1, '1', 'true', 'True'):
        return "true"
    if x in (False, 0, '0', 'false', 'False'):
        return "false"
    raise ValueError
def integer(x):
    """Validate that x is convertible to int and return x unchanged."""
    try:
        int(x)
    except (ValueError, TypeError):
        raise ValueError("%r is not a valid integer" % x)
    return x
def positive_integer(x):
    """Validate that x is an integer >= 0 and return x unchanged."""
    checked = integer(x)
    if int(checked) < 0:
        raise ValueError("%r is not a positive integer" % x)
    return x
def integer_range(minimum_val, maximum_val):
    """Return a checker validating an integer in [minimum_val, maximum_val]."""
    def integer_range_checker(x):
        value = int(x)
        if not minimum_val <= value <= maximum_val:
            raise ValueError('Integer must be between %d and %d' % (
                minimum_val, maximum_val))
        return x
    return integer_range_checker
def integer_list_item(allowed_values):
    """Return a checker that validates an integer against allowed_values.

    The candidate is first validated with positive_integer() and then
    compared numerically, so both 2 and "2" match an allowed value of 2.
    """
    def integer_list_item_checker(x):
        # Bug fix: positive_integer() returns its input unchanged, so a
        # string such as "2" never compared equal to the int 2 in
        # allowed_values. Convert to int before the membership test.
        i = int(positive_integer(x))
        if i in allowed_values:
            return x
        raise ValueError('Integer must be one of following: %s' %
                         ', '.join(str(j) for j in allowed_values))
    return integer_list_item_checker
def double(x):
    """Validate that x is convertible to float and return x unchanged."""
    try:
        float(x)
    except (ValueError, TypeError):
        raise ValueError("%r is not a valid double" % x)
    return x
def ignore(x):
    """Validator that performs no checks at all (bypasses validation)."""
    return x
def defer(x):
    """Method to indicate deferring property validation"""
    return x
def network_port(x):
    """Validate a network port in the range [-1, 65535].

    -1 is allowed (AWS uses it to mean "all ports"/ICMP codes). Ref and
    other AWSHelperFn values pass through unvalidated.
    """
    from . import AWSHelperFn

    # Network ports can be Ref items
    if isinstance(x, AWSHelperFn):
        return x

    i = integer(x)
    if int(i) < -1 or int(i) > 65535:
        # Bug fix: the old message ("must been between 0 and 65535") was
        # ungrammatical and misstated the range -- the check permits -1.
        raise ValueError(
            "network port %r must be between -1 and 65535" % i)
    return x
def tg_healthcheck_port(x):
    """Validate a target group health check port.

    The literal string "traffic-port" is accepted as-is; anything else
    must be a valid network port.
    """
    is_traffic_port = isinstance(x, str) and x == "traffic-port"
    if is_traffic_port:
        return x
    return network_port(x)
def s3_bucket_name(b):
    """Validate an S3 bucket name and return it unchanged.

    Rejects names containing consecutive dots, names shaped like IPv4
    addresses, and names outside the S3 rules (lowercase alphanumerics,
    dots and hyphens, 3-63 chars, starting/ending with a letter or digit).
    """
    looks_like_ip = compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
    valid_name = compile(r'^[a-z\d][a-z\d\.-]{1,61}[a-z\d]$')
    if '..' in b or looks_like_ip.match(b) or not valid_name.match(b):
        raise ValueError("%s is not a valid s3 bucket name" % b)
    return b
def elb_name(b):
    """Validate an Elastic Load Balancer name and return it unchanged.

    Names are 1-32 alphanumeric characters, with hyphens allowed only in
    interior positions.
    """
    elb_name_re = compile(r'^[a-zA-Z0-9](?:[a-zA-Z0-9\-]{0,30}[a-zA-Z0-9]{1})?$')  # noqa
    if not elb_name_re.match(b):
        raise ValueError("%s is not a valid elb name" % b)
    return b
def encoding(encoding):
    """Validate that encoding is 'plain' or 'base64'."""
    valid_encodings = ['plain', 'base64']
    if encoding in valid_encodings:
        return encoding
    raise ValueError('Encoding needs to be one of %r' % valid_encodings)
def status(status):
    """Validate that status is 'Active' or 'Inactive'."""
    valid_statuses = ['Active', 'Inactive']
    if status in valid_statuses:
        return status
    raise ValueError('Status needs to be one of %r' % valid_statuses)
def s3_transfer_acceleration_status(value):
    """Validate that value is 'Enabled' or 'Suspended'."""
    valid_status = ['Enabled', 'Suspended']
    if value in valid_status:
        return value
    raise ValueError(
        'AccelerationStatus must be one of: "%s"' % (
            ', '.join(valid_status)
        )
    )
def iam_names(b):
    """Validate the generic IAM name character set and return b unchanged.

    Allowed characters: alphanumerics plus _ . + = @ - , (one or more).
    """
    iam_name_re = compile(r'^[a-zA-Z0-9_\.\+\=\@\-\,]+$')
    if not iam_name_re.match(b):
        raise ValueError("%s is not a valid iam name" % b)
    return b
def iam_user_name(user_name):
    """Validate an IAM user name: non-empty, at most 64 characters, from
    the [\\w+=,.@-] character set. Returns the name unchanged."""
    if not user_name:
        raise ValueError(
            "AWS::IAM::User property 'UserName' may not be empty")
    if len(user_name) > 64:
        raise ValueError(
            "AWS::IAM::User property 'UserName' may not exceed 64 characters")

    iam_user_name_re = compile(r'^[\w+=,.@-]+$')
    if iam_user_name_re.match(user_name):
        return user_name
    # Bug fix: the invalid name must be %-formatted into the message; the
    # original passed the format string and the value as two separate
    # ValueError arguments, leaving the '%s' literal in the message.
    raise ValueError(
        "%s is not a valid value for AWS::IAM::User property 'UserName'"
        % user_name)
def iam_path(path):
    """Validate an IAM path: either '/' or a '/.../' delimited string of at
    most 512 characters. Returns the path unchanged."""
    if len(path) > 512:
        # Bug fix: format the path into the message; the original passed it
        # as a second ValueError argument instead of %-formatting it.
        raise ValueError('IAM path %s may not exceed 512 characters' % path)

    iam_path_re = compile(r'^\/.*\/$|^\/$')
    if not iam_path_re.match(path):
        raise ValueError("%s is not a valid iam path name" % path)
    return path
def iam_role_name(role_name):
    """Validate an IAM role name: at most 64 characters drawn from the
    iam_names() character set. Returns the name unchanged."""
    if len(role_name) > 64:
        raise ValueError('IAM Role Name may not exceed 64 characters')
    iam_names(role_name)
    return role_name
def iam_group_name(group_name):
    """Validate an IAM group name: at most 128 characters drawn from the
    iam_names() character set. Returns the name unchanged."""
    if len(group_name) > 128:
        # Bug fix: this validates a group name, not a role name -- the
        # original message was copy-pasted from iam_role_name.
        raise ValueError('IAM Group Name may not exceed 128 characters')
    iam_names(group_name)
    return group_name
def mutually_exclusive(class_name, properties, conditionals):
    """Ensure at most one of the conditional property names is set.

    A property counts as "set" when it is present in properties and is not
    the NoValue sentinel. Raises ValueError if more than one is set and
    returns the number of distinct set properties (0 or 1) otherwise.
    """
    from . import NoValue

    specified = [c for c in conditionals
                 if c in properties and not properties[c] == NoValue]
    specified_count = len(set(specified))
    if specified_count > 1:
        raise ValueError(('%s: only one of the following'
                          ' can be specified: %s') % (
                              class_name, ', '.join(conditionals)))
    return specified_count
def exactly_one(class_name, properties, conditionals):
    """Ensure exactly one of the conditional property names is set.

    Builds on mutually_exclusive() (which rejects more than one) and then
    rejects zero. Returns 1 on success.
    """
    count = mutually_exclusive(class_name, properties, conditionals)
    if count == 1:
        return count
    raise ValueError(('%s: one of the following'
                      ' must be specified: %s') % (
                          class_name, ', '.join(conditionals)))
def check_required(class_name, properties, conditionals):
    """Ensure every name in conditionals is present in properties.

    Raises ValueError naming the first missing property and the class.
    """
    for c in conditionals:
        if c not in properties:
            # Bug fix: the original wrote `"..." % c, class_name`, which
            # %-formats with only one argument (a TypeError at runtime) and
            # passes class_name as a second ValueError argument. Format both
            # values into a single message.
            raise ValueError(
                "Resource %s required in %s" % (c, class_name))
def json_checker(name, prop):
    """Validate that prop is JSON data for the property called name.

    A JSON string is parsed to verify it and returned unchanged; a dict is
    serialized to a JSON string; AWSHelperFn values pass through untouched.

    NOTE(review): `basestring` exists only on Python 2 -- on Python 3 this
    raises NameError for string input; confirm the supported runtime.
    """
    from . import AWSHelperFn

    if isinstance(prop, basestring):
        # Verify it is a valid json string
        json.loads(prop)
        return prop
    elif isinstance(prop, dict):
        # Convert the dict to a basestring
        return json.dumps(prop)
    elif isinstance(prop, AWSHelperFn):
        return prop
    else:
        raise ValueError("%s must be a str or dict" % name)
def notification_type(notification):
    """Validate that notification is 'Command' or 'Invocation'."""
    valid_notifications = ['Command', 'Invocation']
    if notification in valid_notifications:
        return notification
    raise ValueError(
        'NotificationType must be one of: "%s"' % (
            ', '.join(valid_notifications)
        )
    )
def notification_event(events):
    """Validate that every entry in events is a recognized notification
    event name; return the events unchanged."""
    valid_events = ['All', 'InProgress', 'Success', 'TimedOut', 'Cancelled',
                    'Failed']
    unknown = [e for e in events if e not in valid_events]
    if unknown:
        raise ValueError(
            'NotificationEvents must be at least one of: "%s"' % (
                ', '.join(valid_events)
            )
        )
    return events
def task_type(task):
    """Validate that task is one of the SSM maintenance task types."""
    valid_tasks = ['RUN_COMMAND', 'AUTOMATION', 'LAMBDA', 'STEP_FUNCTION']
    if task in valid_tasks:
        return task
    raise ValueError(
        'TaskType must be one of: "%s"' % (
            ', '.join(valid_tasks)
        )
    )
def compliance_level(level):
    """Validate that level is a recognized patch compliance level."""
    valid_levels = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'INFORMATIONAL',
                    'UNSPECIFIED']
    if level in valid_levels:
        return level
    raise ValueError(
        'ApprovedPatchesComplianceLevel must be one of: "%s"' % (
            ', '.join(valid_levels)
        )
    )
def operating_system(os):
    """Validate that os is a recognized SSM patch baseline OS name."""
    valid_os = ['WINDOWS', 'AMAZON_LINUX', 'AMAZON_LINUX_2', 'UBUNTU',
                'REDHAT_ENTERPRISE_LINUX', 'SUSE', 'CENTOS']
    if os in valid_os:
        return os
    raise ValueError(
        'OperatingSystem must be one of: "%s"' % (
            ', '.join(valid_os)
        )
    )
def vpn_pre_shared_key(key):
    """Validate a VPN pre-shared key.

    Keys are 8-64 characters of alphanumerics, '_' and '.', and may not
    start with the character '0'. Returns the key unchanged.
    """
    pre_shared_key_match_re = compile(
        r'^(?!0)([A-Za-z0-9]|\_|\.){8,64}$'
    )
    if pre_shared_key_match_re.match(key):
        return key
    raise ValueError(
        '%s is not a valid key.'
        ' Allowed characters are alphanumeric characters and ._. Must'
        ' be between 8 and 64 characters in length and cannot'
        ' start with zero (0).' % key
    )
def vpn_tunnel_inside_cidr(cidr):
    """Validate a VPN tunnel inside CIDR.

    Must be a /30 block inside 169.254.0.0/16 and must not be one of the
    AWS-reserved link-local blocks. Returns the CIDR unchanged.
    """
    reserved_cidrs = [
        '169.254.0.0/30',
        '169.254.1.0/30',
        '169.254.2.0/30',
        '169.254.3.0/30',
        '169.254.4.0/30',
        '169.254.5.0/30',
        '169.254.169.252/30'
    ]
    cidr_match_re = compile(
        r"^169\.254\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)"
        r"\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\/30$"
    )
    if cidr in reserved_cidrs:
        raise ValueError(
            'The following CIDR blocks are reserved and cannot be used: "%s"' %
            (', '.join(reserved_cidrs))
        )
    if not cidr_match_re.match(cidr):
        raise ValueError(
            '%s is not a valid CIDR.'
            ' A size /30 CIDR block from the 169.254.0.0/16 must be specified.'
            % cidr)
    return cidr
def vpc_endpoint_type(endpoint_type):
    """Validate that endpoint_type is 'Interface' or 'Gateway'."""
    valid_types = ['Interface', 'Gateway']
    if endpoint_type in valid_types:
        return endpoint_type
    raise ValueError(
        'VpcEndpointType must be one of: "%s"' % (
            ', '.join(valid_types)
        )
    )
def scalable_dimension_type(scalable_dimension):
    """Validate that scalable_dimension is a recognized Application
    Auto Scaling dimension identifier."""
    valid_values = ['autoscaling:autoScalingGroup:DesiredCapacity',
                    'ecs:service:DesiredCount',
                    'ec2:spot-fleet-request:TargetCapacity',
                    'rds:cluster:ReadReplicaCount',
                    'dynamodb:table:ReadCapacityUnits',
                    'dynamodb:table:WriteCapacityUnits',
                    'dynamodb:index:ReadCapacityUnits',
                    'dynamodb:index:WriteCapacityUnits'
                    ]
    if scalable_dimension in valid_values:
        return scalable_dimension
    raise ValueError(
        'ScalableDimension must be one of: "%s"' % (
            ', '.join(valid_values)
        )
    )
def service_namespace_type(service_namespace):
    """Validate that service_namespace is a recognized Application
    Auto Scaling service namespace."""
    valid_values = ['autoscaling', 'ecs', 'ec2', 'rds', 'dynamodb']
    if service_namespace in valid_values:
        return service_namespace
    raise ValueError(
        'ServiceNamespace must be one of: "%s"' % (
            ', '.join(valid_values)
        )
    )
def statistic_type(statistic):
    """Validate that statistic is a recognized CloudWatch statistic."""
    valid_values = ['Average', 'Minimum', 'Maximum',
                    'SampleCount', 'Sum'
                    ]
    if statistic in valid_values:
        return statistic
    raise ValueError(
        'Statistic must be one of: "%s"' % (
            ', '.join(valid_values)
        )
    )
| troposphere/validators.py | 11,014 | Method to indicate defering property validation
Method to indicate bypassing property validation
Copyright (c) 2012-2013, Mark Peek <mark@peek.org> All rights reserved. See LICENSE file for full license. Network ports can be Ref items consecutive periods not allowed IP addresses not allowed noqa Verify it is a valid json string Convert the dict to a basestring | 364 | en | 0.73504 |
from . import database_connect
from .exceptions import IrodsError, IrodsWarning
import logging
import re
def run_update(irods_config, cursor):
l = logging.getLogger(__name__)
new_schema_version = database_connect.get_schema_version_in_database(cursor) + 1
l.info('Updating to schema version %d...', new_schema_version)
if new_schema_version == 2:
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listQueryByAliasLike', 'SELECT alias, sqlStr FROM R_SPECIFIC_QUERY WHERE alias LIKE ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('findQueryByAlias', 'SELECT alias, sqlStr FROM R_SPECIFIC_QUERY WHERE alias = ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('ilsLACollections', 'SELECT c.parent_coll_name, c.coll_name, c.create_ts, c.modify_ts, c.coll_id, c.coll_owner_name, c.coll_owner_zone, c.coll_type, u.user_name, u.zone_name, a.access_type_id, u.user_id FROM R_COLL_MAIN c JOIN R_OBJT_ACCESS a ON c.coll_id = a.object_id JOIN R_USER_MAIN u ON a.user_id = u.user_id WHERE c.parent_coll_name = ? ORDER BY c.coll_name, u.user_name, a.access_type_id DESC LIMIT ? OFFSET ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('ilsLADataObjects', 'SELECT s.coll_name, s.data_name, s.create_ts, s.modify_ts, s.data_id, s.data_size, s.data_repl_num, s.data_owner_name, s.data_owner_zone, u.user_name, u.user_id, a.access_type_id, u.user_type_name, u.zone_name FROM ( SELECT c.coll_name, d.data_name, d.create_ts, d.modify_ts, d.data_id, d.data_repl_num, d.data_size, d.data_owner_name, d.data_owner_zone FROM R_COLL_MAIN c JOIN R_DATA_MAIN d ON c.coll_id = d.coll_id WHERE c.coll_name = ? ORDER BY d.data_name) s JOIN R_OBJT_ACCESS a ON s.data_id = a.object_id JOIN R_USER_MAIN u ON a.user_id = u.user_id ORDER BY s.coll_name, s.data_name, u.user_name, a.access_type_id DESC LIMIT ? OFFSET ?', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listSharedCollectionsOwnedByUser', 'SELECT DISTINCT R_COLL_MAIN.coll_id, R_COLL_MAIN.parent_coll_name, R_COLL_MAIN.coll_name, R_COLL_MAIN.coll_owner_name, R_COLL_MAIN.coll_owner_zone, R_META_MAIN.meta_attr_name, R_META_MAIN.meta_attr_value, R_META_MAIN.meta_attr_unit FROM R_COLL_MAIN JOIN R_OBJT_METAMAP ON R_COLL_MAIN.coll_id = R_OBJT_METAMAP.object_id JOIN R_META_MAIN ON R_OBJT_METAMAP.meta_id = R_META_MAIN.meta_id WHERE R_META_MAIN.meta_attr_unit = ''iRODSUserTagging:Share'' AND R_COLL_MAIN.coll_owner_name = ? AND R_COLL_MAIN.coll_owner_zone = ? ORDER BY R_COLL_MAIN.parent_coll_name ASC, R_COLL_MAIN.coll_name ASC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listSharedCollectionsSharedWithUser', 'SELECT DISTINCT R_COLL_MAIN.coll_id, R_COLL_MAIN.parent_coll_name, R_COLL_MAIN.coll_name, R_COLL_MAIN.coll_owner_name, R_COLL_MAIN.coll_owner_zone, R_META_MAIN.meta_attr_name, R_META_MAIN.meta_attr_value, R_META_MAIN.meta_attr_unit, R_USER_MAIN.user_name, R_USER_MAIN.zone_name, R_OBJT_ACCESS.access_type_id FROM R_COLL_MAIN JOIN R_OBJT_METAMAP ON R_COLL_MAIN.coll_id = R_OBJT_METAMAP.object_id JOIN R_META_MAIN ON R_OBJT_METAMAP.meta_id = R_META_MAIN.meta_id JOIN R_OBJT_ACCESS ON R_COLL_MAIN.coll_id = R_OBJT_ACCESS.object_id JOIN R_USER_MAIN ON R_OBJT_ACCESS.user_id = R_USER_MAIN.user_id WHERE R_META_MAIN.meta_attr_unit = ''iRODSUserTagging:Share'' AND R_USER_MAIN.user_name = ? AND R_USER_MAIN.zone_name = ? AND R_COLL_MAIN.coll_owner_name <> ? ORDER BY R_COLL_MAIN.parent_coll_name ASC, R_COLL_MAIN.coll_name ASC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listUserACLForDataObjViaGroup', 'SELECT R_USER_MAIN.user_name, R_USER_MAIN.user_id, R_OBJT_ACCESS.access_type_id, R_USER_MAIN.user_type_name, R_USER_MAIN.zone_name, R_COLL_MAIN.coll_name, R_DATA_MAIN.data_name, USER_GROUP_MAIN.user_name, R_DATA_MAIN.data_name, R_COLL_MAIN.coll_name FROM R_USER_MAIN AS USER_GROUP_MAIN JOIN R_USER_GROUP JOIN R_USER_MAIN ON R_USER_GROUP.user_id = R_USER_MAIN.user_id ON USER_GROUP_MAIN.user_id = R_USER_GROUP.group_user_id JOIN R_OBJT_ACCESS ON R_USER_GROUP.group_user_id = R_OBJT_ACCESS.user_id JOIN R_DATA_MAIN JOIN R_COLL_MAIN ON R_DATA_MAIN.coll_id = R_COLL_MAIN.coll_id ON R_OBJT_ACCESS.object_id = R_DATA_MAIN.data_id WHERE R_COLL_MAIN.coll_name = ? AND R_DATA_MAIN.data_name = ? AND R_USER_MAIN.user_name = ? ORDER BY R_COLL_MAIN.coll_name, R_DATA_MAIN.data_name, R_USER_MAIN.user_name, R_OBJT_ACCESS.access_type_id DESC', '1388534400');")
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('listUserACLForCollectionViaGroup', 'SELECT R_USER_MAIN.user_name, R_USER_MAIN.user_id, R_OBJT_ACCESS.access_type_id, R_USER_MAIN.user_type_name, R_USER_MAIN.zone_name, R_COLL_MAIN.coll_name, USER_GROUP_MAIN.user_name, R_COLL_MAIN.coll_name FROM R_USER_MAIN AS USER_GROUP_MAIN JOIN R_USER_GROUP JOIN R_USER_MAIN ON R_USER_GROUP.user_id = R_USER_MAIN.user_id ON USER_GROUP_MAIN.user_id = R_USER_GROUP.group_user_id JOIN R_OBJT_ACCESS ON R_USER_GROUP.group_user_id = R_OBJT_ACCESS.user_id JOIN R_COLL_MAIN ON R_OBJT_ACCESS.object_id = R_COLL_MAIN.coll_id WHERE R_COLL_MAIN.coll_name = ? AND R_USER_MAIN.user_name = ? ORDER BY R_COLL_MAIN.coll_name, R_USER_MAIN.user_name, R_OBJT_ACCESS.access_type_id DESC', '1388534400');")
elif new_schema_version == 3:
database_connect.execute_sql_statement(cursor, "insert into R_SPECIFIC_QUERY (alias, sqlStr, create_ts) values ('DataObjInCollReCur', 'WITH coll AS (SELECT coll_id, coll_name FROM r_coll_main WHERE R_COLL_MAIN.coll_name = ? OR R_COLL_MAIN.coll_name LIKE ?) SELECT DISTINCT d.data_id, (SELECT coll_name FROM coll WHERE coll.coll_id = d.coll_id) coll_name, d.data_name, d.data_repl_num, d.resc_name, d.data_path, d.resc_hier FROM R_DATA_MAIN d WHERE d.coll_id = ANY(ARRAY(SELECT coll_id FROM coll)) ORDER BY coll_name, d.data_name, d.data_repl_num', '1388534400');")
elif new_schema_version == 4:
database_connect.execute_sql_statement(cursor, "create index idx_quota_main1 on R_QUOTA_MAIN (user_id);")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'domainadmin';")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'rodscurators';")
database_connect.execute_sql_statement(cursor, "delete from R_TOKN_MAIN where token_name = 'storageadmin';")
if irods_config.catalog_database_type == 'mysql':
database_connect.execute_sql_statement(cursor, "delete from R_SPECIFIC_QUERY where alias = 'DataObjInCollReCur';")
elif new_schema_version == 5:
if irods_config.catalog_database_type == 'oracle':
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_DATA_MAIN ADD resc_id integer;")
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_RESC_MAIN ADD resc_parent_context varchar2(4000);") # max oracle varchar2 for sql is 4000, 32767 pl/sql
else:
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_DATA_MAIN ADD resc_id bigint;")
database_connect.execute_sql_statement(cursor, "ALTER TABLE R_RESC_MAIN ADD resc_parent_context varchar(4000);")
database_connect.execute_sql_statement(cursor, "UPDATE R_SPECIFIC_QUERY SET sqlstr='WITH coll AS (SELECT coll_id, coll_name FROM R_COLL_MAIN WHERE R_COLL_MAIN.coll_name = ? OR R_COLL_MAIN.coll_name LIKE ?) SELECT DISTINCT d.data_id, (SELECT coll_name FROM coll WHERE coll.coll_id = d.coll_id) coll_name, d.data_name, d.data_repl_num, d.resc_name, d.data_path, d.resc_id FROM R_DATA_MAIN d WHERE d.coll_id = ANY(ARRAY(SELECT coll_id FROM coll)) ORDER BY coll_name, d.data_name, d.data_repl_num' where alias='DataObjInCollReCur';")
rows = database_connect.execute_sql_statement(cursor, "select resc_id, resc_name from R_RESC_MAIN;").fetchall()
for row in rows:
resc_id = row[0]
resc_name = row[1]
database_connect.execute_sql_statement(cursor, "update R_DATA_MAIN set resc_id=? where resc_hier=? or resc_hier like ?", resc_id, resc_name, ''.join(['%;', resc_name]))
if irods_config.catalog_database_type == 'postgres':
database_connect.execute_sql_statement(cursor, "update r_resc_main as rdm set resc_parent = am.resc_id from ( select resc_name, resc_id from r_resc_main ) as am where am.resc_name = rdm.resc_parent;")
elif irods_config.catalog_database_type == 'cockroachdb':
rows = database_connect.execute_sql_statement(cursor, "select rdm.resc_id, am.resc_id from r_resc_main rdm, r_resc_main am where am.resc_name = rdm.resc_parent;").fetchall()
for row in rows:
resc_id = row[0]
resc_id2 = row[1]
database_connect.execute_sql_statement(cursor, "update r_resc_main set resc_parent = ? where resc_id = ?;", resc_id2, resc_id)
elif irods_config.catalog_database_type == 'mysql':
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN as rdm, ( select resc_name, resc_id from R_RESC_MAIN ) as am set rdm.resc_parent = am.resc_id where am.resc_name = rdm.resc_parent;")
else:
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN rdm set resc_parent = ( select resc_id from ( select resc_name, resc_id from R_RESC_MAIN ) am where am.resc_name = rdm.resc_parent );")
rows = database_connect.execute_sql_statement(cursor, "select resc_id, resc_children from R_RESC_MAIN where resc_children is not null;").fetchall()
context_expression = re.compile('^([^{}]*)\\{([^{}]*)\\}')
for row in rows:
resc_id = row[0]
child_contexts = [(m.group(1), m.group(2)) for m in [context_expression.match(s) for s in row[1].split(';')] if m]
for child_name, context in child_contexts:
database_connect.execute_sql_statement(cursor, "update R_RESC_MAIN set resc_parent_context=? where resc_name=?", context, child_name)
else:
raise IrodsError('Upgrade to schema version %d is unsupported.' % (new_schema_version))
database_connect.execute_sql_statement(cursor, "update R_GRID_CONFIGURATION set option_value = ? where namespace = 'database' and option_name = 'schema_version';", new_schema_version)
| scripts/irods/database_upgrade.py | 10,790 | max oracle varchar2 for sql is 4000, 32767 pl/sql | 49 | en | 0.304903 |
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
use_visualizer: Render the simulation use the debugging visualizer
if True.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
not absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
def remove_body(self, name):
"""Remove the body.
Args:
body: An instance of Body.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
has_contact = False
for a in entities_a:
for b in entities_b:
if _check_contact(a, b):
has_contact = True
break
return has_contact
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
while(1):
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
pose: The pose to be plot.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
| robovat/simulation/simulator.py | 13,949 | The Simulator class.
Delete the simulator.
Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
use_visualizer: Render the simulation use the debugging visualizer
if True.
Add a body to the simulation.
Args:
filename: The path to the URDF file to be loaded. If the path is
not absolute path, it will be joined with the assets directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
max_force: Max force the constraint can apply.
max_linear_velocity: Maximum linear velocity.
max_angular_velocity: Max angular velocity.
is_controllable: If True, the constraint can apply motor controls.
Returns:
An instance of Constraint.
Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
Check if the loaded object is stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
Clear all visualization items.
Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
pose: The pose to be plot.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
Remove the body.
Args:
body: An instance of Body.
Reset the simulation.
Start the simulation.
Take a simulation step.
Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
The Simulator class.
Create the physics backend. Create the body. Add the body to the dictionary. Create the constraint. Add the constraint to the dictionary. Check if all bodies are stable. | 3,489 | en | 0.774642 |
# -*- coding: utf-8 -*-
from openprocurement.planning.api.utils import opresource
from openprocurement.api.utils import get_now, context_unpack, json_view
from openprocurement.planning.api.validation import validate_plan_not_terminated
from openprocurement.api.validation import validate_file_update, validate_file_upload, validate_patch_document_data
from openprocurement.planning.api.views.plan_document import PlansDocumentResource
@opresource(
name="Plan Milestone Documents",
collection_path="/plans/{plan_id}/milestones/{milestone_id}/documents",
path="/plans/{plan_id}/milestones/{milestone_id}/documents/{document_id}",
description="Plan milestone related files",
)
class PlanMilestoneDocumentResource(PlansDocumentResource):
def update_modified_dates(self):
plan = self.request.validated["plan"]
milestone = self.request.validated["milestone"]
plan.dateModified = milestone.dateModified = get_now()
plan.modified = False
@json_view(
permission="update_milestone",
validators=(validate_file_upload, validate_plan_not_terminated)
)
def collection_post(self):
self.update_modified_dates()
return super(PlanMilestoneDocumentResource, self).collection_post()
@json_view(
permission="update_milestone",
validators=(validate_file_update, validate_plan_not_terminated)
)
def put(self):
self.update_modified_dates()
return super(PlanMilestoneDocumentResource, self).put()
@json_view(
content_type="application/json",
permission="update_milestone",
validators=(validate_patch_document_data, validate_plan_not_terminated),
)
def patch(self):
self.update_modified_dates()
return super(PlanMilestoneDocumentResource, self).patch()
def _post_document_log(self, document):
self.LOGGER.info(
"Created plan milestone document {}".format(document.id),
extra=context_unpack(
self.request,
{"MESSAGE_ID": "plan_milestone_document_create"},
{"document_id": document.id}
),
)
def _put_document_log(self):
self.LOGGER.info(
"Updated plan milestone document {}".format(self.request.context.id),
extra=context_unpack(self.request,
{"MESSAGE_ID": "plan_milestone_document_put"}),
)
def _patch_document_log(self):
self.LOGGER.info(
"Updated plan milestone document {}".format(self.request.context.id),
extra=context_unpack(self.request,
{"MESSAGE_ID": "plan_milestone_document_patch"}),
)
| src/openprocurement/planning/api/views/plan_milestone_document.py | 2,730 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import rospy
import cv2
import sensor_msgs
import numpy as np
from cv_bridge import CvBridge
class CameraPublisher(object):
""" """
def __init__(self):
"""Default constructor"""
self.rgb_image_topic = rospy.get_param("~rgb_image_topic", "/camera/rgb/image_raw")
self.camera_publisher = rospy.Publisher(self.rgb_image_topic, sensor_msgs.msg.Image, queue_size=1)
self.camera_pub_frequency = rospy.get_param("~camera_pub_frequency", 20)
self.bridge = CvBridge()
self.camera_info_topic = rospy.get_param("~camera_info_topic", "/camera/rgb/camera_info")
self.camera_info = sensor_msgs.msg.CameraInfo()
self.camera_info_publisher = rospy.Publisher(self.camera_info_topic, sensor_msgs.msg.CameraInfo, queue_size=1)
self.camera_frame_id = rospy.get_param("~camera_frame_id", "camera_link")
self.camera_info.header.frame_id = self.camera_frame_id
self.capture = cv2.VideoCapture(0)
ok, frame = self.capture.read()
width, height, _ = frame.shape
focal_length = height
center = (height/2, width/2)
camera_matrix = np.array([[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double")
P_matrix = np.array([[focal_length, 0, center[0], 0],
[0, focal_length, center[1], 0],
[0, 0, 1, 0]], dtype="double")
dist_coeffs = np.zeros((4, 1))
self.camera_info.D = list(dist_coeffs)
self.camera_info.K = list(camera_matrix.flatten())
self.camera_info.P = list(P_matrix.flatten())
self.timer = rospy.Timer(rospy.Duration(1.0/self.camera_pub_frequency), self.timer_callback)
rospy.loginfo("Camera publisher ready !")
while not rospy.is_shutdown():
rospy.spin()
self.capture.release()
def timer_callback(self, event):
ok, frame = self.capture.read()
if ok:
bgr_image_msg = self.bridge.cv2_to_imgmsg(frame, "bgr8")
bgr_image_msg.header.stamp = rospy.Time().now()
self.camera_info.header = bgr_image_msg.header
bgr_image_msg.header.frame_id = self.camera_frame_id
self.camera_publisher.publish(bgr_image_msg)
self.camera_info_publisher.publish(self.camera_info)
if __name__ == '__main__':
rospy.init_node("camera_publisher", anonymous=False)
c = CameraPublisher()
| scripts/camera_publisher_node.py | 2,579 | Default constructor
!/usr/bin/env python -*- coding: UTF-8 -*- | 63 | en | 0.251276 |
import time
import random
import os
import logging
import sys
from datetime import datetime
from datetime import timedelta
from urllib.parse import urlparse
from urllib.parse import urljoin
import click
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
from models import db
from models import Following
from models import Comment
from models import Like
# Instagram credentials come from the environment so they are never
# committed to source control; a missing variable raises KeyError at import.
username = os.environ['instagram_username']
password = os.environ['instagram_password']
# Absolute directory containing this script (symlinks resolved).
dir_path = os.path.dirname(os.path.realpath(__file__))
# Send all INFO-and-above log records to stdout.
logging.basicConfig(
    stream=sys.stdout,
    level='INFO',
    format='%(asctime)s %(levelname)s:%(name)s:%(message)s'
)
log = logging.getLogger('app')
def sleep(duration):
    """Block for ``duration`` seconds, announcing the pause in the log."""
    message = 'Sleeping for {} seconds'.format(duration)
    log.info(message)
    time.sleep(duration)
def have_like(p):
    """Return True when a random 1-100 roll is strictly below ``p``."""
    roll = random.randint(1, 100)
    return roll < p
def get_url(driver):
    """Return the driver's current URL with query string and fragment stripped."""
    parsed = urlparse(driver.current_url)
    base = '{}://{}'.format(parsed.scheme, parsed.netloc)
    return urljoin(base, parsed.path)
def get_driver(gui=True):
    """Create a Chrome webdriver with a 15 s implicit wait.

    When ``gui`` is False the browser runs headless; the sandbox is disabled
    and a fixed 1200x600 window size is used in both modes.
    """
    options = webdriver.ChromeOptions()
    if not gui:
        options.add_argument('headless')
    for flag in ('--no-sandbox', 'window-size=1200x600'):
        options.add_argument(flag)
    browser = webdriver.Chrome(
        executable_path='/usr/local/bin/chromedriver',
        chrome_options=options
    )
    browser.implicitly_wait(15)
    return browser
def login(driver, username, password):
    """Sign in to Instagram through the login form with the given credentials."""
    driver.find_element_by_xpath("//p[@class='izU2O']/a[text()='Log in']").click()
    sleep(5)
    user_field = driver.find_element_by_xpath("//INPUT[@name='username']")
    user_field.send_keys(username)
    pass_field = driver.find_element_by_xpath("//INPUT[@type='password']")
    pass_field.send_keys(password)
    pass_field.send_keys(Keys.RETURN)
    sleep(10)
def search(driver, tag):
    """Open the explore page for ``tag`` and click its first thumbnail."""
    driver.get('https://www.instagram.com/explore/tags/{tag}/'.format(tag=tag))
    sleep(4)
    thumbnail = driver.find_element_by_xpath(
        "//article/div[2]/div[1]/div[1]/div[1]"
    )
    thumbnail.click()
    sleep(2)
def go_to_next_photo(driver):
    """Advance the post modal to the next picture via the "Next" arrow.

    Best effort: if the arrow cannot be located, the failure is logged and a
    screenshot is saved for debugging instead of raising.
    """
    try:
        # NOTE: the class name must be an XPath string literal; without the
        # quotes it was evaluated as an (empty) node-set, making the
        # contains() predicate always true.
        nex_btn = driver.find_element_by_xpath(
            "//a[contains(@class, 'coreSpriteRightPaginationArrow')][text()='Next']"
        )
    except Exception:
        # Previously this swallowed the error silently; log it so failures
        # are visible, and keep the screenshot for post-mortem debugging.
        log.exception('Could not find the "Next" button')
        driver.save_screenshot('screenshot.png')
    else:
        nex_btn.click()
        time.sleep(1)
def is_already_liked(driver):
    """Return True when the current post has no "Like" control left to click."""
    try:
        driver.find_element_by_xpath("//span[@aria-label='Like']")
    except NoSuchElementException:
        log.info('Picture has already been liked {}'.format(driver.current_url))
        return True
    log.info('Picture has NOT been liked yet {}'.format(driver.current_url))
    return False
def like_post(driver):
    """Like the currently open post, at most once per URL (tracked in the DB).

    Returns True when a new like was clicked and recorded, False when the
    post was liked before or the like button cannot be found.
    """
    url = get_url(driver)
    try:
        Like.select().where(Like.url == url).get()
        log.info('Post has already been liked {url}'.format(url=url))
        return False
    except Like.DoesNotExist:
        pass
    try:
        like_btn = driver.find_element_by_xpath("//span[@aria-label='Like']")
    except NoSuchElementException:
        log.info('Could not find like button {}'.format(driver.current_url))
        time.sleep(1)
        return False
    log.info('Found like button. Trying to like {}'.format(driver.current_url))
    like_btn.click()
    Like.create(url=url)
    log.info('Liked picture {url}'.format(url=url))
    return True
def comment_post(driver, text):
    """Post ``text`` as a comment on the current post, at most once per URL.

    Returns True when the comment was submitted and recorded in the DB,
    False when the post was commented before or the comment box is missing.
    """
    url = get_url(driver)
    try:
        Comment.select().where(Comment.url == url).get()
        log.info('Post has already been commented {url}'.format(url=url))
        return False
    except Comment.DoesNotExist:
        pass
    try:
        comment_input = driver.find_element_by_xpath('//TEXTAREA[@placeholder="Add a comment…"]')
    except NoSuchElementException as e:
        log.info(e)
        return False
    # The comment box is a React-controlled component, so assigning the value
    # directly is not noticed by the page.  Write the text plus one extra
    # space via JS, then delete that space with a real keystroke to force the
    # input to re-sync before submitting.
    driver.execute_script(
        "arguments[0].value = '{} ';".format(text), comment_input
    )
    comment_input.send_keys("\b")
    # Re-locate the element: the keystroke may have re-rendered the node.
    comment_input = driver.find_element_by_xpath('//TEXTAREA[@placeholder="Add a comment…"]')
    comment_input.submit()
    Comment.create(url=url, comment=text)
    log.info('Commented picture {url} with "{text}"'.format(url=url, text=text))
    time.sleep(1)
    return True
def subscribe(driver):
name_label = driver.find_element_by_xpath("//article/header//div[@class='e1e1d']/a[text()]")
name = name_label.text
follow_btn = driver.find_element_by_xpath("//article/header/div//button[text()]")
try:
following = Following.select().where(Following.name == name).get()
except Following.DoesNotExist:
pass
else:
log.info(
'Already subscribed on user: @{user} ({following})'.format(
user=name,
following=following
)
)
return False
btn_text = follow_btn.text
if btn_text == 'Follow':
log.info('Going to subscribe on user: @{user}'.format(user=name))
try:
follow_btn.click()
time.sleep(1)
except Exception as e:
log.info(e)
else:
Following.create(name=name)
return True
else:
log.info('Already subscribed on user: @{user}'.format(user=name))
return False
def get_random_comment():
comments = [
'Nice',
'Nice photo',
'Nice picture',
'Nice capture',
'Nice image',
'Nice shot',
'Great photo',
'Great job',
'Awesome picture',
'awesome shot',
'Like it',
'Like this picture',
'Like this photo',
'Like this image',
'Beautiful',
'Beautiful photo',
'Beautiful picture',
'Lovely picture',
'Lovely photo',
'Amazing',
'Amazing shot',
'Amazing capture',
'Amazing photo',
'Wonderful shot',
'Wonderful picture',
'Wonderful photo',
]
return random.choice(comments)
@click.group()
def cli():
pass
@cli.command()
@click.option('--tag', default='landscape', help='Instagram tag')
@click.option('--count', default=100, help='Number of user to follow')
@click.option('--gui/--no-gui', default=True, help='GUI')
def run_follower(tag, count, gui):
driver = get_driver(gui)
driver.get("https://www.instagram.com/")
login(driver, username=username, password=password)
search(driver, tag=tag)
liked = 0
commented = 0
subscribed = 0
while liked < count:
go_to_next_photo(driver)
was_liked = like_post(driver)
if was_liked:
liked += 1
# if have_like(15) and comment_post(driver, text=get_random_comment()):
# if comment_post(driver, text=get_random_comment()):
# commented += 1
if have_like(33) and subscribe(driver):
subscribed += 1
log.info('Liked: {}, Commented: {} Subscribed {}'.format(liked, commented, subscribed))
if was_liked:
duration = random.randint(20, 60)
sleep(duration)
else:
duration = random.randint(1, 8)
sleep(duration)
driver.close()
@cli.command()
@click.option('--count', default=100, help='Number of user to follow')
@click.option('--gui/--no-gui', default=True, help='GUI')
def run_unfollower(count, gui):
initial_count = count
driver = get_driver(gui)
driver.implicitly_wait(3)
driver.get("https://www.instagram.com/")
login(driver, username=username, password=password)
following_users = (
Following.select()
.where(
Following.is_following == True,
Following.date_created < datetime.now() - timedelta(days=14)
)
.order_by(Following.date_created)
)
for following in following_users:
if count <= 0:
return
log.info(
'Going to unfollow `@{user}` ({date})'.format(
user=following.name, date=following.date_created
)
)
driver.get("https://www.instagram.com/{name}".format(name=following.name))
time.sleep(1)
try:
unfollow_btn = driver.find_element_by_xpath("//button[text()='Following']")
except NoSuchElementException:
still_following = False
log.info('Already not following user `@{user}`'.format(user=following.name))
following.is_following = False
following.save()
else:
log.info('Still following user `@{user}`'.format(user=following.name))
still_following = True
unfollow_btn.click()
duration = random.randint(5, 10)
sleep(duration)
try:
unfollow_btn = driver.find_element_by_xpath(
"//div[@class='piCib']//button[text()='Unfollow']"
)
except NoSuchElementException:
pass
else:
still_following = True
unfollow_btn.click()
sleep(2)
tries = 0
while still_following:
driver.refresh()
try:
driver.find_element_by_xpath("//button[text()='Follow']")
except NoSuchElementException:
pass
else:
still_following = False
count -= 1
try:
driver.find_element_by_xpath("//button[text()='Follow Back']")
except NoSuchElementException:
pass
else:
still_following = False
count -= 1
if still_following:
try:
unfollow_btn = driver.find_element_by_xpath("//button[text()='Following']")
except NoSuchElementException:
pass
else:
log.info(
'Still following user `@{user}` (tries {tries})'.format(
user=following.name,
tries=tries
)
)
still_following = True
unfollow_btn.click()
if tries == 0:
break
tries += 1
log.info('-- {count} of {initial_count} users are unfollowed --'.format(
count=initial_count - count, initial_count=initial_count
))
driver.close()
@cli.command()
def init_db():
db.connect()
db.create_tables([Following, Comment, Like])
if __name__ == "__main__":
cli()
| main.py | 11,363 | comment_input.click() comment_input.clear() time.sleep(1) comment_input = driver.find_element_by_xpath('//TEXTAREA[@placeholder="Add a comment…"]') -------------------- An extra space is added here and then deleted. This forces the input box to update the reactJS core -------------------- comment_input.send_keys(text) comment_input.send_keys(Keys.RETURN) comment_input.clear() if have_like(15) and comment_post(driver, text=get_random_comment()): if comment_post(driver, text=get_random_comment()): commented += 1 | 519 | en | 0.368244 |
from datetime import datetime
from snuba.clickhouse.query_dsl.accessors import get_time_range
from snuba.datasets.factory import get_dataset
from snuba.datasets.plans.translator.query import identity_translate
from snuba.query.parser import parse_query
from snuba.query.processors.timeseries_processor import TimeSeriesProcessor
from snuba.request.request_settings import HTTPRequestSettings
def test_get_time_range() -> None:
"""
Test finding the time range of a query.
"""
body = {
"selected_columns": ["event_id"],
"conditions": [
("timestamp", ">=", "2019-09-18T10:00:00"),
("timestamp", ">=", "2000-09-18T10:00:00"),
("timestamp", "<", "2019-09-19T12:00:00"),
[("timestamp", "<", "2019-09-18T12:00:00"), ("project_id", "IN", [1])],
("project_id", "IN", [1]),
],
}
events = get_dataset("events")
query = parse_query(body, events)
processors = events.get_default_entity().get_query_processors()
for processor in processors:
if isinstance(processor, TimeSeriesProcessor):
processor.process_query(query, HTTPRequestSettings())
from_date_ast, to_date_ast = get_time_range(identity_translate(query), "timestamp")
assert (
from_date_ast is not None
and isinstance(from_date_ast, datetime)
and from_date_ast.isoformat() == "2019-09-18T10:00:00"
)
assert (
to_date_ast is not None
and isinstance(to_date_ast, datetime)
and to_date_ast.isoformat() == "2019-09-19T12:00:00"
)
| tests/clickhouse/query_dsl/test_time_range.py | 1,583 | Test finding the time range of a query. | 39 | en | 0.901702 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import msgpackutils
from oslo_utils import timeutils
from six.moves import map
from keystone.common import cache
from keystone.common import utils
# The set of attributes common between the RevokeEvent
# and the dictionaries created from the token Data.
_NAMES = ['trust_id',
'consumer_id',
'access_token_id',
'audit_id',
'audit_chain_id',
'expires_at',
'domain_id',
'project_id',
'user_id',
'role_id']
# Additional arguments for creating a RevokeEvent
_EVENT_ARGS = ['issued_before', 'revoked_at']
# Names of attributes in the RevocationEvent, including "virtual" attributes.
# Virtual attributes are those added based on other values.
_EVENT_NAMES = _NAMES + ['domain_scope_id']
# Values that will be in the token data but not in the event.
# These will compared with event values that have different names.
# For example: both trustor_id and trustee_id are compared against user_id
_TOKEN_KEYS = ['identity_domain_id',
'assignment_domain_id',
'issued_at',
'trustor_id',
'trustee_id']
# Alternative names to be checked in token for every field in
# revoke tree.
ALTERNATIVES = {
'user_id': ['user_id', 'trustor_id', 'trustee_id'],
'domain_id': ['identity_domain_id', 'assignment_domain_id'],
# For a domain-scoped token, the domain is in assignment_domain_id.
'domain_scope_id': ['assignment_domain_id', ],
}
REVOKE_KEYS = _NAMES + _EVENT_ARGS
def blank_token_data(issued_at):
token_data = dict()
for name in _NAMES:
token_data[name] = None
for name in _TOKEN_KEYS:
token_data[name] = None
# required field
token_data['issued_at'] = issued_at
return token_data
class RevokeEvent(object):
def __init__(self, **kwargs):
for k in REVOKE_KEYS:
v = kwargs.get(k)
setattr(self, k, v)
if self.domain_id and self.expires_at:
# This is revoking a domain-scoped token.
self.domain_scope_id = self.domain_id
self.domain_id = None
else:
# This is revoking all tokens for a domain.
self.domain_scope_id = None
if self.expires_at is not None:
# Trim off the expiration time because MySQL timestamps are only
# accurate to the second.
self.expires_at = self.expires_at.replace(microsecond=0)
if self.revoked_at is None:
self.revoked_at = timeutils.utcnow()
if self.issued_before is None:
self.issued_before = self.revoked_at
def to_dict(self):
keys = ['user_id',
'role_id',
'domain_id',
'domain_scope_id',
'project_id',
'audit_id',
'audit_chain_id',
]
event = {key: self.__dict__[key] for key in keys
if self.__dict__[key] is not None}
if self.trust_id is not None:
event['OS-TRUST:trust_id'] = self.trust_id
if self.consumer_id is not None:
event['OS-OAUTH1:consumer_id'] = self.consumer_id
if self.consumer_id is not None:
event['OS-OAUTH1:access_token_id'] = self.access_token_id
if self.expires_at is not None:
event['expires_at'] = utils.isotime(self.expires_at)
if self.issued_before is not None:
event['issued_before'] = utils.isotime(self.issued_before,
subsecond=True)
return event
def key_for_name(self, name):
return "%s=%s" % (name, getattr(self, name) or '*')
def attr_keys(event):
return list(map(event.key_for_name, _EVENT_NAMES))
def is_revoked(events, token_data):
"""Check if a token matches a revocation event.
Compare a token against every revocation event. If the token matches an
event in the `events` list, the token is revoked. If the token is compared
against every item in the list without a match, it is not considered
revoked from the `revoke_api`.
:param events: a list of RevokeEvent instances
:param token_data: map based on a flattened view of the token. The required
fields are `expires_at`,`user_id`, `project_id`,
`identity_domain_id`, `assignment_domain_id`,
`trust_id`, `trustor_id`, `trustee_id` `consumer_id` and
`access_token_id`
:returns: True if the token matches an existing revocation event, meaning
the token is revoked. False is returned if the token does not
match any revocation events, meaning the token is considered
valid by the revocation API.
"""
return any([matches(e, token_data) for e in events])
def matches(event, token_values):
"""See if the token matches the revocation event.
A brute force approach to checking.
Compare each attribute from the event with the corresponding
value from the token. If the event does not have a value for
the attribute, a match is still possible. If the event has a
value for the attribute, and it does not match the token, no match
is possible, so skip the remaining checks.
:param event: a RevokeEvent instance
:param token_values: dictionary with set of values taken from the
token
:returns: True if the token matches the revocation event, indicating the
token has been revoked
"""
# If any one check does not match, the whole token does
# not match the event. The numerous return False indicate
# that the token is still valid and short-circuits the
# rest of the logic.
# The token has three attributes that can match the user_id
if event.user_id is not None:
if all(event.user_id != token_values[attribute_name]
for attribute_name in ['user_id', 'trustor_id', 'trustee_id']):
return False
# The token has two attributes that can match the domain_id
if event.domain_id is not None:
if all(event.domain_id != token_values[attribute_name]
for attribute_name in ['identity_domain_id',
'assignment_domain_id']):
return False
if event.domain_scope_id is not None:
if event.domain_scope_id != token_values['assignment_domain_id']:
return False
# If an event specifies an attribute name, but it does not match,
# the token is not revoked.
attribute_names = ['project_id',
'expires_at', 'trust_id', 'consumer_id',
'access_token_id', 'audit_id', 'audit_chain_id']
for attribute_name in attribute_names:
if getattr(event, attribute_name) is not None:
if (getattr(event, attribute_name) !=
token_values[attribute_name]):
return False
if event.role_id is not None:
roles = token_values['roles']
if all(event.role_id != role for role in roles):
return False
if token_values['issued_at'] > event.issued_before:
return False
return True
def build_token_values_v2(access, default_domain_id):
token_data = access['token']
token_expires_at = timeutils.parse_isotime(token_data['expires'])
# Trim off the microseconds because the revocation event only has
# expirations accurate to the second.
token_expires_at = token_expires_at.replace(microsecond=0)
token_values = {
'expires_at': timeutils.normalize_time(token_expires_at),
'issued_at': timeutils.normalize_time(
timeutils.parse_isotime(token_data['issued_at'])),
'audit_id': token_data.get('audit_ids', [None])[0],
'audit_chain_id': token_data.get('audit_ids', [None])[-1],
}
token_values['user_id'] = access.get('user', {}).get('id')
project = token_data.get('tenant')
if project is not None:
token_values['project_id'] = project['id']
else:
token_values['project_id'] = None
token_values['identity_domain_id'] = default_domain_id
token_values['assignment_domain_id'] = default_domain_id
trust = token_data.get('trust')
if trust is None:
token_values['trust_id'] = None
token_values['trustor_id'] = None
token_values['trustee_id'] = None
else:
token_values['trust_id'] = trust['id']
token_values['trustor_id'] = trust['trustor_id']
token_values['trustee_id'] = trust['trustee_id']
token_values['consumer_id'] = None
token_values['access_token_id'] = None
role_list = []
# Roles are by ID in metadata and by name in the user section
roles = access.get('metadata', {}).get('roles', [])
for role in roles:
role_list.append(role)
token_values['roles'] = role_list
return token_values
def build_token_values(token_data):
token_expires_at = timeutils.parse_isotime(token_data['expires_at'])
# Trim off the microseconds because the revocation event only has
# expirations accurate to the second.
token_expires_at = token_expires_at.replace(microsecond=0)
token_values = {
'expires_at': timeutils.normalize_time(token_expires_at),
'issued_at': timeutils.normalize_time(
timeutils.parse_isotime(token_data['issued_at'])),
'audit_id': token_data.get('audit_ids', [None])[0],
'audit_chain_id': token_data.get('audit_ids', [None])[-1],
}
user = token_data.get('user')
if user is not None:
token_values['user_id'] = user['id']
# Federated users do not have a domain, be defensive and get the user
# domain set to None in the federated user case.
token_values['identity_domain_id'] = user.get('domain', {}).get('id')
else:
token_values['user_id'] = None
token_values['identity_domain_id'] = None
project = token_data.get('project', token_data.get('tenant'))
if project is not None:
token_values['project_id'] = project['id']
# The domain_id of projects acting as domains is None
token_values['assignment_domain_id'] = (
project['domain']['id'] if project['domain'] else None)
else:
token_values['project_id'] = None
domain = token_data.get('domain')
if domain is not None:
token_values['assignment_domain_id'] = domain['id']
else:
token_values['assignment_domain_id'] = None
role_list = []
roles = token_data.get('roles')
if roles is not None:
for role in roles:
role_list.append(role['id'])
token_values['roles'] = role_list
trust = token_data.get('OS-TRUST:trust')
if trust is None:
token_values['trust_id'] = None
token_values['trustor_id'] = None
token_values['trustee_id'] = None
else:
token_values['trust_id'] = trust['id']
token_values['trustor_id'] = trust['trustor_user']['id']
token_values['trustee_id'] = trust['trustee_user']['id']
oauth1 = token_data.get('OS-OAUTH1')
if oauth1 is None:
token_values['consumer_id'] = None
token_values['access_token_id'] = None
else:
token_values['consumer_id'] = oauth1['consumer_id']
token_values['access_token_id'] = oauth1['access_token_id']
return token_values
class _RevokeEventHandler(object):
# NOTE(morganfainberg): There needs to be reserved "registry" entries set
# in oslo_serialization for application-specific handlers. We picked 127
# here since it's waaaaaay far out before oslo_serialization will use it.
identity = 127
handles = (RevokeEvent,)
def __init__(self, registry):
self._registry = registry
def serialize(self, obj):
return msgpackutils.dumps(obj.__dict__, registry=self._registry)
def deserialize(self, data):
revoke_event_data = msgpackutils.loads(data, registry=self._registry)
revoke_event = RevokeEvent(**revoke_event_data)
return revoke_event
cache.register_model_handler(_RevokeEventHandler)
| keystone/models/revoke_model.py | 12,797 | Check if a token matches a revocation event.
Compare a token against every revocation event. If the token matches an
event in the `events` list, the token is revoked. If the token is compared
against every item in the list without a match, it is not considered
revoked from the `revoke_api`.
:param events: a list of RevokeEvent instances
:param token_data: map based on a flattened view of the token. The required
fields are `expires_at`,`user_id`, `project_id`,
`identity_domain_id`, `assignment_domain_id`,
`trust_id`, `trustor_id`, `trustee_id` `consumer_id` and
`access_token_id`
:returns: True if the token matches an existing revocation event, meaning
the token is revoked. False is returned if the token does not
match any revocation events, meaning the token is considered
valid by the revocation API.
See if the token matches the revocation event.
A brute force approach to checking.
Compare each attribute from the event with the corresponding
value from the token. If the event does not have a value for
the attribute, a match is still possible. If the event has a
value for the attribute, and it does not match the token, no match
is possible, so skip the remaining checks.
:param event: a RevokeEvent instance
:param token_values: dictionary with set of values taken from the
token
:returns: True if the token matches the revocation event, indicating the
token has been revoked
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The set of attributes common between the RevokeEvent and the dictionaries created from the token Data. Additional arguments for creating a RevokeEvent Names of attributes in the RevocationEvent, including "virtual" attributes. Virtual attributes are those added based on other values. Values that will be in the token data but not in the event. These will compared with event values that have different names. For example: both trustor_id and trustee_id are compared against user_id Alternative names to be checked in token for every field in revoke tree. For a domain-scoped token, the domain is in assignment_domain_id. required field This is revoking a domain-scoped token. This is revoking all tokens for a domain. Trim off the expiration time because MySQL timestamps are only accurate to the second. If any one check does not match, the whole token does not match the event. The numerous return False indicate that the token is still valid and short-circuits the rest of the logic. The token has three attributes that can match the user_id The token has two attributes that can match the domain_id If an event specifies an attribute name, but it does not match, the token is not revoked. Trim off the microseconds because the revocation event only has expirations accurate to the second. Roles are by ID in metadata and by name in the user section Trim off the microseconds because the revocation event only has expirations accurate to the second. 
Federated users do not have a domain, be defensive and get the user domain set to None in the federated user case. The domain_id of projects acting as domains is None NOTE(morganfainberg): There needs to be reserved "registry" entries set in oslo_serialization for application-specific handlers. We picked 127 here since it's waaaaaay far out before oslo_serialization will use it. | 3,896 | en | 0.871814 |
import numpy as np
import cv2
# To capture webcam live stream, simply change the following line to: cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('./assets/video.mp4')
while (True):
# Capture frame by frame
_, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# hsv (hue sat value) for the color red
lower_color = np.array([150, 150, 50])
upper_color = np.array([180, 255, 150])
# mask will be anything between range *lower_color to upper_color (Red)
mask = cv2.inRange(hsv, lower_color, upper_color)
res = cv2.bitwise_and(frame, frame, mask = mask)
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(frame, contours, -1, (200, 255, 0), 4)
if len(contours) > 0:
cv2.putText(mask, 'Relavante Object Detected', (100, 300), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows() | main.py | 1,110 | To capture webcam live stream, simply change the following line to: cap = cv2.VideoCapture(0) Capture frame by frame hsv (hue sat value) for the color red mask will be anything between range *lower_color to upper_color (Red) | 224 | en | 0.881706 |
'''Ensemble some predictions. '''
import argparse
import collections
import math
from scipy.special import logsumexp
import sys
MODES = ['mean', 'max', 'logsumexp', 'noisy_or', 'log_noisy_or', 'odds_ratio']
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=MODES)
parser.add_argument('files', nargs='+')
parser.add_argument('--weights', '-w', type=lambda x:[float(t) for t in x.split(',')],
help='Comma-separated lit of multiplizer per file')
parser.add_argument('--out-file', '-o', default=None, help='Where to write all output')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args(args)
def read_preds(fn):
preds = []
with open(fn) as f:
for line in f:
idx, pmid, drug, gene, variant, prob = line.strip().split('\t')
prob = float(prob)
preds.append((pmid, drug, gene, variant, prob))
return preds
def main(OPTS):
preds_all = [read_preds(fn) for fn in OPTS.files]
groups = collections.defaultdict(list)
for i, preds in enumerate(preds_all):
if OPTS.weights:
weight = OPTS.weights[i]
else:
weight = 1.0
for pmid, drug, gene, variant, prob in preds:
groups[(pmid, drug, gene, variant)].append(weight * prob)
results = []
for i , ((pmid, drug, gene, variant), prob_list) in enumerate(groups.items()):
if OPTS.mode == 'mean':
prob = sum(prob_list) / len(prob_list)
elif OPTS.mode == 'max':
prob = max(prob_list)
elif OPTS.mode == 'logsumexp':
prob = logsumexp(prob_list)
elif OPTS.mode == 'noisy_or':
prob_no_rel = 1.0
for p in prob_list:
prob_no_rel *= 1.0 - p
prob =1.0 - prob_no_rel
elif OPTS.mode == 'log_noisy_or':
log_prob_no_rel = 0.0
for p in prob_list:
if p < 1.0:
log_prob_no_rel += math.log(1.0 - p)
else:
log_prob_no_rel -= 1000000
prob = -log_prob_no_rel
elif OPTS.mode == 'odds_ratio':
cur_log_odds = 0.0
for p in prob_list:
cur_log_odds += 10 + 0.001 * p #math.log(p / (1.0 - p) * 100000000)
prob = cur_log_odds
else:
raise ValueError(OPTS.mode)
results.append((i, pmid, drug, gene, variant, prob))
with open(OPTS.out_file, 'w') as f:
for item in results:
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(*item))
if __name__ == '__main__':
OPTS = parse_args(sys.argv[1:])
main(OPTS)
| NAACL/ensemble.py | 2,821 | Ensemble some predictions.
math.log(p / (1.0 - p) * 100000000) | 64 | en | 0.336296 |
names = ["John", "Bob", "Dell", "python"];
print(names[0])
print(names[-1])
print(names[-2])
names[0] = "Amina"
print(names[0])
print(names[0:3])
# List methods
numbers = [1, 2, 3, 4, 5]
numbers.append(6)
numbers.insert(0, -1)
numbers.remove(3)
is_there = 1 in numbers
numbers.count(3) # it will return count of 3
# numbers.sort() # Ascending order
numbers.reverse() # descending order
numbers = numbers.copy() # To clone original list
print(is_there)
print(numbers)
print(len(numbers))
numbers.clear()
print(numbers)
| basic/list.py | 525 | List methods it will return count of 3 numbers.sort() Ascending order descending order To clone original list | 110 | en | 0.633337 |
#!/usr/bin/env python3
"""Install certificates from AWS S3.
This file is a template. It should be processed by Terraform.
"""
# Third-Party Libraries
import boto3
# Inputs from terraform
CERT_BUCKET_NAME = "${cert_bucket_name}"
CERT_READ_ROLE_ARN = "${cert_read_role_arn}"
SERVER_FQDN = "${server_fqdn}"
# These files will be copied from the bucket and installed in the
# specified location.
INSTALLATION_MAP = {
"fullchain.pem": "/etc/openvpn/server/server.crt",
"privkey.pem": "/etc/openvpn/server/server.key",
}
# Create STS client
sts = boto3.client("sts")
# Assume the role that can read the certificate
stsresponse = sts.assume_role(
RoleArn=CERT_READ_ROLE_ARN, RoleSessionName="cert_installation"
)
newsession_id = stsresponse["Credentials"]["AccessKeyId"]
newsession_key = stsresponse["Credentials"]["SecretAccessKey"]
newsession_token = stsresponse["Credentials"]["SessionToken"]
# Create a new client to access S3 using the temporary credentials
s3 = boto3.client(
"s3",
aws_access_key_id=newsession_id,
aws_secret_access_key=newsession_key,
aws_session_token=newsession_token,
)
# Copy each file from the bucket to the local file system
for src, dst in INSTALLATION_MAP.items():
obj = s3.get_object(
Bucket=CERT_BUCKET_NAME, Key="live/{}/{}".format(SERVER_FQDN, src)
)
with open(dst, "wb") as f:
f.write(obj["Body"].read())
| cloudinit/install-certificates.py | 1,401 | Install certificates from AWS S3.
This file is a template. It should be processed by Terraform.
!/usr/bin/env python3 Third-Party Libraries Inputs from terraform These files will be copied from the bucket and installed in the specified location. Create STS client Assume the role that can read the certificate Create a new client to access S3 using the temporary credentials Copy each file from the bucket to the local file system | 433 | en | 0.827739 |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from PyQt5.QtGui import QImage, qRgb, QPixmap
import numpy as np
import numpy as np
gray_color_table = [qRgb(i, i, i) for i in range(256)]
def toQImage(data, copy=True):
if data is None:
return QImage()
data = data.copy()
data[data>255] = 255
data[data<0] = 0
data = data.astype(np.uint8)
if data.dtype == np.uint8:
if len(data.shape) == 2:
qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_Indexed8)
qim.setColorTable(gray_color_table)
return qim.copy() if copy else qim
elif len(data.shape) == 3:
if data.shape[2] == 1:
qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_Grayscale8)
return qim.copy() if copy else qim
if data.shape[2] == 3:
qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_RGB888)
return qim.copy() if copy else qim
elif data.shape[2] == 4:
qim = QImage(data.data, data.shape[1], data.shape[0], data.strides[0], QImage.Format_ARGB32)
return qim.copy() if copy else qim
else:
raise Exception("Conversion of %d channel array to QImage not implemented" % data.shape[2])
raise Exception("Conversion of %d dimension array to QImage not implemented" % len(data.shape))
def toQPixmap(data):
if data is None: return QPixmap()
elif isinstance(data, QPixmap): return data
elif isinstance(data, QImage): QPixmap.fromImage(data)
elif hasattr(data, 'pixmap'): return data.pixmap()
else: return QPixmap.fromImage(toQImage(data))
def qPixmapToNumpy(pixmap):
image = pixmap.toImage()
image = image.convertToFormat(QImage.Format.Format_RGB32)
width = image.width()
height = image.height()
ptr = image.bits()
ptr.setsize(height * width * 4)
arr = np.frombuffer(ptr, np.uint8).reshape((height, width, 4))
return arr[:, :, 0:3].copy()
| python/iviz/Util.py | 2,099 | !/usr/bin/env python3-*- coding: utf-8 -*- | 42 | fr | 0.319203 |
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
import base64
import logging
import socket
import ssl
import getpass
import uuid
from struct import unpack
from collections import deque, namedtuple
import random
# noinspection PyCompatibility,PyUnresolvedReferences
from six import raise_from, string_types, integer_types, PY2
if PY2:
from urlparse import urlparse, parse_qs
else:
from urllib.parse import urlparse, parse_qs
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Dict, Literal, Optional, Type, Union
from typing_extensions import Self
import vertica_python
from .. import errors
from ..vertica import messages
from ..vertica.cursor import Cursor
from ..vertica.messages.message import BackendMessage, FrontendMessage
from ..vertica.messages.frontend_messages import CancelRequest
from ..vertica.log import VerticaLogging
# Fallback values used when a connection option is not supplied by the caller.
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 5433
DEFAULT_PASSWORD = ''
DEFAULT_AUTOCOMMIT = False
DEFAULT_BACKUP_SERVER_NODE = []
DEFAULT_KRB_SERVICE_NAME = 'vertica'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_PATH = 'vertica_python.log'
# Best effort: default the database user to the OS login name. getpass.getuser()
# can fail (e.g. no passwd entry, missing env vars), in which case we fall back
# to None and warn rather than failing at import time.
try:
    DEFAULT_USER = getpass.getuser()
except Exception as e:
    DEFAULT_USER = None
    print("WARN: Cannot get the login user name: {}".format(str(e)))
def connect(**kwargs):
# type: (Any) -> Connection
"""Opens a new connection to a Vertica database."""
return Connection(kwargs)
def parse_dsn(dsn):
"""Parse connection string into a dictionary of keywords and values.
Connection string format:
vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...
"""
url = urlparse(dsn)
if url.scheme != 'vertica':
raise ValueError("Only vertica:// scheme is supported.")
# Ignore blank/invalid values
result = {k: v for k, v in (
('host', url.hostname),
('port', url.port),
('user', url.username),
('password', url.password),
('database', url.path[1:])) if v
}
for key, values in parse_qs(url.query, keep_blank_values=True).items():
# Try to get the last non-blank value in the list of values for each key
for i in reversed(range(len(values))):
value = values[i]
if value != '':
break
if value == '' and key != 'log_path':
# blank values are to be ignored
continue
elif key == 'backup_server_node':
continue
elif key in ('connection_load_balance', 'use_prepared_statements',
'disable_copy_local', 'ssl', 'autocommit'):
lower = value.lower()
if lower in ('true', 'on', '1'):
result[key] = True
elif lower in ('false', 'off', '0'):
result[key] = False
elif key == 'connection_timeout':
result[key] = float(value)
elif key == 'log_level' and value.isdigit():
result[key] = int(value)
else:
result[key] = value
return result
_AddressEntry = namedtuple('_AddressEntry', ['host', 'resolved', 'data'])
class _AddressList(object):
def __init__(self, host, port, backup_nodes, logger):
"""Creates a new deque with the primary host first, followed by any backup hosts"""
self._logger = logger
# Items in address_deque are _AddressEntry values.
# host is the original hostname/ip, used by SSL option check_hostname
# - when resolved is False, data is port
# - when resolved is True, data is the 5-tuple from socket.getaddrinfo
# This allows for lazy resolution. Seek peek() for more.
self.address_deque = deque()
# load primary host into address_deque
self._append(host, port)
# load backup nodes into address_deque
if not isinstance(backup_nodes, list):
err_msg = 'Connection option "backup_server_node" must be a list'
self._logger.error(err_msg)
raise TypeError(err_msg)
# Each item in backup_nodes should be either
# a host name or IP address string (using default port) or
# a (host, port) tuple
for node in backup_nodes:
if isinstance(node, string_types):
self._append(node, DEFAULT_PORT)
elif isinstance(node, tuple) and len(node) == 2:
self._append(node[0], node[1])
else:
err_msg = ('Each item of connection option "backup_server_node"'
' must be a host string or a (host, port) tuple')
self._logger.error(err_msg)
raise TypeError(err_msg)
self._logger.debug('Address list: {0}'.format(list(self.address_deque)))
def _append(self, host, port):
if not isinstance(host, string_types):
err_msg = 'Host must be a string: invalid value: {0}'.format(host)
self._logger.error(err_msg)
raise TypeError(err_msg)
if not isinstance(port, (string_types, integer_types)):
err_msg = 'Port must be an integer or a string: invalid value: {0}'.format(port)
self._logger.error(err_msg)
raise TypeError(err_msg)
elif isinstance(port, string_types):
try:
port = int(port)
except ValueError as e:
err_msg = 'Port "{0}" is not a valid string: {1}'.format(port, e)
self._logger.error(err_msg)
raise ValueError(err_msg)
if port < 0 or port > 65535:
err_msg = 'Invalid port number: {0}'.format(port)
self._logger.error(err_msg)
raise ValueError(err_msg)
self.address_deque.append(_AddressEntry(host=host, resolved=False, data=port))
def push(self, host, port):
self.address_deque.appendleft(_AddressEntry(host=host, resolved=False, data=port))
def pop(self):
self.address_deque.popleft()
def peek(self):
# do lazy DNS resolution, returning the leftmost socket.getaddrinfo result
if len(self.address_deque) == 0:
return None
while len(self.address_deque) > 0:
self._logger.debug('Peek at address list: {0}'.format(list(self.address_deque)))
entry = self.address_deque[0]
if entry.resolved:
# return a resolved sockaddrinfo
return entry.data
else:
# DNS resolve a single host name to multiple IP addresses
self.pop()
# keep host and port info for adding address entry to deque once it has been resolved
host, port = entry.host, entry.data
try:
resolved_hosts = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
except Exception as e:
self._logger.warning('Error resolving host "{0}" on port {1}: {2}'.format(host, port, e))
continue
# add resolved addrinfo (AF_INET and AF_INET6 only) to deque
random.shuffle(resolved_hosts)
for addrinfo in resolved_hosts:
if addrinfo[0] in (socket.AF_INET, socket.AF_INET6):
self.address_deque.appendleft(_AddressEntry(
host=host, resolved=True, data=addrinfo))
return None
def peek_host(self):
# returning the leftmost host result
self._logger.debug('Peek host at address list: {0}'.format(list(self.address_deque)))
if len(self.address_deque) == 0:
return None
return self.address_deque[0].host
def _generate_session_label():
return '{type}-{version}-{id}'.format(
type='vertica-python',
version=vertica_python.__version__,
id=uuid.uuid1()
)
class Connection(object):
def __init__(self, options=None):
# type: (Optional[Dict[str, Any]]) -> None
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
options = options or {}
self.options = parse_dsn(options['dsn']) if 'dsn' in options else {}
self.options.update({key: value for key, value in options.items() \
if key == 'log_path' or (key != 'dsn' and value is not None)})
# Set up connection logger
logger_name = 'vertica_{0}_{1}'.format(id(self), str(uuid.uuid4())) # must be a unique value
self._logger = logging.getLogger(logger_name)
if 'log_level' not in self.options and 'log_path' not in self.options:
# logger is disabled by default
self._logger.disabled = True
else:
self.options.setdefault('log_level', DEFAULT_LOG_LEVEL)
self.options.setdefault('log_path', DEFAULT_LOG_PATH)
VerticaLogging.setup_logging(logger_name, self.options['log_path'],
self.options['log_level'], id(self))
self.options.setdefault('host', DEFAULT_HOST)
self.options.setdefault('port', DEFAULT_PORT)
if 'user' not in self.options:
if DEFAULT_USER:
self.options['user'] = DEFAULT_USER
else:
msg = 'Connection option "user" is required'
self._logger.error(msg)
raise KeyError(msg)
self.options.setdefault('database', self.options['user'])
self.options.setdefault('password', DEFAULT_PASSWORD)
self.options.setdefault('autocommit', DEFAULT_AUTOCOMMIT)
self.options.setdefault('session_label', _generate_session_label())
self.options.setdefault('backup_server_node', DEFAULT_BACKUP_SERVER_NODE)
self.options.setdefault('kerberos_service_name', DEFAULT_KRB_SERVICE_NAME)
# Kerberos authentication hostname defaults to the host value here so
# the correct value cannot be overwritten by load balancing or failover
self.options.setdefault('kerberos_host_name', self.options['host'])
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
# we only support one cursor per connection
self.options.setdefault('unicode_error', None)
self._cursor = Cursor(self, self._logger, cursor_type=None,
unicode_error=self.options['unicode_error'])
# knob for using server-side prepared statements
self.options.setdefault('use_prepared_statements', False)
self._logger.debug('Connection prepared statements is {}'.format(
'enabled' if self.options['use_prepared_statements'] else 'disabled'))
# knob for disabling COPY LOCAL operations
self.options.setdefault('disable_copy_local', False)
self._logger.debug('COPY LOCAL operation is {}'.format(
'disabled' if self.options['disable_copy_local'] else 'enabled'))
self._logger.info('Connecting as user "{}" to database "{}" on host "{}" with port {}'.format(
self.options['user'], self.options['database'],
self.options['host'], self.options['port']))
self.startup_connection()
# Initially, for a new session, autocommit is off
if self.options['autocommit']:
self.autocommit = True
self._logger.info('Connection is ready')
#############################################
# supporting `with` statements
#############################################
def __enter__(self):
# type: () -> Self
return self
def __exit__(self, type_, value, traceback):
self.close()
#############################################
# dbapi methods
#############################################
def close(self):
self._logger.info('Close the connection')
try:
self.write(messages.Terminate())
finally:
self.close_socket()
def commit(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('COMMIT;')
def rollback(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('ROLLBACK;')
def cursor(self, cursor_type=None):
# type: (Self, Optional[Union[Literal['list', 'dict'], Type[list[Any]], Type[dict[Any, Any]]]]) -> Cursor
if self.closed():
raise errors.ConnectionError('Connection is closed')
if self._cursor.closed():
self._cursor._closed = False
# let user change type if they want?
self._cursor.cursor_type = cursor_type
return self._cursor
#############################################
# non-dbapi methods
#############################################
@property
def autocommit(self):
"""Read the connection's AUTOCOMMIT setting from cache"""
return self.parameters.get('auto_commit', 'off') == 'on'
@autocommit.setter
def autocommit(self, value):
"""Change the connection's AUTOCOMMIT setting"""
if self.autocommit is value:
return
val = 'on' if value else 'off'
cur = self.cursor()
cur.execute('SET SESSION AUTOCOMMIT TO {}'.format(val), use_prepared_statements=False)
cur.fetchall() # check for errors and update the cache
def cancel(self):
"""Cancel the current database operation. This can be called from a
different thread than the one currently executing a database operation.
"""
if self.closed():
raise errors.ConnectionError('Connection is closed')
self._logger.info('Canceling the current database operation')
# Must create a new socket connection to the server
temp_socket = self.establish_socket_connection(self.address_list)
self.write(CancelRequest(self.backend_pid, self.backend_key), temp_socket)
temp_socket.close()
self._logger.info('Cancel request issued')
def opened(self):
return (self.socket is not None
and self.backend_pid is not None
and self.transaction_status is not None)
def closed(self):
return not self.opened()
def __str__(self):
safe_options = {key: value for key, value in self.options.items() if key != 'password'}
s1 = "<Vertica.Connection:{0} parameters={1} backend_pid={2}, ".format(
id(self), self.parameters, self.backend_pid)
s2 = "backend_key={0}, transaction_status={1}, socket={2}, options={3}>".format(
self.backend_key, self.transaction_status, self.socket, safe_options)
return ''.join([s1, s2])
#############################################
# internal
#############################################
def reset_values(self):
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
def _socket(self):
if self.socket:
return self.socket
# the initial establishment of the client connection
raw_socket = self.establish_socket_connection(self.address_list)
# enable load balancing
load_balance_options = self.options.get('connection_load_balance')
self._logger.debug('Connection load balance option is {0}'.format(
'enabled' if load_balance_options else 'disabled'))
if load_balance_options:
raw_socket = self.balance_load(raw_socket)
# enable SSL
ssl_options = self.options.get('ssl')
self._logger.debug('SSL option is {0}'.format('enabled' if ssl_options else 'disabled'))
if ssl_options:
raw_socket = self.enable_ssl(raw_socket, ssl_options)
self.socket = raw_socket
return self.socket
def _socket_as_file(self):
if self.socket_as_file is None:
self.socket_as_file = self._socket().makefile('rb')
return self.socket_as_file
def create_socket(self, family):
"""Create a TCP socket object"""
raw_socket = socket.socket(family, socket.SOCK_STREAM)
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
connection_timeout = self.options.get('connection_timeout')
if connection_timeout is not None:
self._logger.debug('Set socket connection timeout: {0}'.format(connection_timeout))
raw_socket.settimeout(connection_timeout)
return raw_socket
def balance_load(self, raw_socket):
# Send load balance request and read server response
self._logger.debug('=> %s', messages.LoadBalanceRequest())
raw_socket.sendall(messages.LoadBalanceRequest().get_message())
response = raw_socket.recv(1)
if response == b'Y':
size = unpack('!I', raw_socket.recv(4))[0]
if size < 4:
err_msg = "Bad message size: {0}".format(size)
self._logger.error(err_msg)
raise errors.MessageError(err_msg)
res = BackendMessage.from_type(type_=response, data=raw_socket.recv(size - 4))
self._logger.debug('<= %s', res)
host = res.get_host()
port = res.get_port()
self._logger.info('Load balancing to host "{0}" on port {1}'.format(host, port))
peer = raw_socket.getpeername()
socket_host, socket_port = peer[0], peer[1]
if host == socket_host and port == socket_port:
self._logger.info('Already connecting to host "{0}" on port {1}. Ignore load balancing.'.format(host, port))
return raw_socket
# Push the new host onto the address list before connecting again. Note that this
# will leave the originally-specified host as the first failover possibility.
self.address_list.push(host, port)
raw_socket.close()
raw_socket = self.establish_socket_connection(self.address_list)
else:
self._logger.debug('<= LoadBalanceResponse: %s', response)
self._logger.warning("Load balancing requested but not supported by server")
return raw_socket
def enable_ssl(self, raw_socket, ssl_options):
# Send SSL request and read server response
self._logger.debug('=> %s', messages.SslRequest())
raw_socket.sendall(messages.SslRequest().get_message())
response = raw_socket.recv(1)
self._logger.debug('<= SslResponse: %s', response)
if response == b'S':
self._logger.info('Enabling SSL')
try:
if isinstance(ssl_options, ssl.SSLContext):
server_host = self.address_list.peek_host()
if server_host is None: # This should not happen
msg = 'Cannot get the connected server host while enabling SSL'
self._logger.error(msg)
raise errors.ConnectionError(msg)
raw_socket = ssl_options.wrap_socket(raw_socket, server_hostname=server_host)
else:
raw_socket = ssl.wrap_socket(raw_socket)
except ssl.CertificateError as e:
raise_from(errors.ConnectionError(str(e)), e)
except ssl.SSLError as e:
raise_from(errors.ConnectionError(str(e)), e)
else:
err_msg = "SSL requested but not supported by server"
self._logger.error(err_msg)
raise errors.SSLNotSupported(err_msg)
return raw_socket
def establish_socket_connection(self, address_list):
"""Given a list of database node addresses, establish the socket
connection to the database server. Return a connected socket object.
"""
addrinfo = address_list.peek()
raw_socket = None
last_exception = None
# Failover: loop to try all addresses
while addrinfo:
(family, socktype, proto, canonname, sockaddr) = addrinfo
last_exception = None
# _AddressList filters all addrs to AF_INET and AF_INET6, which both
# have host and port as values 0, 1 in the sockaddr tuple.
host = sockaddr[0]
port = sockaddr[1]
self._logger.info('Establishing connection to host "{0}" on port {1}'.format(host, port))
try:
raw_socket = self.create_socket(family)
raw_socket.connect(sockaddr)
break
except Exception as e:
self._logger.info('Failed to connect to host "{0}" on port {1}: {2}'.format(host, port, e))
last_exception = e
address_list.pop()
addrinfo = address_list.peek()
raw_socket.close()
# all of the addresses failed
if raw_socket is None or last_exception:
err_msg = 'Failed to establish a connection to the primary server or any backup address.'
self._logger.error(err_msg)
raise errors.ConnectionError(err_msg)
return raw_socket
def ssl(self):
return self.socket is not None and isinstance(self.socket, ssl.SSLSocket)
def write(self, message, vsocket=None):
if not isinstance(message, FrontendMessage):
raise TypeError("invalid message: ({0})".format(message))
if vsocket is None:
vsocket = self._socket()
self._logger.debug('=> %s', message)
try:
for data in message.fetch_message():
size = 8192 # Max msg size, consistent with how the server works
pos = 0
while pos < len(data):
sent = vsocket.send(data[pos : pos + size])
if sent == 0:
raise errors.ConnectionError("Couldn't send message: Socket connection broken")
pos += sent
except Exception as e:
self.close_socket()
self._logger.error(str(e))
if isinstance(e, IOError):
raise_from(errors.ConnectionError(str(e)), e)
else:
raise
def close_socket(self):
try:
if self.socket is not None:
self._socket().close()
if self.socket_as_file is not None:
self._socket_as_file().close()
finally:
self.reset_values()
def reset_connection(self):
self.close()
self.startup_connection()
def is_asynchronous_message(self, message):
# Check if it is an asynchronous response message
# Note: ErrorResponse is a subclass of NoticeResponse
return (isinstance(message, messages.ParameterStatus) or
(isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)))
def handle_asynchronous_message(self, message):
if isinstance(message, messages.ParameterStatus):
if message.name == 'protocol_version':
message.value = int(message.value)
self.parameters[message.name] = message.value
elif (isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)):
if getattr(self, 'notice_handler', None) is not None:
self.notice_handler(message)
else:
self._logger.warning(message.error_message())
def read_string(self):
s = bytearray()
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
s.extend(char)
return s
def read_message(self):
while True:
try:
type_ = self.read_bytes(1)
size = unpack('!I', self.read_bytes(4))[0]
if size < 4:
raise errors.MessageError("Bad message size: {0}".format(size))
if type_ == messages.WriteFile.message_id:
# The whole WriteFile message may not be read at here.
# Instead, only the file name and file length is read.
# This is because the message could be too large to read all at once.
f = self.read_string()
filename = f.decode('utf-8')
file_length = unpack('!I', self.read_bytes(4))[0]
size -= 4 + len(f) + 1 + 4
if size != file_length:
raise errors.MessageError("Bad message size: {0}".format(size))
if filename == '':
# If there is no filename, then this is really RETURNREJECTED data, not a rejected file
if file_length % 8 != 0:
raise errors.MessageError("Bad RETURNREJECTED data size: {0}".format(file_length))
data = self.read_bytes(file_length)
message = messages.WriteFile(filename, file_length, data)
else:
# The rest of the message is read later with write_to_disk()
message = messages.WriteFile(filename, file_length)
else:
message = BackendMessage.from_type(type_, self.read_bytes(size - 4))
self._logger.debug('<= %s', message)
self.handle_asynchronous_message(message)
# handle transaction status
if isinstance(message, messages.ReadyForQuery):
self.transaction_status = message.transaction_status
except (SystemError, IOError) as e:
self.close_socket()
# noinspection PyTypeChecker
self._logger.error(e)
raise_from(errors.ConnectionError(str(e)), e)
if not self.is_asynchronous_message(message):
break
return message
def read_expected_message(self, expected_types, error_handler=None):
# Reads a message and does some basic error handling.
# expected_types must be a class (e.g. messages.BindComplete) or a tuple of classes
message = self.read_message()
if isinstance(message, expected_types):
return message
elif isinstance(message, messages.ErrorResponse):
if error_handler is not None:
error_handler(message)
else:
raise errors.DatabaseError(message.error_message())
else:
msg = 'Received unexpected message type: {}. '.format(type(message).__name__)
if isinstance(expected_types, tuple):
msg += 'Expected types: {}'.format(", ".join([t.__name__ for t in expected_types]))
else:
msg += 'Expected type: {}'.format(expected_types.__name__)
self._logger.error(msg)
raise errors.MessageError(msg)
def read_bytes(self, n):
if n == 1:
result = self._socket_as_file().read(1)
if not result:
raise errors.ConnectionError("Connection closed by Vertica")
return result
else:
buf = b""
to_read = n
while to_read > 0:
data = self._socket_as_file().read(to_read)
received = len(data)
if received == 0:
raise errors.ConnectionError("Connection closed by Vertica")
buf += data
to_read -= received
return buf
def send_GSS_response_and_receive_challenge(self, response):
# Send the GSS response data to the vertica server
token = base64.b64decode(response)
self.write(messages.Password(token, messages.Authentication.GSS))
# Receive the challenge from the vertica server
message = self.read_expected_message(messages.Authentication)
if message.code != messages.Authentication.GSS_CONTINUE:
msg = ('Received unexpected message type: Authentication(type={}).'
' Expected type: Authentication(type={})'.format(
message.code, messages.Authentication.GSS_CONTINUE))
self._logger.error(msg)
raise errors.MessageError(msg)
return message.auth_data
def make_GSS_authentication(self):
try:
import kerberos
except ImportError as e:
raise errors.ConnectionError("{}\nCannot make a Kerberos "
"authentication because no Kerberos package is installed. "
"Get it with 'pip install kerberos'.".format(str(e)))
# Set GSS flags
gssflag = (kerberos.GSS_C_DELEG_FLAG | kerberos.GSS_C_MUTUAL_FLAG |
kerberos.GSS_C_SEQUENCE_FLAG | kerberos.GSS_C_REPLAY_FLAG)
# Generate the GSS-style service principal name
service_principal = "{}@{}".format(self.options['kerberos_service_name'],
self.options['kerberos_host_name'])
# Initializes a context object with a service principal
self._logger.info('Initializing a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
try:
result, context = kerberos.authGSSClientInit(service_principal, gssflags=gssflag)
except kerberos.GSSError as err:
msg = "GSSAPI initialization error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
if result != kerberos.AUTH_GSS_COMPLETE:
msg = ('Failed to initialize a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
self._logger.error(msg)
raise errors.KerberosError(msg)
# Processes GSSAPI client-side steps
try:
challenge = b''
while True:
self._logger.info('Processing a single GSSAPI client-side step')
challenge = base64.b64encode(challenge).decode("utf-8")
result = kerberos.authGSSClientStep(context, challenge)
if result == kerberos.AUTH_GSS_COMPLETE:
self._logger.info('Result: GSSAPI step complete')
break
elif result == kerberos.AUTH_GSS_CONTINUE:
self._logger.info('Result: GSSAPI step continuation')
# Get the response from the last successful GSSAPI client-side step
response = kerberos.authGSSClientResponse(context)
challenge = self.send_GSS_response_and_receive_challenge(response)
else:
msg = "GSSAPI client-side step error status {}".format(result)
self._logger.error(msg)
raise errors.KerberosError(msg)
except kerberos.GSSError as err:
msg = "GSSAPI client-side step error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
def startup_connection(self):
user = self.options['user']
database = self.options['database']
session_label = self.options['session_label']
os_user_name = DEFAULT_USER if DEFAULT_USER else ''
password = self.options['password']
self.write(messages.Startup(user, database, session_label, os_user_name))
while True:
message = self.read_message()
if isinstance(message, messages.Authentication):
if message.code == messages.Authentication.OK:
self._logger.info("User {} successfully authenticated"
.format(self.options['user']))
elif message.code == messages.Authentication.CHANGE_PASSWORD:
msg = "The password for user {} has expired".format(self.options['user'])
self._logger.error(msg)
raise errors.ConnectionError(msg)
elif message.code == messages.Authentication.PASSWORD_GRACE:
self._logger.warning('The password for user {} will expire soon.'
' Please consider changing it.'.format(self.options['user']))
elif message.code == messages.Authentication.GSS:
self.make_GSS_authentication()
else:
self.write(messages.Password(password, message.code,
{'user': user,
'salt': getattr(message, 'salt', None),
'usersalt': getattr(message, 'usersalt', None)}))
elif isinstance(message, messages.BackendKeyData):
self.backend_pid = message.pid
self.backend_key = message.key
elif isinstance(message, messages.ReadyForQuery):
break
elif isinstance(message, messages.ErrorResponse):
self._logger.error(message.error_message())
raise errors.ConnectionError(message.error_message())
else:
msg = "Received unexpected startup message: {0}".format(message)
self._logger.error(msg)
raise errors.MessageError(msg)
| vertica_python/vertica/connection.py | 35,911 | Creates a new deque with the primary host first, followed by any backup hosts
Read the connection's AUTOCOMMIT setting from cache
Change the connection's AUTOCOMMIT setting
Cancel the current database operation. This can be called from a
different thread than the one currently executing a database operation.
Opens a new connection to a Vertica database.
Create a TCP socket object
Given a list of database node addresses, establish the socket
connection to the database server. Return a connected socket object.
Parse connection string into a dictionary of keywords and values.
Connection string format:
vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...
Copyright (c) 2018-2022 Micro Focus or one of its affiliates. Copyright (c) 2018 Uber Technologies, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Copyright (c) 2013-2017 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. noinspection PyCompatibility,PyUnresolvedReferences type: (Any) -> Connection Ignore blank/invalid values Try to get the last non-blank value in the list of values for each key blank values are to be ignored Items in address_deque are _AddressEntry values. 
host is the original hostname/ip, used by SSL option check_hostname - when resolved is False, data is port - when resolved is True, data is the 5-tuple from socket.getaddrinfo This allows for lazy resolution. Seek peek() for more. load primary host into address_deque load backup nodes into address_deque Each item in backup_nodes should be either a host name or IP address string (using default port) or a (host, port) tuple do lazy DNS resolution, returning the leftmost socket.getaddrinfo result return a resolved sockaddrinfo DNS resolve a single host name to multiple IP addresses keep host and port info for adding address entry to deque once it has been resolved add resolved addrinfo (AF_INET and AF_INET6 only) to deque returning the leftmost host result type: (Optional[Dict[str, Any]]) -> None Set up connection logger must be a unique value logger is disabled by default Kerberos authentication hostname defaults to the host value here so the correct value cannot be overwritten by load balancing or failover we only support one cursor per connection knob for using server-side prepared statements knob for disabling COPY LOCAL operations Initially, for a new session, autocommit is off supporting `with` statements type: () -> Self dbapi methods type: (Self, Optional[Union[Literal['list', 'dict'], Type[list[Any]], Type[dict[Any, Any]]]]) -> Cursor let user change type if they want? non-dbapi methods check for errors and update the cache Must create a new socket connection to the server internal the initial establishment of the client connection enable load balancing enable SSL Send load balance request and read server response Push the new host onto the address list before connecting again. Note that this will leave the originally-specified host as the first failover possibility. 
Send SSL request and read server response This should not happen Failover: loop to try all addresses _AddressList filters all addrs to AF_INET and AF_INET6, which both have host and port as values 0, 1 in the sockaddr tuple. all of the addresses failed Max msg size, consistent with how the server works Check if it is an asynchronous response message Note: ErrorResponse is a subclass of NoticeResponse The whole WriteFile message may not be read at here. Instead, only the file name and file length is read. This is because the message could be too large to read all at once. If there is no filename, then this is really RETURNREJECTED data, not a rejected file The rest of the message is read later with write_to_disk() handle transaction status noinspection PyTypeChecker Reads a message and does some basic error handling. expected_types must be a class (e.g. messages.BindComplete) or a tuple of classes Send the GSS response data to the vertica server Receive the challenge from the vertica server Set GSS flags Generate the GSS-style service principal name Initializes a context object with a service principal Processes GSSAPI client-side steps Get the response from the last successful GSSAPI client-side step | 5,662 | en | 0.840623 |
"""Tests for the Timestamp class."""
#
# (C) Pywikibot team, 2014-2021
#
# Distributed under the terms of the MIT license.
#
import calendar
import datetime
import re
import unittest
from contextlib import suppress
from pywikibot import Timestamp
from tests.aspects import TestCase
class TestTimestamp(TestCase):
    """Test Timestamp class comparisons and format round trips."""
    # These tests exercise pure datetime/string logic only, so no live
    # site connection is required by the test framework.
    net = False
def test_clone(self):
"""Test cloning a Timestamp instance."""
t1 = Timestamp.utcnow()
t2 = t1.clone()
self.assertEqual(t1, t2)
self.assertIsInstance(t2, Timestamp)
def test_instantiate_from_instance(self):
"""Test passing instance to factory methods works."""
t1 = Timestamp.utcnow()
self.assertIsNot(t1, Timestamp.fromISOformat(t1))
self.assertEqual(t1, Timestamp.fromISOformat(t1))
self.assertIsInstance(Timestamp.fromISOformat(t1), Timestamp)
self.assertIsNot(t1, Timestamp.fromtimestampformat(t1))
self.assertEqual(t1, Timestamp.fromtimestampformat(t1))
self.assertIsInstance(Timestamp.fromtimestampformat(t1), Timestamp)
def test_iso_format(self):
"""Test conversion from and to ISO format."""
sep = 'T'
t1 = Timestamp.utcnow()
if not t1.microsecond: # T199179: ensure microsecond is not 0
t1 = t1.replace(microsecond=1)
ts1 = t1.isoformat()
t2 = Timestamp.fromISOformat(ts1)
ts2 = t2.isoformat()
# MediaWiki ISO format doesn't include microseconds
self.assertNotEqual(t1, t2)
t1 = t1.replace(microsecond=0)
self.assertEqual(t1, t2)
self.assertEqual(ts1, ts2)
date, sep, time = ts1.partition(sep)
time = time.rstrip('Z')
self.assertEqual(date, str(t1.date()))
self.assertEqual(time, str(t1.time()))
def test_iso_format_with_sep(self):
"""Test conversion from and to ISO format with separator."""
sep = '*'
t1 = Timestamp.utcnow().replace(microsecond=0)
ts1 = t1.isoformat(sep=sep)
t2 = Timestamp.fromISOformat(ts1, sep=sep)
ts2 = t2.isoformat(sep=sep)
self.assertEqual(t1, t2)
self.assertEqual(t1, t2)
self.assertEqual(ts1, ts2)
date, sep, time = ts1.partition(sep)
time = time.rstrip('Z')
self.assertEqual(date, str(t1.date()))
self.assertEqual(time, str(t1.time()))
def test_iso_format_property(self):
"""Test iso format properties."""
self.assertEqual(Timestamp.ISO8601Format, Timestamp._ISO8601Format())
self.assertEqual(re.sub(r'[\-:TZ]', '', Timestamp.ISO8601Format),
Timestamp.mediawikiTSFormat)
def test_mediawiki_format(self):
"""Test conversion from and to Timestamp format."""
t1 = Timestamp.utcnow()
if not t1.microsecond: # T191827: ensure microsecond is not 0
t1 = t1.replace(microsecond=1000)
ts1 = t1.totimestampformat()
t2 = Timestamp.fromtimestampformat(ts1)
ts2 = t2.totimestampformat()
# MediaWiki timestamp format doesn't include microseconds
self.assertNotEqual(t1, t2)
t1 = t1.replace(microsecond=0)
self.assertEqual(t1, t2)
self.assertEqual(ts1, ts2)
def test_short_mediawiki_format(self):
"""Test short mw timestamp conversion from and to Timestamp format."""
t1 = Timestamp(2018, 12, 17)
t2 = Timestamp.fromtimestampformat('20181217') # short timestamp
ts1 = t1.totimestampformat()
ts2 = t2.totimestampformat()
self.assertEqual(t1, t2)
self.assertEqual(ts1, ts2)
def test_add_timedelta(self):
"""Test addin a timedelta to a Timestamp."""
t1 = Timestamp.utcnow()
t2 = t1 + datetime.timedelta(days=1)
if t1.month != t2.month:
self.assertEqual(1, t2.day)
else:
self.assertEqual(t1.day + 1, t2.day)
self.assertIsInstance(t2, Timestamp)
def test_add_timedate(self):
"""Test unsupported additions raise NotImplemented."""
t1 = datetime.datetime.utcnow()
t2 = t1 + datetime.timedelta(days=1)
t3 = t1.__add__(t2)
self.assertIs(t3, NotImplemented)
# Now check that the pywikibot sub-class behaves the same way
t1 = Timestamp.utcnow()
t2 = t1 + datetime.timedelta(days=1)
t3 = t1.__add__(t2)
self.assertIs(t3, NotImplemented)
def test_sub_timedelta(self):
"""Test subtracting a timedelta from a Timestamp."""
t1 = Timestamp.utcnow()
t2 = t1 - datetime.timedelta(days=1)
if t1.month != t2.month:
self.assertEqual(calendar.monthrange(t2.year, t2.month)[1], t2.day)
else:
self.assertEqual(t1.day - 1, t2.day)
self.assertIsInstance(t2, Timestamp)
def test_sub_timedate(self):
"""Test subtracting two timestamps."""
t1 = Timestamp.utcnow()
t2 = t1 - datetime.timedelta(days=1)
td = t1 - t2
self.assertIsInstance(td, datetime.timedelta)
self.assertEqual(t2 + td, t1)
if __name__ == '__main__': # pragma: no cover
with suppress(SystemExit):
unittest.main()
| tests/timestamp_tests.py | 5,251 | Test Timestamp class comparisons.
Test unsupported additions raise NotImplemented.
Test addin a timedelta to a Timestamp.
Test cloning a Timestamp instance.
Test passing instance to factory methods works.
Test conversion from and to ISO format.
Test iso format properties.
Test conversion from and to ISO format with separator.
Test conversion from and to Timestamp format.
Test short mw timestamp conversion from and to Timestamp format.
Test subtracting two timestamps.
Test subtracting a timedelta from a Timestamp.
Tests for the Timestamp class.
(C) Pywikibot team, 2014-2021 Distributed under the terms of the MIT license. T199179: ensure microsecond is not 0 MediaWiki ISO format doesn't include microseconds T191827: ensure microsecond is not 0 MediaWiki timestamp format doesn't include microseconds short timestamp Now check that the pywikibot sub-class behaves the same way pragma: no cover | 902 | en | 0.794291 |
"""Download an image from the Web App and detect coordinates.
download the image corresponding to the ID provided and run plant detection
and coordinate conversion
"""
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from plant_detection.PlantDetection import PlantDetection
from plant_detection import ENV
from plant_detection.Log import log
if __name__ == "__main__":
IMAGE_ID = ENV.load('PLANT_DETECTION_selected_image', get_json=False)
if IMAGE_ID is None:
log('No image selected.',
message_type='error', title='historical-plant-detection')
sys.exit(0)
PD = PlantDetection(coordinates=True, app=True, app_image_id=IMAGE_ID)
PD.detect_plants()
| priv/farmware/quickscripts/download_and_detect_coordinates.py | 720 | Download an image from the Web App and detect coordinates.
download the image corresponding to the ID provided and run plant detection
and coordinate conversion | 161 | en | 0.812332 |
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack1 = []
self.stack2 = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack1.append(x)
if len(self.stack2) == 0 or x <= self.stack2[-1]:
self.stack2.append(x)
def pop(self):
"""
:rtype: void
"""
top = self.stack1[-1]
self.stack1.pop()
if top == self.stack2[-1]:
self.stack2.pop()
def top(self):
"""
:rtype: int
"""
return self.stack1[-1]
def getMin(self):
"""
:rtype: int
"""
return self.stack2[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
"""
Time Complexity = O(n)
Space Complexity = O(n)
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example:
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); --> Returns -3.
minStack.pop();
minStack.top(); --> Returns 0.
minStack.getMin(); --> Returns -2.
"""
| LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py | 1,679 | initialize your data structure here.
:rtype: int
:rtype: void
:type x: int
:rtype: void
:rtype: int
Your MinStack object will be instantiated and called as such: obj = MinStack() obj.push(x) obj.pop() param_3 = obj.top() param_4 = obj.getMin() | 245 | en | 0.645098 |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_blocks
from modeling.architecture import nn_ops
def get_drop_connect_rate(init_rate, block_num, total_blocks):
"""Get drop connect rate for the ith block."""
if init_rate is not None:
return init_rate * float(block_num) / total_blocks
else:
return None
def block_group(inputs,
filters,
strides,
use_projection,
block_fn,
block_repeats,
batch_norm_relu=nn_ops.BatchNormRelu(),
dropblock=nn_ops.Dropblock(),
drop_connect_rate=None,
data_format='channels_last',
name=None,
is_training=False):
"""Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
dropblock: a drop block layer that is added after convluations. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
"""
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(
inputs,
filters,
strides,
use_projection=use_projection,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
for _ in range(1, block_repeats):
inputs = block_fn(
inputs,
filters,
1,
use_projection=False,
batch_norm_relu=batch_norm_relu,
dropblock=dropblock,
drop_connect_rate=drop_connect_rate,
data_format=data_format,
is_training=is_training)
return tf.identity(inputs, name)
class Resnet(object):
"""Class to build ResNet family model."""
def __init__(self,
resnet_depth,
dropblock=nn_ops.Dropblock(),
batch_norm_relu=nn_ops.BatchNormRelu(),
init_drop_connect_rate=None,
data_format='channels_last'):
"""ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer(optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
"""
self._resnet_depth = resnet_depth
self._dropblock = dropblock
self._batch_norm_relu = batch_norm_relu
self._init_drop_connect_rate = init_drop_connect_rate
self._data_format = data_format
model_params = {
10: {'block': nn_blocks.residual_block, 'layers': [1, 1, 1, 1]},
18: {'block': nn_blocks.residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': nn_blocks.residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': nn_blocks.bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': nn_blocks.bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': nn_blocks.bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
valid_resnet_depths = ', '.join(
[str(depth) for depth in sorted(model_params.keys())])
raise ValueError(
'The resnet_depth should be in [%s]. Not a valid resnet_depth:'%(
valid_resnet_depths), self._resnet_depth)
params = model_params[resnet_depth]
self._resnet_fn = self.resnet_v1_generator(
params['block'], params['layers'])
def __call__(self, inputs, is_training=False):
"""Returns the ResNet model for a given size and number of output classes.
Args:
inputs: a `Tesnor` with shape [batch_size, height, width, 3] representing
a batch of images.
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].
The values are corresponding feature hierarchy in ResNet with shape
[batch_size, height_l, width_l, num_filters].
"""
with tf.variable_scope('resnet%s' % self._resnet_depth):
return self._resnet_fn(inputs, is_training)
def resnet_v1_generator(self, block_fn, layers):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training=False):
"""Creation of the model graph."""
inputs = nn_ops.conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = self._batch_norm_relu(inputs, is_training=is_training)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=self._data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
c2 = block_group(
inputs=inputs,
filters=64,
strides=1,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[0],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 2, 5),
name='block_group1',
is_training=is_training)
c3 = block_group(
inputs=c2,
filters=128,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[1],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 3, 5),
name='block_group2',
is_training=is_training)
c4 = block_group(
inputs=c3,
filters=256,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[2],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 4, 5),
name='block_group3',
is_training=is_training)
c5 = block_group(
inputs=c4,
filters=512,
strides=2,
use_projection=True,
block_fn=block_fn,
block_repeats=layers[3],
batch_norm_relu=self._batch_norm_relu,
dropblock=self._dropblock,
drop_connect_rate=get_drop_connect_rate(
self._init_drop_connect_rate, 5, 5),
name='block_group4',
is_training=is_training)
return {2: c2, 3: c3, 4: c4, 5: c5}
return model
| models/official/detection/modeling/architecture/resnet.py | 9,643 | Class to build ResNet family model.
Returns the ResNet model for a given size and number of output classes.
Args:
inputs: a `Tesnor` with shape [batch_size, height, width, 3] representing
a batch of images.
is_training: `bool` if True, the model is in training mode.
Returns:
a `dict` containing `int` keys for continuous feature levels [2, 3, 4, 5].
The values are corresponding feature hierarchy in ResNet with shape
[batch_size, height_l, width_l, num_filters].
ResNet initialization function.
Args:
resnet_depth: `int` depth of ResNet backbone model.
dropblock: a dropblock layer.
batch_norm_relu: an operation that includes a batch normalization layer
followed by a relu layer(optional).
init_drop_connect_rate: a 'float' number that specifies the initial drop
connection rate. Note that the default `None` means no drop connection
is applied.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Builds one group of blocks.
Args:
inputs: a `Tensor` of size `[batch, channels, height, width]`.
filters: an `int` number of filters for the first two convolutions.
strides: an `int` block stride. If greater than 1, this block will
ultimately downsample the input.
use_projection: a `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
block_fn: the `function` for the block to use within the model
block_repeats: an `int` number of blocks to repeat in the group.
batch_norm_relu: an operation that is added after convolutions, including a
batch norm layer and an optional relu activation.
dropblock: a drop block layer that is added after convluations. Note that
the default implementation does not apply any drop block.
drop_connect_rate: a 'float' number that specifies the drop connection rate
of the block. Note that the default `None` means no drop connection is
applied.
data_format: a `str` that specifies the data format.
name: a `str` name for the Tensor output of the block layer.
is_training: a `bool` if True, the model is in training mode.
Returns:
The output `Tensor` of the block layer.
Get drop connect rate for the ith block.
Creation of the model graph.
Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Lint as: python2, python3 Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Only the first block per block_group uses projection shortcut and strides. | 3,895 | en | 0.806828 |
import datetime
from constance import config
from django.conf import settings
from django.utils import dateparse , timezone
import django.db.models as db
def today(today=None):
if today is not None:
return dateparse.parse_date(today) if isinstance(today,basestring) else today
elif not getattr(settings,'FAKE_DATE',True):
return datetime.date.today()
elif isinstance(config.CURRENT_DATE,datetime.date):
return config.CURRENT_DATE
return datetime.date(*[int(i) for i in config.CURRENT_DATE.split('-')])
def parse_date(datestr):
return datetime.datetime.strptime(datestr,'%d-%m-%Y').date()
def make_date(date,month=0,day=0):
try:
new_date = datetime.datetime.combine(date,datetime.time())
except TypeError as e:
new_date = datetime.datetime(date,month,day)
return timezone.make_aware(new_date)
def angular_datepicker(datestr):
if datestr is None or hasattr(datestr,'isoformat'):
return datestr #datestr is a date
# datestr from angular datepicker is: 2015-10-18T05:54:53.529Z
return datetime.datetime.strptime(datestr[:10],'%Y-%m-%d').date()
def null_boolean_display(bool_value):
return {True:'Yes',
False:'No',
None:'Unkown'}.get(bool_value)
def null_boolean_form_value(bool_value):
'''
Return the value for a NullBooleanSelect wigit based on bool_value
'''
return {True:'2',False:'3',None:'1'}.get(bool_value)
def null_boolean_from_form(form_value):
'''
Return the boolean value based on a NullBooleanSelect form value
'''
return {'1':None,'2':True,'3':False}.get(form_value)
def days_as_str(days):
''' Return a short string version of days '''
if -7 <= days <= 7:
return '{:d}d'.format(days)
return '{:d}w'.format(int(round(days/7.0)))
class SQLiteDate(db.Func):
function = 'JULIANDAY'
def sqlite_date_diff(start_date,end_date,days=False):
''' return a DjanoORM Expression for the number of seconds/days between start_date and end_data '''
scale = 86400 if days is False else 1
return db.ExpressionWrapper( (SQLiteDate(end_date) - SQLiteDate(start_date)) * scale , db.IntegerField() )
def sql_count_when(*qargs,**kwargs):
""" qargs : list of models.Q objects
kwargs : filter_term=value dict
"""
condition = db.Q(**kwargs)
for q in qargs:
condition &= q
return db.Count( db.Case(
db.When(condition,then=1),output_field=db.IntegerField(),
))
| utils/utils.py | 2,487 | Return a short string version of days
Return the value for a NullBooleanSelect wigit based on bool_value
Return the boolean value based on a NullBooleanSelect form value
qargs : list of models.Q objects
kwargs : filter_term=value dict
return a DjanoORM Expression for the number of seconds/days between start_date and end_data
datestr is a date datestr from angular datepicker is: 2015-10-18T05:54:53.529Z | 408 | en | 0.649552 |
from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
    """Base class for continuous-action policy networks.

    Instantiates the shared fully-connected trunk and the scalar value head,
    and declares the interface (``get_train_data``, ``sample_action``) that
    concrete policy subclasses implement.

    Parameters
    ----------
    representation_dim : int
        Dimensionality of the input state representation.
    action_dim : int
        Number of dimensions of the action space.
    action_bound : Optional[float]
        Symmetric bound ``c`` of the action space ``(-c, c)``, or ``None``
        for an unbounded action space.
    hidden_dimensions : List[int]
        Number of neurons for each hidden layer of the trunk. Must be
        non-empty.
    nonlinearity : str
        Name of the activation used between hidden layers; resolved through
        ``_map_nonlinearities`` (e.g. "relu", "leakyrelu", "relu6", "silu",
        "elu", "hardswish").
    layernorm : bool
        If True, a ``nn.LayerNorm`` is inserted after each hidden activation.
        This may increase performance, see
        https://arxiv.org/pdf/1709.06560.pdf for info.
    log_param_min : float
        Lower bound for learned log parameters.
    log_param_max : float
        Upper bound for learned log parameters.
    """

    # member type annotations
    state_dim: int
    action_dim: int
    action_bound: Optional[float]
    log_param_min: float
    log_param_max: float
    hidden_layers: int
    hidden_dimensions: List[int]

    trunk: nn.Sequential
    value_head: nn.Linear

    def __init__(
        self,
        representation_dim: int,
        action_dim: int,
        action_bound: Optional[float],
        hidden_dimensions: List[int],
        nonlinearity: str,
        layernorm: bool,
        log_param_min: float,
        log_param_max: float,
    ):
        super().__init__()

        self.state_dim = representation_dim
        self.action_dim = action_dim
        self.action_bound = action_bound

        # Boundaries for the learned log parameters (e.g. log standard
        # deviation) to increase training stability.
        self.log_param_min = log_param_min
        self.log_param_max = log_param_max

        assert hidden_dimensions, "Hidden dimensions can't be empty."
        self.hidden_dimensions = hidden_dimensions
        self.hidden_layers = len(hidden_dimensions)
        self.layernorm = layernorm

        activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)

        # Build the shared trunk (everything except the distribution heads):
        # Linear -> activation [-> LayerNorm] for each hidden layer, chaining
        # the layer widths from the input dimension onward.
        widths = [self.state_dim] + list(hidden_dimensions)
        layers: List[nn.Module] = []
        for in_features, out_features in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(in_features, out_features))
            layers.append(activation(inplace=True))
            if layernorm:
                layers.append(nn.LayerNorm(normalized_shape=out_features))
        self.trunk = nn.Sequential(*layers)

        self.value_head = nn.Linear(hidden_dimensions[-1], 1)

    def __repr__(self) -> str:
        """Return a one-line summary of the network configuration.

        Returns
        -------
        str
            String representation of this instance.
        """
        # NOTE(review): ``distribution_type`` and ``log_param_bounds`` are not
        # defined on Policy itself and are expected from subclasses — confirm.
        components: int = getattr(self, "num_components", 1)
        return (
            f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
            f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
            f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
            f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
        )

    @property
    def bounds(self) -> np.ndarray:
        """Action-space bounds as a float32 ``[low, high]`` array (infinite
        when the action space is unbounded)."""
        bound = self.action_bound
        if bound is None:
            return np.array([-np.inf, np.inf], dtype=np.float32)
        return np.array([-bound, bound], dtype=np.float32)

    @torch.no_grad()
    def get_train_data(
        self, states: torch.Tensor, actions: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Interface stub; implemented by concrete subclasses."""
        raise NotImplementedError

    @torch.no_grad()
    def sample_action(self, x: torch.Tensor) -> np.ndarray:
        """Interface stub; implemented by concrete subclasses."""
        raise NotImplementedError

    @torch.no_grad()
    def predict_V(self, x: torch.Tensor) -> np.ndarray:
        """Estimate the state value of ``x``.

        The module is temporarily switched to eval mode for the forward pass
        and switched back to train mode afterwards.
        """
        self.eval()
        V_hat = self.value_head(self.trunk(x))
        self.train()
        return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
    """Policy network for discrete action spaces.

    Parameterizes a categorical distribution over ``num_actions`` discrete
    actions together with a scalar state-value head, both fed by a shared
    fully-connected trunk.

    Parameters
    ----------
    representation_dim : int
        Dimensionality of the input state representation.
    action_dim : int
        Number of dimensions of the action space.
    num_actions : int
        Number of discrete actions, i.e. the number of categories of the
        parameterized distribution.
    hidden_dimensions : List[int]
        Number of neurons for each hidden layer of the trunk. Must be
        non-empty.
    nonlinearity : str
        Name of the activation used between hidden layers; resolved through
        ``_map_nonlinearities`` (e.g. "relu", "leakyrelu", "relu6", "silu",
        "elu", "hardswish").
    layernorm : bool
        If True, a ``nn.LayerNorm`` is inserted after each hidden activation.
        This may increase performance, see
        https://arxiv.org/pdf/1709.06560.pdf for info.
    """

    # member type annotations
    state_dim: int
    action_dim: int
    num_actions: int
    hidden_layers: int
    hidden_dimensions: List[int]

    trunk: nn.Sequential
    value_head: nn.Linear

    # class variable
    distribution_type: ClassVar[str] = "Categorical"

    def __init__(
        self,
        representation_dim: int,
        action_dim: int,
        num_actions: int,
        hidden_dimensions: List[int],
        nonlinearity: str,
        layernorm: bool,
    ):
        super().__init__()

        self.state_dim = representation_dim
        self.action_dim = action_dim
        self.num_actions = num_actions

        assert hidden_dimensions, "Hidden dimensions can't be empty."
        self.hidden_dimensions = hidden_dimensions
        self.hidden_layers = len(hidden_dimensions)
        self.layernorm = layernorm
        self.distribution = D.Categorical

        activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)

        # Build the shared trunk (everything except the heads):
        # Linear -> activation [-> LayerNorm] per hidden layer.
        widths = [self.state_dim] + list(hidden_dimensions)
        layers: List[nn.Module] = []
        for in_features, out_features in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(in_features, out_features))
            layers.append(activation(inplace=True))
            if layernorm:
                layers.append(nn.LayerNorm(normalized_shape=out_features))
        self.trunk = nn.Sequential(*layers)

        self.value_head = nn.Linear(hidden_dimensions[-1], 1)
        self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)

    def __repr__(self) -> str:
        """Return a one-line summary of the network configuration.

        Returns
        -------
        str
            String representation of this instance.
        """
        return (
            f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
            f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
            f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
            f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
        )

    def _get_dist_params(
        self, x: torch.Tensor
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        """Compute the categorical logits and the state-value estimate.

        Parameters
        ----------
        x : torch.FloatTensor
            Input state tensor.

        Returns
        -------
        Tuple[torch.FloatTensor, torch.FloatTensor]
            Action logits of shape [batch_size, num_actions] (pi_logits) and
            state value estimate (V_hat).
        """
        features = self.trunk(x)
        V_hat = self.value_head(features)
        pi_logits = self.dist_head(features)
        return pi_logits, V_hat

    def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
        """Forward pass of the model.

        Parameters
        ----------
        x : torch.FloatTensor
            Input state tensor.

        Returns
        -------
        Tuple[D.Categorical, torch.FloatTensor]
            Categorical action distribution (dist) and state value estimate
            (V_hat).
        """
        pi_logits, V_hat = self._get_dist_params(x)
        # Samples drawn from this distribution have shape [batch_size].
        return D.Categorical(logits=pi_logits), V_hat

    def get_train_data(
        self, states: torch.Tensor, actions: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute log-probabilities, entropies and value estimates.

        Parameters
        ----------
        states : torch.Tensor
            Batch of input states.
        actions : torch.Tensor
            Batch of action indices of shape [batch_size, num_actions].

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
            Log-probabilities of ``actions``, entropies (both of shape
            [batch_size, num_actions]) and state value estimates.
        """
        pi_logits, V_hat = self._get_dist_params(states)

        # Tile the logits along a middle dimension so the distribution's
        # batch_shape matches the shape of ``actions``. Categorical.log_prob
        # requires the value shape to be broadcastable with its batch_shape;
        # a plain [batch_size]-shaped distribution would not accept the
        # [batch_size, num_actions] actions tensor.
        num_actions = actions.shape[1]
        tiled_logits = pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
        pi_hat = D.Categorical(logits=tiled_logits)

        log_probs = pi_hat.log_prob(actions)
        entropy = pi_hat.entropy()
        return log_probs, entropy, V_hat

    @torch.no_grad()
    def predict_V(self, x: torch.Tensor) -> np.ndarray:
        """Estimate the state value of ``x``.

        The module is temporarily switched to eval mode for the forward pass
        and switched back to train mode afterwards.
        """
        self.eval()
        _, V_hat = self(x)
        self.train()
        return V_hat.detach().cpu().numpy()

    @torch.no_grad()
    def predict_pi(self, x: torch.Tensor) -> np.ndarray:
        """Return the action probabilities for ``x`` as a numpy array.

        The module is temporarily switched to eval mode for the forward pass
        and switched back to train mode afterwards.
        """
        self.eval()
        pi_logits, _ = self._get_dist_params(x)
        self.train()
        return F.softmax(pi_logits, dim=-1).detach().cpu().numpy()
class DiagonalNormalPolicy(Policy):
    """Policy class for factorized normal distributions.

    Learns parameters for a factorized normal distribution: a plain Normal,
    or a SquashedNormal when an action bound is configured.
    Factorized means that a conditionally independent (given a state) 1D Normal distribution is
    learned for each dimension of the action space instead of a Multivariate Normal.

    Parameters
    ----------
    representation_dim : int
        Dimensions of the input representation.
    action_dim : int
        Number of dimensions for the action space.
    action_bound : Optional[float]
        Bounds for the action space. Can be either float or None.
        If given, samples are squashed into (-action_bound, action_bound).
    hidden_dimensions : List[int]
        Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
    nonlinearity : str
        Nonlinearity used between hidden layers. Options are:
        - "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
        - "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
        - "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
        - "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
        - "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
        - "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
    layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
        This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
    log_param_min : float
        Lower bound for learned log standard deviation.
    log_param_max : float
        Upper bound for learned log standard deviation.
    """

    # member annotations
    state_dim: int
    action_dim: int
    action_bound: Optional[float]
    log_param_min: float
    log_param_max: float
    hidden_layers: int
    hidden_dimensions: List[int]
    trunk: nn.Sequential
    dist_head: nn.Linear
    value_head: nn.Linear

    # class variable
    policy_type: ClassVar[str] = "DiagonalNormal"

    def __init__(
        self,
        representation_dim: int,
        action_dim: int,
        action_bound: Optional[float],
        hidden_dimensions: List[int],
        nonlinearity: str,
        layernorm: bool,
        log_param_min: float,
        log_param_max: float,
    ):
        super().__init__(
            representation_dim=representation_dim,
            action_dim=action_dim,
            action_bound=action_bound,
            hidden_dimensions=hidden_dimensions,
            nonlinearity=nonlinearity,
            layernorm=layernorm,
            log_param_min=log_param_min,
            log_param_max=log_param_max,
        )
        # Two parameters (mean and log standard deviation) per action dimension.
        self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)

    def _make_normal(
        self, mu: torch.FloatTensor, sigma: torch.FloatTensor
    ) -> Union[D.Normal, SquashedNormal]:
        """Build the factorized policy distribution from its parameters.

        Shared by ``get_train_data`` and ``sample_action`` so that training and
        sampling always use the same distribution type: a SquashedNormal when
        an action bound is configured, a plain Normal otherwise.
        """
        if self.action_bound:
            return SquashedNormal(mu, sigma, self.action_bound)
        return D.Normal(mu, sigma)

    def forward(
        self, x: torch.FloatTensor
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Returns the learned parameters of the distribution.

        Parameters
        ----------
        x : torch.FloatTensor
            Input state tensor.

        Returns
        -------
        Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
            Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
        """
        x = self.trunk(x)
        V_hat = self.value_head(x)
        # dist_head returns a tensor of shape [batch_size, 2*action_dim];
        # split it along the last dimension into parameters for mu and sigma.
        mu, log_std = self.dist_head(x).chunk(2, dim=-1)
        # Learning the log std dev is a trick for numerical stability:
        # since stddev > 0, we learn the log and exponentiate.
        # Constrain log_std inside [log_param_min, log_param_max].
        log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
        sigma = log_std.exp()
        return mu, sigma, V_hat

    def get_train_data(
        self, states: torch.Tensor, actions: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute log-probabilities, an entropy proxy and value estimates.

        Parameters
        ----------
        states : torch.Tensor
            Batch of input states.
        actions : torch.Tensor
            Batch of actions taken in those states.

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
            Action log-probabilities (log_probs), entropy proxy (entropy),
            State value estimates (V_hat).
        """
        mu, sigma, V_hat = self(states)
        normal = self._make_normal(mu, sigma)
        log_probs = normal.log_prob(actions)
        # NOTE(review): this is the negative mean log-likelihood used as an
        # entropy proxy, not the analytic distribution entropy.
        entropy = -log_probs.mean(dim=-1)
        return log_probs, entropy, V_hat

    @torch.no_grad()
    def sample_action(self, x: torch.Tensor) -> np.ndarray:
        """Sample an action from the current policy distribution (no gradients)."""
        self.eval()
        mu, sigma, _ = self(x)
        action = self._make_normal(mu, sigma).sample()
        self.train()
        return action.detach().cpu().numpy()
class DiagonalGMMPolicy(Policy):
    """Policy class for learning a factorized GMM.

    Learns a 1D GMM for each dimension of the action space.
    The components of the GMM are plain Normal distributions, or SquashedNormal
    when an action bound is configured.

    Parameters
    ----------
    representation_dim : int
        Dimensions of the input representation.
    action_dim : int
        Number of dimensions for the action space.
    num_components : int
        Number of mixture components.
    action_bound : Optional[float]
        Bounds for the action space. Can be either float or None.
        If given, component samples are squashed into (-action_bound, action_bound).
    hidden_dimensions : List[int]
        Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
    nonlinearity : str
        Nonlinearity used between hidden layers. Options are:
        - "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
        - "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
        - "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
        - "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
        - "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
        - "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
    layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
        This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
    log_param_min : int
        Lower bound for learned log standard deviations.
    log_param_max : int
        Upper bound for learned log standard deviations.
    """
    # member annotations
    state_dim: int
    action_dim: int
    action_bound: Optional[float]
    log_param_min: float
    log_param_max: float
    hidden_layers: int
    hidden_dimensions: List[int]
    num_components: int
    trunk: nn.Sequential
    dist_head: nn.Linear
    value_head: nn.Linear
    # class variable
    policy_type: ClassVar[str] = "DiagonalGMM"
    def __init__(
        self,
        representation_dim: int,
        action_dim: int,
        action_bound: Optional[float],
        num_components: int,
        hidden_dimensions: List[int],
        nonlinearity: str,
        layernorm: bool,
        log_param_min: float,
        log_param_max: float,
    ):
        super().__init__(
            representation_dim=representation_dim,
            action_dim=action_dim,
            action_bound=action_bound,
            hidden_dimensions=hidden_dimensions,
            nonlinearity=nonlinearity,
            layernorm=layernorm,
            log_param_min=log_param_min,
            log_param_max=log_param_max,
        )
        self.num_components = num_components
        # Number of parameters needed for the GMM head:
        # per component, 2 parameters (mean, log std) for each action dimension
        # plus 1 mixture-coefficient logit -- hence num_components * (2*action_dim + 1).
        dist_params = num_components * (2 * self.action_dim + 1)
        self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
    def forward(
        self, x: torch.FloatTensor
    ) -> Tuple[
        torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
    ]:
        """Returns the learned parameters of the distribution.

        Parameters
        ----------
        x : torch.FloatTensor
            Input state tensor.

        Returns
        -------
        Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
            Distribution mean (mu), Distribution standard deviation (sigma),
            Logits for the categorical distribution parameterizing the components (log_coeff),
            State value estimate (V_hat).
        """
        x = self.trunk(x)
        V_hat = self.value_head(x)
        # mixture_params has shape [batch_size, 2*action_dim*num_components + num_components]:
        # the first 2*action_dim*num_components elements are the component parameters,
        # the trailing num_components elements are the mixture-coefficient logits.
        mixture_params = self.dist_head(x)
        # Slice off the component parameters and flatten them to
        # [batch_size, 2*num_components*action_dim].
        dist_params = mixture_params[
            ..., : self.num_components * 2 * self.action_dim
        ].view(x.shape[0], -1)
        # The last num_components elements are the logits for the mixture coefficients.
        log_coeff = mixture_params[..., -self.num_components :]
        # Split the component parameters halfway along the last dimension
        # into means and log stddevs.
        mu, log_std = dist_params.chunk(2, dim=-1)
        # Learning the log std dev is a trick for numerical stability:
        # since stddev > 0, we learn the log and exponentiate.
        # Constrain log_std inside [log_param_min, log_param_max].
        log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
        sigma = log_std.exp()
        return mu, sigma, log_coeff, V_hat
    def get_train_data(
        self, states: torch.Tensor, actions: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute action log-probabilities, an entropy proxy and value estimates.

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
            Action log-probabilities (log_probs), entropy proxy (entropy),
            State value estimates (V_hat).
        """
        mu, sigma, log_coeff, V_hat = self(states)
        # Expand num_actions identical GMMs so log_prob can be evaluated for
        # each action taken at the root (batch_shape must match the actions).
        num_actions = actions.shape[-1]
        mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
        sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
        log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
        mix = D.Categorical(logits=log_coeff)
        component: Union[D.Normal, SquashedNormal]
        if self.action_bound:
            component = SquashedNormal(mu, sigma, self.action_bound)
        else:
            component = D.Normal(mu, sigma)
        gmm = D.MixtureSameFamily(mix, component)
        log_probs = gmm.log_prob(actions)
        # NOTE(review): this is the negative mean log-likelihood used as an
        # entropy proxy, not the analytic mixture entropy.
        entropy = -log_probs.mean(dim=-1)
        return log_probs, entropy, V_hat
    @torch.no_grad()
    def sample_action(self, x: torch.Tensor) -> np.ndarray:
        """Sample an action from the learned GMM (no gradients).

        Temporarily switches the module to eval mode for the forward pass.
        """
        self.eval()
        mu, sigma, log_coeff, _ = self(x)
        mix = D.Categorical(logits=log_coeff)
        component: Union[D.Normal, SquashedNormal]
        if self.action_bound:
            component = SquashedNormal(mu, sigma, self.action_bound)
        else:
            component = D.Normal(mu, sigma)
        gmm = D.MixtureSameFamily(mix, component)
        action = gmm.sample()
        self.train()
        return action.detach().cpu().numpy()
class GeneralizedBetaPolicy(Policy):
    """Policy class for a generalized Beta distribution.

    The beta distribution used by this class is generalized in that it has support
    [-c, c] instead of [0,1].
    This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
    Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
    is applied to both parameters.

    Parameters
    ----------
    representation_dim : int
        Dimensions of the input representation.
    action_dim : int
        Number of dimensions for the action space.
    action_bound : float
        Bounds for the action space. Must be specified for this policy.
    hidden_dimensions : List[int]
        Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
    nonlinearity : str
        Nonlinearity used between hidden layers. Options are:
        - "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
        - "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
        - "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
        - "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
        - "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
        - "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
    layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
        This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
    log_param_min : float
        Lower bound for learned log_alpha and log_beta.
    log_param_max : float
        Upper bound for learned log_alpha and log_beta.
    """

    # member annotations
    state_dim: int
    action_dim: int
    action_bound: float
    log_param_min: float
    log_param_max: float
    hidden_layers: int
    hidden_dimensions: List[int]
    trunk: nn.Sequential
    dist_head: nn.Linear
    value_head: nn.Linear

    # class variable
    policy_type: ClassVar[str] = "GeneralizedBeta"

    def __init__(
        self,
        representation_dim: int,
        action_dim: int,
        action_bound: float,
        hidden_dimensions: List[int],
        nonlinearity: str,
        layernorm: bool,
        log_param_min: float,
        log_param_max: float,
    ):
        assert action_bound, "Beta policy needs action bounds specified."
        super().__init__(
            representation_dim=representation_dim,
            action_dim=action_dim,
            action_bound=action_bound,
            hidden_dimensions=hidden_dimensions,
            nonlinearity=nonlinearity,
            layernorm=layernorm,
            log_param_min=log_param_min,
            log_param_max=log_param_max,
        )
        # Two parameters (log_alpha, log_beta) per action dimension.
        self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)

    def forward(
        self, x: torch.FloatTensor
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Returns the learned parameters of the distribution.

        Parameters
        ----------
        x : torch.FloatTensor
            Input state tensor.

        Returns
        -------
        Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
            Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
        """
        x = self.trunk(x)
        V_hat = self.value_head(x)
        # Create distribution parameters.
        dist_params = self.dist_head(x)
        # Use the log-learning trick for alpha and beta:
        # both must be > 0, so the network outputs are treated as logs,
        # clamped for stability and then exponentiated.
        dist_params = torch.clamp(
            dist_params, min=self.log_param_min, max=self.log_param_max
        )
        alpha, beta = dist_params.exp().chunk(2, dim=-1)
        return alpha, beta, V_hat

    def get_train_data(
        self, states: torch.Tensor, actions: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute action log-probabilities, an entropy proxy and value estimates.

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
            Action log-probabilities (log_probs), entropy proxy (entropy),
            State value estimates (V_hat).
        """
        alpha, beta, V_hat = self(states)
        # Ensure that the distribution batch_shape fits the number of actions
        # taken for each agent at the root.
        num_actions = actions.shape[-1]
        alpha = alpha.expand(-1, num_actions)
        beta = beta.expand(-1, num_actions)
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
        log_probs = beta_dist.log_prob(actions)
        # NOTE(review): this is the negative mean log-likelihood used as an
        # entropy proxy, not the analytic distribution entropy.
        entropy = -log_probs.mean(dim=-1)
        return log_probs, entropy, V_hat

    @torch.no_grad()
    def sample_action(self, x: torch.Tensor) -> np.ndarray:
        """Sample an action from the generalized Beta distribution (no gradients)."""
        self.eval()
        alpha, beta, _ = self(x)
        # Bug fix: sample from the *generalized* Beta with support
        # (-action_bound, action_bound) so sampled actions live on the same
        # scale as the log-probabilities computed in get_train_data.
        # Previously a plain D.Beta was used, which samples in (0, 1).
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
        action = beta_dist.sample()
        self.train()
        return action.detach().cpu().numpy()
def make_policy(
    representation_dim: int,
    action_dim: int,
    distribution: str,
    hidden_dimensions: List[int],
    nonlinearity: str,
    num_components: Optional[int] = None,
    num_actions: Optional[int] = None,
    action_bound: Optional[float] = None,
    layernorm: bool = False,
    log_param_min: float = -5,
    log_param_max: float = 2,
) -> Union[
    DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
    """Constructs a policy network from a given config.

    The following config keys need to be specified:
        - "representation_dim": int
        - "action_dim": int
        - "distribution": str
        - "num_components": int
        - "action_bound": float
        - "hidden_dimensions": List[int]
        - "nonlinearity": str
        - "layernorm": bool
        - "log_param_min": Optional[float]
        - "log_param_max": Optional[float]

    Parameters
    ----------
    representation_dim: int
        Dimensionality of the vector state space of the environment.
    action_dim: int
        Number of action dimensions in the environment.
    distribution: str
        Name of the policy distribution as string ["discrete", "beta", "normal"].
    hidden_dimensions: List[int]
        List specification of the MLP policy. Each int element in the list represents a hidden
        layer in the network with the respective number of neurons.
    nonlinearity: str
        Nonlinearity (activation function) used in the policy network.
    num_components: Optional[int] = None
        Number of components for mixture distributions. Only required for the
        Normal family ("normal" etc.); a value > 1 selects a GMM policy.
    num_actions: Optional[int] = None
        Number of available actions. Used in the discrete policy.
    action_bound: Optional[float] = None
        Action bounds for the squashed normal or squashed GMM policy.
        Required for the beta policy.
    layernorm: bool = False
        Use Layernorm in the policy network if set to True.
    log_param_min: float = -5
        Lower bound of the learned log parameters (standard deviation for Normal distributions).
    log_param_max: float = 2
        Upper bound of the learned log parameters.

    Returns
    -------
    Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
    """
    # basic config string preprocessing to ensure mapping works later
    distribution = _process_str(distribution)
    nonlinearity = _process_str(nonlinearity)
    if distribution == "discrete":
        return DiscretePolicy(
            representation_dim=representation_dim,
            action_dim=action_dim,
            num_actions=cast(int, num_actions),
            hidden_dimensions=hidden_dimensions,
            nonlinearity=nonlinearity,
            layernorm=layernorm,
        )
    elif distribution == "beta":
        # Bug fix: the Beta policy has no mixture components, so asserting
        # num_components here was spurious and rejected valid configurations.
        # GeneralizedBetaPolicy itself validates that action_bound is set.
        return GeneralizedBetaPolicy(
            representation_dim=representation_dim,
            action_dim=action_dim,
            action_bound=cast(float, action_bound),
            hidden_dimensions=hidden_dimensions,
            nonlinearity=nonlinearity,
            layernorm=layernorm,
            log_param_min=log_param_min,
            log_param_max=log_param_max,
        )
    else:
        # Normal family: a GMM when more than one component is requested,
        # otherwise a single diagonal Normal.
        assert num_components, "Normal-family policies need num_components specified."
        if 1 < num_components:
            return DiagonalGMMPolicy(
                representation_dim=representation_dim,
                action_dim=action_dim,
                num_components=num_components,
                action_bound=action_bound,
                hidden_dimensions=hidden_dimensions,
                nonlinearity=nonlinearity,
                layernorm=layernorm,
                log_param_min=log_param_min,
                log_param_max=log_param_max,
            )
        else:
            return DiagonalNormalPolicy(
                representation_dim=representation_dim,
                action_dim=action_dim,
                action_bound=action_bound,
                hidden_dimensions=hidden_dimensions,
                nonlinearity=nonlinearity,
                layernorm=layernorm,
                log_param_min=log_param_min,
                log_param_max=log_param_max,
            )
| alphazero/network/policies.py | 35,496 | Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
Returns
-------
str
String representation of this instance.
Returns
-------
str
String representation of this instance.
Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[Normallike, torch.FloatTensor]
Normal or squashed Normal distribution (dist), State value estimate (V_hat).
Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
Policy network intance.
member type annotations boundaries for the log standard deviation to increae training stability generate neural network except distribution heads member type annotations class variable generate neural network except distribution heads dist_head returns a tensor of shape [batch_size, 2*action_dim] split this tensor along the last dimension into parameters for mu and sigma samples from dist have shape [batch_size, action_dim] This creates an independent distribution for each action possibility so that the batch_shape of the distribution is identical to the shape of actions It's needed so that the log_probs are of the proper shape [batch_size, num_actions] else this throws since the distribution's batch_shape=[batch_shape] doesn't match the shape of the actions tensor, which is [batch_size, num_actions] member annotations class variable dist_head returns a tensor of shape [batch_size, 2*action_dim] split this tensor along the last dimension into parameters for mu and sigma Learning the log_std_dev is a trick for numerical stability Since the stddev > 0, we can learn the log and then exponentiate constrain log_std inside [log_param_min, log_param_max] This aligns the distribution batch_shape with the number of actions at the root It can be thought of as generating num_actions identical normal distributions for each agent and then sampling the log_prob for action from the distribution num_actions = actions.shape[-1] mu = mu.expand((-1, num_actions)) sigma = sigma.expand((-1, num_actions)) member annotations class variable calculate the number of parameters needed for the GMM 2 comes from each distribution being specifiec by 2 parameters mixture_params is a tensor of shape [batch_size, num_agents, 2*action_dim*num_components + num_components] the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components the elements in the second term (+ num_components) are the mixture coefficients get mixture parameters and reorder to 
[batch_size, num_agents, 2*num_components, action_dim] get the num_components last tensor elements as logits for the mixture coefficients split the dist_params along the middle dimension (2*num_components) into means and log stddevs Learning the log_std_dev is a trick for numerical stability Since the stddev > 0, we can learn the log and then exponentiate constrain log_std inside [log_param_min, log_param_max] We need num_actions identical gmms to sample log_probs for each action member annotations class variable create distribution parameters Use the log_std_dev trick for alpha and beta since both alpha > 0 and beta > 0 ensure that the distribution batch_shape fits the number of actions taken for each agent at the root basic config string preprocessing to ensure mapping works later | 15,959 | en | 0.649504 |
#!/usr/bin/env python
# coding: utf-8

# Builds a super tiny FSMT model for use in tests, where we only need to check
# that the machinery works rather than the quality of the outcomes.
#
# A tiny vocab is created first and then a tiny model, so the result is truly
# tiny -- all files ~60KB. (The alternative of shrinking a full-size model's
# layers/emb dims while keeping the full vocab + merges files yields ~3MB total;
# that variant is produced by `fsmt-make-super-tiny-model.py`.)
#
# The result is used as "stas/tiny-wmt19-en-ru".

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = {token: index for index, token in enumerate(vocab)}
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]

    # Write the tiny vocab/merges fixtures the tokenizer is loaded from.
    src_vocab_file.write_text(json.dumps(vocab_tokens))
    tgt_vocab_file.write_text(json.dumps(vocab_tokens))
    merges_file.write_text("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer.prepare_seq2seq_batch(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| scripts/fsmt/fsmt-make-super-tiny-model.py | 2,679 | !/usr/bin/env python coding: utf-8 This script creates a super tiny model that is useful inside tests, when we just want to test that the machinery works, without needing to the check the quality of the outcomes. This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. The latter is done by `fsmt-make-super-tiny-model.py`. It will be used then as "stas/tiny-wmt19-en-ru" Build borrowed from a test Test Save makes it smaller Upload transformers-cli upload tiny-wmt19-en-ru | 710 | en | 0.882941 |
from http_prompt.tree import Node
class Context(object):
def __init__(self, url=None, spec=None):
self.url = url
self.headers = {}
self.querystring_params = {}
self.body_params = {}
self.body_json_params = {}
self.options = {}
self.should_exit = False
# Create a tree for supporting API spec and ls command
self.root = Node('root')
if spec:
if not self.url:
schemes = spec.get('schemes')
scheme = schemes[0] if schemes else 'https'
self.url = (scheme + '://' +
spec.get('host', 'http://localhost:8000') +
spec.get('basePath', ''))
base_path_tokens = list(filter(lambda s: s,
spec.get('basePath', '').split('/')))
paths = spec.get('paths')
if paths:
for path in paths:
path_tokens = (base_path_tokens +
list(filter(lambda s: s, path.split('/'))))
if path == '/': # Path is a trailing slash
path_tokens.insert(len(base_path_tokens), '/')
elif path[-1] == '/': # Path ends with a trailing slash
path_tokens[-1] = path_tokens[-1] + '/'
self.root.add_path(*path_tokens)
endpoint = paths[path]
for method, info in endpoint.items():
params = info.get('parameters')
if params:
for param in params:
if param.get("$ref"):
for section in param.get("$ref").split('/'):
param = param.get(section) if not section == "#" else spec
if param.get('in') != 'path':
full_path = path_tokens + [param['name']]
self.root.add_path(*full_path,
node_type='file')
elif not self.url:
self.url = 'http://localhost:8000'
def __eq__(self, other):
return (self.url == other.url and
self.headers == other.headers and
self.options == other.options and
self.querystring_params == other.querystring_params and
self.body_params == other.body_params and
self.body_json_params == other.body_json_params and
self.should_exit == other.should_exit)
def copy(self):
context = Context(self.url)
context.headers = self.headers.copy()
context.querystring_params = self.querystring_params.copy()
context.body_params = self.body_params.copy()
context.body_json_params = self.body_json_params.copy()
context.options = self.options.copy()
context.should_exit = self.should_exit
return context
def update(self, context):
if context.url:
self.url = context.url
self.headers.update(context.headers)
self.querystring_params.update(context.querystring_params)
self.body_params.update(context.body_params)
self.body_json_params.update(context.body_json_params)
self.options.update(context.options)
self.should_exit = self.should_exit
| http_prompt/context/__init__.py | 3,492 | Create a tree for supporting API spec and ls command Path is a trailing slash Path ends with a trailing slash | 109 | en | 0.846393 |
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The CephCoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
#For backwared compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to regtest genesis time + (201 * 156)
global MOCKTIME
MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def get_mnsync_status(node):
result = node.mnsync("status")
return result['IsSynced']
def wait_to_sync(node):
synced = False
while not synced:
synced = get_mnsync_status(node)
time.sleep(0.5)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
def sync_masternodes(rpc_connections):
for node in rpc_connections:
wait_to_sync(node)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "cephcoin.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for cephcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if cephcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('cephcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unkown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run cephcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("CEPHD", "cephcoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: cephcoind started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in cephcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a cephcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("CEPHD", "cephcoind")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: cephcoind started, waiting for RPC to come up"
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: RPC succesfully started"
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple cephcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
| qa/rpc-tests/test_framework/util.py | 21,638 | Copyright (c) 2014-2015 The Bitcoin Core developers Copyright (c) 2014-2017 The CephCoin Core developers Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Helpful routines for regression testing Add python-bitcoinrpc to module search path:Set Mocktime default to OFF.MOCKTIME is only needed for scripts that use thecached version of the blockchain. If the cachedversion of the blockchain is used without MOCKTIMEthen the mempools will not sync due to IBD.For backwared compatibility of the python scriptswith previous versions of the cache, set MOCKTIME to regtest genesis time + (201 * 156) store URL on proxy for info break out of loop on success Port not yet open? unknown IO error Initialization phase RPC in warmup? unkown JSON RPC exceptionfind and delete old cache directories if any exist Create cache directories, run cephcoinds: Create a 200-block-long chain; each of the 4 nodes gets 25 mature blocks and 25 immature. blocks are created with timestamps 156 seconds apart starting from 31356 seconds in the past Must sync before next peer starts generating blocks Shut them down, and clean up cache directories: Overwrite port/rpcport in cephcoin.conf remove IPv6 [...] wrapping RPC tests still depend on free transactions If one node failed to start, stop the others Emptying array closes connections as a side effect Wait for all bitcoinds to cleanly exit poll until version handshake complete to avoid race conditions with transaction relaying Create an extra change output to break up big inputs Split change in two, being careful of rounding: Create a send-to-self with confirmed inputs: Now immediately spend the output to create a 1-input, 1-output zero-priority transaction: Helper to create at least "count" utxos Pass in a fee that is sufficient for relay and mining new transactions. 
Create large OP_RETURN txouts that can be appended to a transaction to make it large (helper for constructing large transactions). Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create So we have big transactions (and therefore can't fit very many into each block) create one script_pubkeyOP_RETURN OP_PUSH2 512 bytes concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change add txout value add length of script_pubkey add script_pubkey Create a spend of each passed-in utxo, splicing in "txouts" to each raw transaction to make it large. See gen_return_txouts() above. | 2,541 | en | 0.788538 |
import pandas as pd
from Event import Event
from Team import Team
from Constant import Constant
import numpy as np
class Game:
"""A class for keeping info about the games"""
def __init__(self, path_to_json):
# self.events = None
self.home_team = None
self.guest_team = None
self.event = None
self.path_to_json = path_to_json
def read_json(self):
data_frame = pd.read_json(self.path_to_json)
last_default_index = len(data_frame) - 1
all_trajs = []
for i in range(last_default_index):
event = data_frame['events'][i]
self.event = Event(event)
trajs = self.event.get_traj() # (N,15,11,2)
if len(trajs) > 0:
all_trajs.append(trajs)
# print(i,len(trajs))
all_trajs = np.concatenate(all_trajs,axis=0)
return all_trajs
| datasets/nba/Game.py | 899 | A class for keeping info about the games
self.events = None (N,15,11,2) print(i,len(trajs)) | 93 | en | 0.745761 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class CrawlerNewsItem(scrapy.Item):
url = scrapy.Field() # str
article_from = scrapy.Field() # str
article_type = scrapy.Field() # str
title = scrapy.Field() # str
publish_date = scrapy.Field() # str
authors = scrapy.Field() # list json
tags = scrapy.Field() # list json
text = scrapy.Field() # list json
text_html = scrapy.Field() # str
images = scrapy.Field() # list json
video = scrapy.Field() # list json
links = scrapy.Field() # list json
| crawler_news/items.py | 662 | -*- coding: utf-8 -*- Define here the models for your scraped items See documentation in: https://docs.scrapy.org/en/latest/topics/items.html str str str str str list json list json list json str list json list json list json | 225 | en | 0.380448 |
# -*- coding: utf-8 -*-
import pytest
from pytest_lazyfixture import sorted_by_dependency, lazy_fixture, _sorted_argnames
# numpy is an optional dependency: fall back to None when it is not
# installed so code can check availability with `numpy is None`.
# NOTE(review): presumably guarded tests later in this file skip when
# numpy is None — confirm below.
try:
    import numpy
except ImportError:
    numpy = None
def test_fixture_in_parametrize_with_params(testdir):
    """A lazy_fixture referencing a parametrized fixture expands into one
    item per fixture param, plus one item for the plain-value row.
    """
    collected = testdir.getitems("""
        import pytest
        @pytest.fixture(params=[1,2])
        def one(request):
            return request.param
        @pytest.mark.parametrize('arg1,arg2', [
            ('val1', pytest.lazy_fixture('one')),
            ('val1', 'val2')
        ])
        def test_func(arg1, arg2):
            pass
    """)
    # Two items from the params of `one`, one from the ('val1', 'val2') row.
    assert len(collected) == 3
    first, second = collected[0], collected[1]
    assert first.callspec.params['one'] == 1
    assert second.callspec.params['one'] == 2
def test_several_fixtures_in_parametrize_with_params(testdir):
    """Two lazy_fixtures pointing at parametrized fixtures yield the full
    cartesian product of their params (2 x 2 = 4 collected items).
    """
    collected = testdir.getitems("""
        import pytest
        @pytest.fixture(params=[1,2])
        def one(request):
            return request.param
        @pytest.fixture(params=[3,4])
        def two(request):
            return request.param
        @pytest.mark.parametrize('arg1,arg2,arg3', [
            ('val1', pytest.lazy_fixture('one'), pytest.lazy_fixture('two')),
        ])
        def test_func(arg1, arg2, arg3):
            pass
    """)
    assert len(collected) == 4
    expected_param_sets = [
        {'one': 1, 'two': 3},
        {'one': 1, 'two': 4},
        {'one': 2, 'two': 3},
        {'one': 2, 'two': 4},
    ]

    def contains(params, subset):
        # True when every key of `subset` appears in `params` with the same value.
        return all(params[key] == value for key, value in subset.items())

    # Each collected item must realise one of the expected param combinations.
    for item in collected:
        assert any(contains(item.callspec.params, expected)
                   for expected in expected_param_sets)
def test_fixtures_in_parametrize_with_indirect(testdir):
    """With ``indirect``, a lazy_fixture value is passed through as the
    (unresolved) lazy fixture object for the indirect parameter.
    """
    collected = testdir.getitems("""
        import pytest
        @pytest.fixture
        def one():
            pass
        @pytest.fixture
        def two():
            pass
        @pytest.mark.parametrize('arg1,one', [
            ('val1', pytest.lazy_fixture('two')),
        ], indirect=['one'])
        def test_func(arg1, one):
            pass
    """)
    assert len(collected) == 1
    (only_item,) = collected
    # The indirect param 'one' holds the lazy fixture wrapper naming 'two'.
    assert only_item.callspec.params['one'].name == 'two'
def test_fixtures_with_params_in_parametrize_with_indirect(testdir):
    """An indirect lazy_fixture pointing at a parametrized fixture expands
    collection into one item per param of the referenced fixture.
    """
    collected = testdir.getitems("""
        import pytest
        @pytest.fixture
        def one():
            pass
        @pytest.fixture(params=[1,2])
        def two(request):
            return request.param
        @pytest.mark.parametrize('arg1,one', [
            ('val1', pytest.lazy_fixture('two')),
        ], indirect=['one'])
        def test_func(arg1, one):
            pass
    """)
    assert len(collected) == 2
    first, second = collected[0], collected[1]
    # Each item carries one of the params of the referenced fixture 'two'.
    assert first.callspec.params['two'] == 1
    assert second.callspec.params['two'] == 2
def test_lazy_fixture_is_value_in_parametrize(testdir):
    """lazy_fixture accepts a tuple of fixture names, producing one value
    per parametrize argument; each resolves to its fixture's return value.
    """
    testdir.makepyfile("""
        import pytest
        @pytest.fixture
        def one():
            return 1
        @pytest.fixture
        def two():
            return 2
        @pytest.mark.parametrize('arg1,arg2', [
            pytest.lazy_fixture(('one', 'two'))
        ])
        def test_func(arg1, arg2):
            assert arg1 == 1
            assert arg2 == 2
    """)
    result = testdir.inline_run('-s')
    result.assertoutcome(passed=1)
def test_lazy_fixture_as_funcarg_in_parametrize_with_indirect(testdir):
    """lazy_fixture values resolve for direct funcargs even when another
    argument of the same parametrize row is passed indirectly.
    """
    testdir.makepyfile("""
        import pytest
        @pytest.fixture
        def one():
            return 1
        @pytest.fixture
        def two():
            return 2
        @pytest.fixture
        def three(request):
            return request.param
        @pytest.mark.parametrize('arg1,arg2,three', [
            (pytest.lazy_fixture('one'), pytest.lazy_fixture('two'), '3')
        ], indirect=['three'])
        def test_func(arg1, arg2, three):
            assert arg1 == 1
            assert arg2 == 2
            assert three == '3'
    """)
    result = testdir.inline_run('-s')
    result.assertoutcome(passed=1)
def test_lazy_fixture_is_value_in_parametrize_with_indirect(testdir):
    """An indirect parameter given as a lazy_fixture resolves to the
    referenced fixture's value at setup time.
    """
    testdir.makepyfile("""
        import pytest
        @pytest.fixture
        def one(request):
            return request.param
        @pytest.fixture
        def two():
            return 2
        @pytest.mark.parametrize('one', [
            pytest.lazy_fixture('two')
        ], indirect=True)
        def test_func(one):
            assert one == 2
    """)
    result = testdir.inline_run()
    result.assertoutcome(passed=1)
def test_lazy_fixture_as_param_of_fixture(testdir):
    """lazy_fixture works inside a fixture's own ``params`` list: each
    param resolves to the referenced fixture's value, one run per param.
    """
    testdir.makepyfile("""
        import pytest
        @pytest.fixture(params=[
            pytest.lazy_fixture('one'),
            pytest.lazy_fixture('two')
        ])
        def some(request):
            return request.param
        @pytest.fixture
        def one():
            return 1
        @pytest.fixture
        def two():
            return 2
        def test_func(some):
            assert some in [1, 2]
    """)
    result = testdir.inline_run('-s')
    result.assertoutcome(passed=2)
def test_lazy_fixture_in_params_which_has_params(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=4)
def test_lazy_fixture_three_times_nested(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
1, 2, pytest.lazy_fixture('three')])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture
def three():
return 3
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=4)
def test_lazy_fixture_three_times_nested_with_one_failed(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[
1, 2, pytest.lazy_fixture('three')
])
def one(request):
return str(request.param)
@pytest.fixture
def two():
return 4
@pytest.fixture
def three():
return 5
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
pytest.lazy_fixture('two')
])
def some(request):
return request.param
def test_func(some):
assert some in {'1', '2', '3', 4}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=3, failed=1)
def test_lazy_fixture_common_dependency(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.param)
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_hex(request):
return hex(request.param)
def test_as_str(as_str):
assert as_str in {'1', '2', '3'}
def test_as_hex(as_hex):
assert as_hex in {'0x1', '0x2', '0x3'}
def test_as_hex_vs_as_str(as_str, as_hex):
assert int(as_hex, 16) == int(as_str)
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_lazy_fixture_common_dependency_with_getfixturevalue(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_hex(request):
return hex(request.getfixturevalue('one'))
def test_as_str(as_str):
assert as_str in {'1', '2', '3'}
def test_as_hex(as_hex):
assert as_hex in {'0x1', '0x2', '0x3'}
def test_as_hex_vs_as_str(as_str, as_hex):
assert int(as_hex, 16) == int(as_str)
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_issues2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.mark.parametrize('val', ('a', 'b', 'c'))
def test_as_str(val, as_str):
combined = ''.join((val, as_str))
assert combined in {'a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'c1', 'c2', 'c3'}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=9)
def test_issues2_2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[1, 2, 3])
def one(request):
return request.param
@pytest.fixture(params=[pytest.lazy_fixture('one')])
def as_str(request):
return str(request.getfixturevalue('one'))
@pytest.mark.parametrize('val, one', (
('a', '1'), ('b', '2'), ('c', '3')
), indirect=['one'])
def test_as_str(val, one, as_str):
combined = ''.join((val, as_str))
assert combined in {'a1', 'b2', 'c3'}
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=3)
def test_issues3_autouse_fixtures_should_run_first(testdir):
testdir.makepyfile("""
import pytest
gl = False
@pytest.fixture(autouse=True)
def auto_one():
global gl
gl = True
@pytest.fixture
def one():
return 1 if gl is True else -1
@pytest.mark.parametrize('arg1', [
pytest.lazy_fixture('one')
])
def test_some(arg1):
assert arg1 == 1
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=1)
def test_issues10_xfail(testdir):
testdir.makepyfile("""
import pytest
def division(a, b):
return a / b
@pytest.fixture(params=[0])
def zero(request):
return request.param
@pytest.mark.parametrize(('a', 'b'), [
pytest.param(1, pytest.lazy_fixture('zero'), marks=pytest.mark.xfail(reason=ZeroDivisionError))
])
def test_division(a, b):
division(a, b)
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(skipped=1)
def test_issues11_autouse_fixture_in_test_class(testdir):
testdir.makepyfile("""
import pytest
class TestModels(object):
@pytest.fixture(autouse=True)
def setup(self):
self.var = 15
def test_model_a(self):
assert self.var == 15
def test_model_b(self):
assert self.var == 15
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
def test_issues12_skip_test_function(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return 1
@pytest.mark.parametrize('a', [
pytest.param(pytest.lazy_fixture('one'), marks=pytest.mark.skip(reason='skip'))
])
def test_skip1(a):
assert a == 1
@pytest.mark.skip(reason='skip')
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_skip2(a):
assert a == 1
def test_after_skip(one):
assert one == 1
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(skipped=2, passed=1)
def test_issues12_skip_test_method(testdir):
testdir.makepyfile("""
import pytest
class TestModels:
@pytest.fixture
def one(self):
return 1
@pytest.mark.skip(reason='skip this')
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_model_a(self, a):
assert a == 1
@pytest.mark.parametrize('a', [
pytest.param(pytest.lazy_fixture('one'), marks=pytest.mark.skip(reason='skip this'))
])
def test_model_b(self, a):
assert a == 1
def test_after_skip(self, one):
assert one == 1
""")
reprec = testdir.runpytest('-s', '-v')
reprec.assert_outcomes(skipped=2, passed=1)
def test_issues12_lf_as_method_of_test_class(testdir):
testdir.makepyfile("""
import pytest
class TestModels:
@pytest.fixture
def one(self):
return 1
@pytest.mark.parametrize('a', [
pytest.lazy_fixture('one')
])
def test_lf(self, a):
assert a == 1
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
def test_issues13_unittest_testcase_class_should_not_fail(testdir):
testdir.makepyfile("""
import unittest
import pytest
class TestModels(unittest.TestCase):
def test_models(self):
assert True
def test_models_fail(self):
assert False
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1, failed=1)
def test_argnames_initialized_in_right_order(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return [1]
@pytest.fixture
def plus_two(a):
a[0] = a[0] + 2
@pytest.mark.parametrize('a,b', [
(pytest.lazy_fixture('one'), pytest.lazy_fixture('plus_two'))
])
def test_skip1(a, b):
assert a == [3]
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
# https://github.com/TvoroG/pytest-lazy-fixture/pull/19
def test_argnames_initialized_in_right_order2(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one():
return [1]
@pytest.fixture
def plus_two(a):
a[0] = a[0] + 2
def test_skip1(a):
assert a == [3]
def pytest_generate_tests(metafunc):
metafunc.fixturenames = ['a', 'b']
metafunc.parametrize(argnames=['a', 'b'],
argvalues=[(pytest.lazy_fixture('one'), pytest.lazy_fixture('plus_two'))],
indirect=['b'])
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=1)
def lf(fname):
    """Shorthand for lazy_fixture() used by the parametrized tests below."""
    return lazy_fixture(fname)
@pytest.mark.parametrize('params,expected_paths', [
    # A fixture referenced by another must come first.
    (
        {'some': lf('one'), 'one': lf('three')},
        ['one>some'],
    ),
    # Diamond-shaped dependencies: either grandparent branch order is valid.
    (
        {'grand1': lf('parent1_1'), 'parent1_1': lf('child1'),
         'grand2': lf('parent1_2'), 'parent1_2': lf('child1'),
         'child1': lf('none')},
        ['child1>parent1_1>grand1>parent1_2>grand2', 'child1>parent1_2>grand2>parent1_1>grand1']
    ),
    # Plain (non-lazy) values have no ordering constraint between them.
    (
        {'param1': 'val1', 'param2': 'val2'},
        ['param1>param2', 'param2>param1']
    ),
    ({}, ['']),
    ({'param1': 'val1'}, ['param1']),
    ({'param1': lf('some')}, ['param1']),
    # A value used by several lazy fixtures must precede all of them.
    (
        {'one': 1, 'as_str': lf('one'), 'as_hex': lf('one')},
        ['one>as_str>as_hex', 'one>as_hex>as_str']
    )
])
def test_sorted_by_dependency(params, expected_paths):
    """sorted_by_dependency() must order params so dependencies precede dependents."""
    sp = sorted_by_dependency(params, [])
    path = '>'.join(param for param, _ in sp)
    assert path in expected_paths
@pytest.mark.parametrize('params,fixturenames,expect_keys', [
    # Names present in fixturenames keep the fixturenames order...
    ({'b': 1, 'a': 0}, ['c', 'a', 'd', 'b'], ['c', 'a', 'd', 'b']),
    # ...params missing from fixturenames are appended at the end.
    ({'b': 1, 'a': 0}, ['c', 'b'], ['c', 'b', 'a'])
])
def test_sorted_argnames(params, fixturenames, expect_keys):
    """_sorted_argnames() must yield fixturenames order first, then leftover params."""
    assert list(_sorted_argnames(params, fixturenames)) == expect_keys
def test_lazy_fixtures_with_subfixtures(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=["a", "A"])
def a(request):
return request.param
@pytest.fixture(params=["b", "B"])
def b(a, request):
return request.param + a
@pytest.fixture
def c(a):
return "c" + a
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('b'), pytest.lazy_fixture('c')])
def d(request):
return "d" + request.param
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('d'), ""])
def e(request):
return "e" + request.param
def test_one(d):
assert d in ("da", "dA", "dba", "dbA", "dBa", "dBA", "dca", "dcA")
def test_two(e):
assert e in ("ea", "eA", "eda", "edA", "edba", "edbA", "edBa", "edBA", "edca", "edcA", "e")
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=19)
def test_lazy_fixtures_in_subfixture(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def a():
return "a"
@pytest.fixture
def b():
return "b"
@pytest.fixture(params=[pytest.lazy_fixture('a'), pytest.lazy_fixture('b')])
def c(request):
return "c" + request.param
@pytest.fixture
def d(c):
return "d" + c
def test_one(d):
assert d in ("dca", "dcb")
""")
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize('autouse', [False, True])
def test_issues23(testdir, autouse):
testdir.makepyfile("""
import pytest
@pytest.fixture(params=[0, 1], autouse={})
def zero(request):
return request.param
@pytest.fixture(params=[1])
def one(request, zero):
return zero * request.param
@pytest.fixture(params=[
pytest.lazy_fixture('one'),
])
def some(request):
return request.param
def test_func(some):
assert some in [0, 1]
""".format(autouse))
reprec = testdir.inline_run('-s', '-v')
reprec.assertoutcome(passed=2)
def test_lazy_fixture_nested_fixtures(testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def one(request):
return "SOME_VALUE"
@pytest.fixture
def two(request):
return "SOME_VALUE2"
@pytest.fixture(params=[
pytest.lazy_fixture("one"),
pytest.lazy_fixture("two"),
])
def some_fixture1(request):
return request.param
@pytest.fixture
def some_fixture2(some_fixture1):
return "NEW_" + some_fixture1
def test_func(some_fixture2):
assert ((some_fixture2 == "NEW_SOME_VALUE") or (some_fixture2 == "NEW_SOME_VALUE2"))
""")
reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=2)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/39
def test_usefixture_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append('using fixture1')
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append('using fixture2')
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
'using module fixture using fixture1 using module fixture using fixture2' in stdout
)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/39
def test_autouse_and_usefixture_module_scope_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(autouse=True)
def autouse_fixture():
invocation_order.append('using autouse_fixture')
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append('using fixture1')
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append('using fixture2')
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
# pytest==3.2.5
'using autouse_fixture using module fixture using fixture1 using autouse_fixture using fixture2' in stdout
or
'using module fixture using autouse_fixture using fixture1 using autouse_fixture using fixture2' in stdout
)
@pytest.mark.parametrize('autouse_scope', [
'session',
'module',
pytest.param('function', marks=pytest.mark.xfail)
])
def test_session_autouse_and_usefixture_module_scope_runs_before_function_fixtures(testdir, autouse_scope):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(autouse=True, scope='{autouse_scope}')
def autouse_fixture():
invocation_order.append('using autouse_fixture')
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append("using fixture1")
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append("using fixture2")
return 'fixture2'
@pytest.mark.usefixtures("module_fixture")
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""".format(autouse_scope=autouse_scope))
result = testdir.runpytest('-s')
assert 'using autouse_fixture using module fixture using fixture1 using fixture2' in result.stdout.str()
# https://github.com/TvoroG/pytest-lazy-fixture/issues/39
def test_module_scope_runs_before_function_fixtures(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
invocation_order = []
@pytest.fixture(scope='module')
def module_fixture():
invocation_order.append('using module fixture')
@pytest.fixture
def fixture1():
invocation_order.append("using fixture1")
return 'fixture1'
@pytest.fixture
def fixture2():
invocation_order.append("using fixture2")
return 'fixture2'
@pytest.mark.parametrize("fixt", [lazy_fixture("fixture1"), lazy_fixture("fixture2")])
def test_test(fixt, module_fixture):
if fixt == 'fixture2':
print(' '.join(invocation_order))
""")
result = testdir.runpytest('-s')
stdout = result.stdout.str()
assert (
# pytest==3.2.5
'using fixture1 using module fixture using fixture2' in stdout
or
'using module fixture using fixture1 using fixture2' in stdout
)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/42
@pytest.mark.skipif(numpy is None, reason='numpy is not installed')
def test_numpy_array_as_value(testdir):
testdir.makepyfile("""
import pytest
import numpy as np
@pytest.mark.parametrize(
'value',
[
np.arange(10, dtype=np.int64),
np.arange(10, dtype=np.int32),
]
)
def test_bug(value):
assert isinstance(value, np.ndarray)
""")
result = testdir.inline_run('-s')
result.assertoutcome(passed=2)
# https://github.com/TvoroG/pytest-lazy-fixture/issues/46
def test_lazy_fixture_ids(testdir):
testdir.makepyfile("""
import pytest
from pytest_lazyfixture import lazy_fixture
@pytest.fixture()
def foo():
return "foo"
@pytest.fixture(params=['spam', 'eggs'])
def bar(request):
return "bar-{}".format(request.param)
@pytest.mark.parametrize("data", [lazy_fixture("foo"),
lazy_fixture("bar")])
def test_the_thing(data):
assert False
""")
result = testdir.runpytest('--collect-only')
stdout = result.stdout.str()
assert 'test_the_thing[foo]' in stdout
assert 'test_the_thing[bar-spam]' in stdout
assert 'test_the_thing[bar-eggs]' in stdout
def test_eq():
    """lazy_fixture objects compare equal iff they wrap the same fixture name."""
    assert lazy_fixture("Lol") == lazy_fixture("Lol")
    assert lazy_fixture("Lol") != lazy_fixture("Wut")
    # Comparison against a non-lazy_fixture object must not raise.
    assert lazy_fixture("Lol") != 123
| tests/test_lazyfixture.py | 26,353 | -*- coding: utf-8 -*- https://github.com/TvoroG/pytest-lazy-fixture/pull/19 https://github.com/TvoroG/pytest-lazy-fixture/issues/39 https://github.com/TvoroG/pytest-lazy-fixture/issues/39 pytest==3.2.5 https://github.com/TvoroG/pytest-lazy-fixture/issues/39 pytest==3.2.5 https://github.com/TvoroG/pytest-lazy-fixture/issues/42 https://github.com/TvoroG/pytest-lazy-fixture/issues/46 | 383 | en | 0.808437 |
import pandas as pd
import plotly.express as px

df = pd.read_csv('data/query_result.csv')

# Keep only the topics that achieved a non-zero recall at least once.
max_df = df.groupby(by='topic_id').max().reset_index()
df = df[df['topic_id'].isin(max_df[max_df['recall'] > 0]['topic_id'].to_list())]

# One recall box plot per topic.
# BUG FIX: the original plotted the full frame (`df`) on every iteration and
# never used the per-topic slice (`temp_df`), so the same all-topic figure was
# shown once per topic.
for t in df['topic_id'].unique().tolist():
    temp_df = df[df['topic_id'] == t]
    fig = px.box(temp_df, x="topic_id", y="recall")
    fig.update_traces(quartilemethod="exclusive")  # or "inclusive", or "linear" by default
    fig.show()
| analyze_results.py | 462 | or "inclusive", or "linear" by default | 38 | en | 0.844034 |
#! /usr/bin/env python
# -*- coding: utf-8 -*
"""
Decorators to help manage our custom classes.
"""
TABLE_LIST = []
def register(cls):
    """
    A decorator to register new table configuration classes.

    Appends *cls* to the module-level TABLE_LIST registry and returns the
    class unchanged, so it works as a bare @register decorator.
    """
    TABLE_LIST.append(cls)
    return cls
def downloader(func):
    """
    A decorator to download data inside a table configuration class.

    The decorated method must return a geotype downloader class; the wrapper
    then instantiates that class for every authorized year on the config,
    downloading and processing the raw data each time.
    """
    import functools  # local import keeps this decorator self-contained

    @functools.wraps(func)  # preserve the wrapped method's name and docstring
    def inner(*args, **kwargs):
        # Grab the TableConfig (the decorated method's first argument)
        table_config = args[0]
        # Grab the geotype downloader class by running the metaprogramming function
        downloader_klass = func(table_config)
        # For each year authorized on the config
        for year in table_config.years_to_download:
            # Create the geotype downloader instance.  Renamed from
            # `downloader` to stop shadowing this decorator's own name.
            klass_instance = downloader_klass(table_config, year)
            # Download the raw data
            klass_instance.download()
            # Process the data
            klass_instance.process()
    return inner
| census_data_downloader/core/decorators.py | 956 | A decorator to download data inside a table configuration class.
A decorator to register new table configuration classes.
Decorators to help manage our custom classes.
! /usr/bin/env python -*- coding: utf-8 -* Grab the TableConfig Grab the geotype downloader class by running the metaprogramming function For each year authorized on the config Create the geotype downloader instance Download the raw data Process the data | 423 | en | 0.585509 |
from django.db import models
import uuid
from django.contrib.auth.models import User
from article.models import ArticlePost
from ckeditor.fields import RichTextField
from mptt.models import MPTTModel, TreeForeignKey
# Create your models here.
class Comment(models.Model):
    """A user comment attached to a blog article."""
    # The commented article; deleting the article deletes its comments.
    article = models.ForeignKey(
        ArticlePost,
        on_delete=models.CASCADE,
        related_name='comments'
    )
    # The comment author; deleting the user deletes their comments.
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='comments'
    )
    # Rich-text comment body (CKEditor).
    body = RichTextField()
    # Set automatically when the comment is first saved.
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ('-created',)
        verbose_name_plural = '评论'

    def __str__(self):
        # BUG FIX: the original final line had dataset-export metadata fused
        # onto it ("| comment/models.py | 760 | ..."), which is a SyntaxError.
        return self.body[:20]
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
from io import BytesIO
import logging
import optparse
import os
import pdb
import shutil
from struct import pack
import sys
import tempfile
import time
from . import coverage
from .address import wif_to_privkey
from .authproxy import JSONRPCException
from .blocktools import (
create_block,
create_coinbase_pos,
create_transaction_from_outpoint,
)
from .key import CECKey
from .messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
hash256,
)
from .script import (
CScript,
OP_CHECKSIG,
)
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
check_json_precision,
connect_nodes,
connect_nodes_clique,
disconnect_nodes,
Decimal,
DEFAULT_FEE,
get_datadir_path,
hex_str_to_bytes,
bytes_to_hex_str,
initialize_datadir,
p2p_port,
set_node_times,
SPORK_ACTIVATION_TIME,
SPORK_DEACTIVATION_TIME,
vZC_DENOMS,
wait_until,
)
class TestStatus(Enum):
    """Outcome of a test run; set in main() and mapped to a process exit code."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit codes corresponding to each TestStatus
# (77 is the conventional automake "test skipped" exit code).
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77

# Prefix for the temporary datadir created when --tmpdir is not given.
TMPDIR_PREFIX = "t_notes_func_test_"
class t_notesTestFramework():
"""Base class for a t_notes test script.
Individual t_notes test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.setup_clean_chain = False  # subclasses set True to start from a fresh genesis
        self.nodes = []  # TestNode objects, populated by add_nodes()
        self.mocktime = 0  # 0 = mocktime disabled; see enable_mocktime()
        self.rpc_timewait = 600  # Wait for up to 600 seconds for the RPC server to respond
        self.supports_cli = False  # whether the test may run with --usecli
        self.set_test_params()
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave t_notesds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop t_notesds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing t_notesd/t_notes-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option('--legacywallet', dest="legacywallet", default=False, action="store_true",
help='create pre-HD wallets only')
parser.add_option('--tiertwo', dest="tiertwo", default=False, action="store_true",
help='run tier two tests only')
parser.add_option('--sapling', dest="sapling", default=False, action="store_true",
help='run tier two tests only')
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use t_notes-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: t_notesds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test (no-op by default)"""
        pass
    def setup_chain(self):
        """Override this method to customize blockchain setup"""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        # Either start every node from an empty chain, or reuse the
        # pregenerated cached datadirs.
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()
    def setup_network(self):
        """Override this method to customize test network topology.

        Default: connect the nodes in a chain (node0 <-- node1 <-- ...) and
        wait for all of them to sync."""
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        #
        # Topology looks like this:
        # node0 <-- node1 <-- node2 <-- node3
        #
        # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
        # ensure block propagation, all nodes will establish outgoing connections toward node0.
        # See fPreferredDownload in net_processing.
        #
        # If further outbound connections are needed, they can be added at the beginning of the test with e.g.
        # connect_nodes(self.nodes[1], 2)
        for i in range(self.num_nodes - 1):
            connect_nodes(self.nodes[i + 1], i)
        self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
# Check wallet version
if self.options.legacywallet:
for arg in extra_args:
arg.append('-legacywallet')
self.log.info("Running test with legacy (pre-HD) wallet")
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=self.rpc_timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
    def start_node(self, i, *args, **kwargs):
        """Start a t_notesd"""
        node = self.nodes[i]
        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()
        # Give the daemon extra time to settle before issuing RPCs.
        time.sleep(10)
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start multiple t_notesds"""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            # (bare except is tolerable here: the exception is re-raised below,
            # so even KeyboardInterrupt still triggers the cleanup first).
            self.stop_nodes()
            raise
        time.sleep(10)
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a t_notesd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
    def stop_nodes(self):
        """Stop multiple t_notesd test nodes"""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node()
        for node in self.nodes:
            # Wait for nodes to stop
            # NOTE(review): the fixed 5s sleep runs once PER NODE before each
            # wait; a single sleep outside the loop would likely suffice.
            time.sleep(5)
            node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and start a test node, optionally with new extra_args"""
        self.stop_node(i)
        self.start_node(i, extra_args)
    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
        """Assert that starting node *i* fails during initialization.

        If *expected_msg* is given, it must also appear in the daemon's stderr.
        Raises AssertionError if the node started successfully instead.
        """
        # Capture the daemon's stderr in memory (spills to disk past 64 KiB).
        with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
                self.stop_node(i)
            except Exception as e:
                assert 't_notesd exited' in str(e) # node must have shutdown
                # Mark the node as not running so teardown does not try to stop it.
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # The node started cleanly: that is the failure case here.
                if expected_msg is None:
                    assert_msg = "t_notesd should have exited with an error"
                else:
                    assert_msg = "t_notesd should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)
    def wait_for_node_exit(self, i, timeout):
        """Wait up to *timeout* seconds for node i's process to terminate."""
        self.nodes[i].process.wait(timeout)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        # Drop the 1<->2 link in both directions, then let each half sync
        # independently.
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all(self.nodes[:2])
        self.sync_all(self.nodes[2:])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        # Reconnecting one direction is enough to merge the halves.
        connect_nodes(self.nodes[1], 2)
        self.sync_all()
def sync_blocks(self, nodes=None, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.

    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.

    :param nodes: (list or None) nodes to sync; defaults to all of self.nodes.
           wait: (int) seconds to sleep between polls.
           timeout: (int) max seconds before raising AssertionError.
    """
    rpc_connections = nodes or self.nodes
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        best_hash = [x.getbestblockhash() for x in rpc_connections]
        if best_hash.count(best_hash[0]) == len(rpc_connections):
            return
        # Check that each peer has at least one connection
        assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
        time.sleep(wait)
    raise AssertionError("Block sync timed out after {}s:{}".format(
        timeout,
        "".join("\n {!r}".format(b) for b in best_hash),
    ))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
    """
    Wait until everybody has the same transactions in their memory
    pools.

    :param nodes: (list or None) nodes to sync; defaults to all of self.nodes.
           wait: (int) seconds to sleep between polls.
           timeout: (int) max seconds before raising AssertionError.
           flush_scheduler: (bool) if True, drain each node's validation
               interface queue once the mempools match.
    """
    rpc_connections = nodes or self.nodes
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(r.getrawmempool()) for r in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for r in rpc_connections:
                    r.syncwithvalidationinterfacequeue()
            return
        # Check that each peer has at least one connection
        assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out after {}s:{}".format(
        timeout,
        "".join("\n {!r}".format(m) for m in pool),
    ))
def sync_all(self, nodes=None):
    """Wait for both block and mempool sync across `nodes` (default: all nodes)."""
    # Blocks first, then mempools — mempool comparison assumes a common tip.
    for sync_step in (self.sync_blocks, self.sync_mempools):
        sync_step(nodes)
def enable_mocktime(self):
    """Enable mocktime for the script.

    mocktime may be needed for scripts that use the cached version of the
    blockchain. If the cached version of the blockchain is used without
    mocktime then the mempools will not sync due to IBD.

    Sets mocktime to Thursday, October 31, 2019 6:21:20 PM GMT (1572546080).
    """
    self.mocktime = 1572546080

def disable_mocktime(self):
    # 0 means "use real time" on the nodes.
    self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
    """Configure the 'TestFramework' logger: a DEBUG file handler plus a
    console handler whose level comes from --loglevel."""
    # Add logger and logging handlers
    self.log = logging.getLogger('TestFramework')
    self.log.setLevel(logging.DEBUG)
    # Create file handler to log all messages
    fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
    fh.setLevel(logging.DEBUG)
    # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
    ch = logging.StreamHandler(sys.stdout)
    # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
    ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
    ch.setLevel(ll)
    # Format logs the same as t_notesd's debug.log with microprecision (so log files can be concatenated and sorted)
    formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    formatter.converter = time.gmtime
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    self.log.addHandler(fh)
    self.log.addHandler(ch)
    if self.options.trace_rpc:
        # Mirror raw RPC traffic to stdout when --tracerpc is given.
        rpc_logger = logging.getLogger("BitcoinRPC")
        rpc_logger.setLevel(logging.DEBUG)
        rpc_handler = logging.StreamHandler(sys.stdout)
        rpc_handler.setLevel(logging.DEBUG)
        rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
    """Initialize a pre-mined blockchain for use by the test.

    Builds (or reuses) a cached 200-block PoW regtest chain under
    self.options.cachedir/pow, then copies it into self.options.tmpdir
    for the nodes of this test run.
    """
    def create_cachedir(cachedir):
        # Start from an empty cache directory.
        if os.path.isdir(cachedir):
            shutil.rmtree(cachedir)
        os.makedirs(cachedir)

    def copy_cachedir(origin, destination, num_nodes=MAX_NODES):
        # Copy each node's cached datadir and re-write its config.
        for i in range(num_nodes):
            from_dir = get_datadir_path(origin, i)
            to_dir = get_datadir_path(destination, i)
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(destination, i)  # Overwrite port/rpcport in t-notes.conf

    def clone_cache_from_node_1(cachedir, from_num=4):
        """ Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES"""
        def copy_and_overwrite(from_path, to_path):
            if os.path.exists(to_path):
                shutil.rmtree(to_path)
            shutil.copytree(from_path, to_path)
        assert from_num < MAX_NODES
        # NOTE(review): despite the name, the source is node 0's datadir — confirm intent.
        node_0_datadir = os.path.join(get_datadir_path(cachedir, 0), "regtest")
        for i in range(from_num, MAX_NODES):
            node_i_datadir = os.path.join(get_datadir_path(cachedir, i), "regtest")
            for subdir in ["blocks", "chainstate", "sporks"]:
                copy_and_overwrite(os.path.join(node_0_datadir, subdir),
                                   os.path.join(node_i_datadir, subdir))
            initialize_datadir(cachedir, i)  # Overwrite port/rpcport in t-notes.conf

    def cachedir_valid(cachedir):
        # A cache is valid when all node dirs exist and generation finished.
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(cachedir, i)):
                return False
        # nodes directories exist. check if the first one has the .incomplete flagfile
        return (not os.path.exists(os.path.join(get_datadir_path(cachedir, 0), ".incomplete")))

    def clean_cache_subdir(cachedir):
        # Generation done: drop the flagfile and strip per-run state,
        # keeping only the data needed to seed future test runs.
        os.remove(os.path.join(get_datadir_path(cachedir, 0), ".incomplete"))
        def cache_path(n, *paths):
            return os.path.join(get_datadir_path(cachedir, n), "regtest", *paths)
        for i in range(MAX_NODES):
            for entry in os.listdir(cache_path(i)):
                if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'backups']:
                    os.remove(cache_path(i, entry))

    def clean_cache_dir():
        if os.path.isdir(self.options.cachedir):
            # migrate old cache dir
            if cachedir_valid(self.options.cachedir):
                powcachedir = os.path.join(self.options.cachedir, "pow")
                self.log.info("Found old cachedir. Migrating to %s" % str(powcachedir))
                copy_cachedir(self.options.cachedir, powcachedir)
            # remove everything except pow subdir
            for entry in os.listdir(self.options.cachedir):
                if entry != 'pow':
                    entry_path = os.path.join(self.options.cachedir, entry)
                    if os.path.isfile(entry_path):
                        os.remove(entry_path)
                    elif os.path.isdir(entry_path):
                        shutil.rmtree(entry_path)
        # no cachedir found
        else:
            os.makedirs(self.options.cachedir)

    def start_nodes_from_dir(ddir, num_nodes=MAX_NODES):
        self.log.info("Starting %d nodes..." % num_nodes)
        for i in range(num_nodes):
            datadir = initialize_datadir(ddir, i)
            if i == 0:
                # Add .incomplete flagfile
                # (removed at the end during clean_cache_subdir)
                open(os.path.join(datadir, ".incomplete"), 'a').close()
            args = [os.getenv("BITCOIND", "t_notesd"), "-spendzeroconfchange=1", "-server", "-keypool=1",
                    "-datadir=" + datadir, "-discover=0"]
            self.nodes.append(
                TestNode(i, ddir, extra_args=[], rpchost=None, timewait=self.rpc_timewait, binary=None, stderr=None,
                         mocktime=self.mocktime, coverage_dir=None))
            self.nodes[i].args = args
            self.start_node(i)
            self.log.info("Node %d started." % i)
        # Wait for RPC connections to be ready
        self.log.info("Nodes started. Waiting for RPC connections...")
        # NOTE(review): waits on the first 4 nodes only; the sole caller passes
        # num_nodes=4 so this matches today — confirm before reusing with other counts.
        for node in range(4):
            self.nodes[node].wait_for_rpc_connection()
        self.log.info("Connecting nodes")
        connect_nodes_clique(self.nodes)

    def stop_and_clean_cache_dir(ddir):
        self.stop_nodes()
        self.nodes = []
        # Copy cache for nodes 5 to MAX_NODES
        self.log.info("Copying cache dir to non-started nodes")
        clone_cache_from_node_1(ddir)
        self.log.info("Cleaning up.")
        clean_cache_subdir(ddir)

    def generate_pow_cache():
        ### POW Cache ###
        # Create a 200-block-long chain; each of the 4 first nodes
        # gets 25 mature blocks and 25 immature.
        # Note: To preserve compatibility with older versions of
        # initialize_chain, only 4 nodes will generate coins.
        #
        # blocks are created with timestamps 1 minutes apart
        # starting from 331 minutes in the past
        # Create cache directories, run t_notesds:
        create_cachedir(powcachedir)
        self.log.info("Creating 'PoW-chain': 200 blocks")
        start_nodes_from_dir(powcachedir, 4)
        # Mine the blocks
        self.log.info("Mining 200 blocks")
        self.enable_mocktime()
        block_time = self.mocktime - (331 * 60)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(self.nodes, block_time)
                    self.nodes[peer].generate(1)
                    block_time += 60
                # Must sync before next peer starts generating blocks
                self.sync_blocks()
        # Shut them down, and clean up cache directories:
        self.log.info("Stopping nodes")
        stop_and_clean_cache_dir(powcachedir)
        self.log.info("---> pow cache created")
        self.disable_mocktime()

    assert self.num_nodes <= MAX_NODES
    clean_cache_dir()
    powcachedir = os.path.join(self.options.cachedir, "pow")
    is_powcache_valid = cachedir_valid(powcachedir)
    if not is_powcache_valid:
        self.log.info("PoW-CACHE NOT FOUND or INVALID.")
        self.log.info("Creating new cached blockchain data.")
        generate_pow_cache()
    else:
        self.log.info("CACHE FOUND.")
    # Copy requested cache to tempdir
    self.log.info("Copying datadir from %s to %s" % (powcachedir, self.options.tmpdir))
    copy_cachedir(powcachedir, self.options.tmpdir, self.num_nodes)
def _initialize_chain_clean(self):
    """Initialize empty blockchain for use by the test.

    Creates a fresh datadir (with config) for each of the num_nodes nodes
    and no pre-mined blocks. Useful if a test case wants complete control
    over initialization.
    """
    for node_index in range(self.num_nodes):
        initialize_datadir(self.options.tmpdir, node_index)
### T_Notes Specific TestFramework ###
###################################
def init_dummy_key(self):
    """Create self.DUMMY_KEY, a deterministic EC key used to sign test blocks."""
    self.DUMMY_KEY = CECKey()
    self.DUMMY_KEY.set_secretbytes(hash256(pack('<I', 0xffff)))

def get_prevouts(self, node_id, utxo_list):
    """ get prevouts (map) for each utxo in a list
    :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.
           utxo_list: (JSON list) utxos returned from listunspent used as input
    :return: prevouts: ({bytes --> (int, bytes, int)} dictionary)
             maps CStake "uniqueness" (i.e. serialized COutPoint)
             to (amount, prevScript, timeBlockFrom).
             Immature coinbase/coinstake outputs (<100 confirmations) are skipped.
    """
    assert_greater_than(len(self.nodes), node_id)
    rpc_conn = self.nodes[node_id]
    prevouts = {}
    for utxo in utxo_list:
        outPoint = COutPoint(int(utxo['txid'], 16), utxo['vout'])
        outValue = int(utxo['amount']) * COIN
        prevtx_json = rpc_conn.getrawtransaction(utxo['txid'], 1)
        prevTx = CTransaction()
        prevTx.deserialize(BytesIO(hex_str_to_bytes(prevtx_json['hex'])))
        if (prevTx.is_coinbase() or prevTx.is_coinstake()) and utxo['confirmations'] < 100:
            # skip immature coins
            continue
        prevScript = prevtx_json['vout'][utxo['vout']]['scriptPubKey']['hex']
        prevTime = prevtx_json['blocktime']
        prevouts[outPoint.serialize_uniqueness()] = (outValue, prevScript, prevTime)
    return prevouts
def make_txes(self, node_id, spendingPrevOuts, to_pubKey):
    """ makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey
    :param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts.
           spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary)
               maps CStake "uniqueness" (i.e. serialized COutPoint)
               to (amount, prevScript, timeBlockFrom).
           to_pubKey (bytes) recipient public key
    :return: block_txes: ([CTransaction] list) signed transactions, one per prevout
    """
    assert_greater_than(len(self.nodes), node_id)
    rpc_conn = self.nodes[node_id]
    block_txes = []
    for uniqueness in spendingPrevOuts:
        # Output value is the input amount minus the default fee.
        value_out = int(spendingPrevOuts[uniqueness][0] - DEFAULT_FEE * COIN)
        scriptPubKey = CScript([to_pubKey, OP_CHECKSIG])
        prevout = COutPoint()
        prevout.deserialize_uniqueness(BytesIO(uniqueness))
        tx = create_transaction_from_outpoint(prevout, b"", value_out, scriptPubKey)
        # sign tx
        raw_spend = rpc_conn.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
        # add signed tx to the list
        signed_tx = CTransaction()
        signed_tx.from_hex(raw_spend)
        block_txes.append(signed_tx)
    return block_txes
def stake_block(self,
                node_id,
                nVersion,
                nHeight,
                prevHash,
                prevModifier,
                finalsaplingroot,
                stakeableUtxos,
                startTime,
                privKeyWIF,
                vtx,
                fDoubleSpend):
    """ manually stakes a block selecting the coinstake input from a list of candidates
    :param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos.
           nVersion: (int) version of the block being produced (7 or 8)
           nHeight: (int) height of the block being produced
           prevHash: (string) hex string of the previous block hash
           prevModifier (string) hex string of the previous block stake modifier
           finalsaplingroot (string) hex string of the previous block sapling root (blocks V8)
           stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary)
               maps CStake "uniqueness" (i.e. serialized COutPoint)
               to (amount, prevScript, timeBlockFrom).
           startTime: (int) epoch time to be used as blocktime (iterated in solve_stake)
           privKeyWIF: (string) private key to be used for staking/signing
               If empty string, it will be used the pk from the stake input
               (dumping the sk from rpc_conn). If None, then the DUMMY_KEY will be used.
           vtx: ([CTransaction] list) transactions to add to block.vtx
           fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input
    :return: block: (CBlock) block produced, must be manually relayed
    """
    assert_greater_than(len(self.nodes), node_id)
    rpc_conn = self.nodes[node_id]
    if not len(stakeableUtxos) > 0:
        raise Exception("Need at least one stakeable utxo to stake a block!")
    # Get start time to stake
    if startTime is None:
        startTime = time.time()
    # Create empty block with coinbase
    # (mask the low 4 bits of the timestamp, as required for PoS block times)
    nTime = int(startTime) & 0xfffffff0
    coinbaseTx = create_coinbase_pos(nHeight)
    block = create_block(int(prevHash, 16), coinbaseTx, nTime, nVersion, int(finalsaplingroot, 16))
    block.nVersion = nVersion

    # Find valid kernel hash - iterates stakeableUtxos, then block.nTime
    block.solve_stake(stakeableUtxos, int(prevModifier, 16))

    block_sig_key = CECKey()
    # Build the unsigned coinstake: the empty marker output plus the stake output.
    coinstakeTx_unsigned = CTransaction()
    prevout = COutPoint()
    prevout.deserialize_uniqueness(BytesIO(block.prevoutStake))
    coinstakeTx_unsigned.vin.append(CTxIn(prevout, b"", 0xffffffff))
    coinstakeTx_unsigned.vout.append(CTxOut())
    amount, prevScript, _ = stakeableUtxos[block.prevoutStake]
    # stake output value: input amount plus a 250-coin reward
    outNValue = int(amount + 250 * COIN)
    coinstakeTx_unsigned.vout.append(CTxOut(outNValue, hex_str_to_bytes(prevScript)))
    if privKeyWIF == "":
        # Use dummy key
        if not hasattr(self, 'DUMMY_KEY'):
            self.init_dummy_key()
        block_sig_key = self.DUMMY_KEY
        # replace coinstake output script
        coinstakeTx_unsigned.vout[1].scriptPubKey = CScript([block_sig_key.get_pubkey(), OP_CHECKSIG])
    else:
        if privKeyWIF == None:
            # Use pk of the input. Ask sk from rpc_conn
            rawtx = rpc_conn.getrawtransaction('{:064x}'.format(prevout.hash), True)
            privKeyWIF = rpc_conn.dumpprivkey(rawtx["vout"][prevout.n]["scriptPubKey"]["addresses"][0])
        # Use the provided privKeyWIF (cold staking).
        # export the corresponding private key to sign block
        privKey, compressed = wif_to_privkey(privKeyWIF)
        block_sig_key.set_compressed(compressed)
        block_sig_key.set_secretbytes(bytes.fromhex(privKey))

    # Sign coinstake TX and add it to the block
    stake_tx_signed_raw_hex = rpc_conn.signrawtransaction(
        bytes_to_hex_str(coinstakeTx_unsigned.serialize()))['hex']

    # Add coinstake to the block
    coinstakeTx = CTransaction()
    coinstakeTx.from_hex(stake_tx_signed_raw_hex)
    block.vtx.append(coinstakeTx)

    # Add provided transactions to the block.
    # Don't add tx doublespending the coinstake input, unless fDoubleSpend=True
    for tx in vtx:
        if not fDoubleSpend and tx.spends(prevout):
            continue
        block.vtx.append(tx)

    # Get correct MerkleRoot and rehash block
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()

    # sign block with block signing key and return it
    block.sign_block(block_sig_key)
    return block
def stake_next_block(self, node_id,
                     stakeableUtxos,
                     btime=None,
                     privKeyWIF=None,
                     vtx=[],
                     fDoubleSpend=False):
    """Stake a block on top of node_id's current tip via stake_block."""
    assert_greater_than(len(self.nodes), node_id)
    node = self.nodes[node_id]
    # Block version 8 once the v5 shield (sapling) upgrade is active, else 7.
    sapling_active = node.getblockchaininfo()['upgrades']['v5 shield']['status'] == 'active'
    block_version = 8 if sapling_active else 7
    tip_height = node.getblockcount()
    tip_hash = node.getblockhash(tip_height)
    tip_block = node.getblock(tip_hash, True)
    # !TODO: update this if the block contains sapling txes
    sapling_root = tip_block['finalsaplingroot']
    return self.stake_block(node_id,
                            block_version,
                            tip_height + 1,
                            tip_hash,
                            tip_block['stakeModifier'],
                            sapling_root,
                            stakeableUtxos,
                            btime,
                            privKeyWIF,
                            vtx,
                            fDoubleSpend)
def check_tx_in_chain(self, node_id, txid):
    """Assert that txid has at least one confirmation on node_id."""
    assert_greater_than(len(self.nodes), node_id)
    rawTx = self.nodes[node_id].getrawtransaction(txid, 1)
    assert_greater_than(rawTx["confirmations"], 0)

def spend_inputs(self, node_id, inputs, outputs):
    """ auxiliary function used by spend_utxo / spend_utxos

    Creates, signs and broadcasts a raw transaction on node_id.
    :return: (string) txid on success, "" if signing was incomplete.
    """
    assert_greater_than(len(self.nodes), node_id)
    rpc_conn = self.nodes[node_id]
    spendingTx = rpc_conn.createrawtransaction(inputs, outputs)
    spendingTx_signed = rpc_conn.signrawtransaction(spendingTx)
    if spendingTx_signed["complete"]:
        txhash = rpc_conn.sendrawtransaction(spendingTx_signed["hex"])
        return txhash
    else:
        return ""
def spend_utxo(self, node_id, utxo, recipient=''):
    """ spend amount from previously unspent output to a provided address
    :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
           utxo: (JSON) returned from listunspent used as input
           recipient: (string) destination address (new one if not provided)
    :return: txhash: (string) tx hash if successful, empty string otherwise
    """
    assert_greater_than(len(self.nodes), node_id)
    node = self.nodes[node_id]
    if recipient == '':
        recipient = node.getnewaddress()
    tx_inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
    tx_outputs = {recipient: float(utxo["amount"]) - DEFAULT_FEE}
    return self.spend_inputs(node_id, tx_inputs, tx_outputs)
def spend_utxos(self, node_id, utxo_list, recipient='', fMultiple=False):
    """ spend utxos to provided address or a new generated one.
    :param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
           utxo_list: (JSON list) returned from listunspent used as input
           recipient: (string, optional) destination address (new one if not provided)
           fMultiple: (boolean, optional, default=false) spend each utxo on a different tx
    :return: txHashes: (string list) list of hashes of completed txs
    """
    assert_greater_than(len(self.nodes), node_id)
    rpc_conn = self.nodes[node_id]
    txHashes = []
    # If no recipient is given, create a new one
    if recipient == '':
        recipient = rpc_conn.getnewaddress()
    # If fMultiple=True send one tx for each utxo
    if fMultiple:
        for utxo in utxo_list:
            txHash = self.spend_utxo(node_id, utxo, recipient)
            if txHash != "":
                txHashes.append(txHash)
    # Otherwise make a single tx with all the inputs
    else:
        inputs = [{"txid": x["txid"], "vout": x["vout"]} for x in utxo_list]
        out_amount = sum([float(x["amount"]) for x in utxo_list]) - DEFAULT_FEE
        # recipient is always set at this point (defaulted above), so no
        # second getnewaddress check is needed here.
        outputs = {recipient: out_amount}
        txHash = self.spend_inputs(node_id, inputs, outputs)
        if txHash != "":
            txHashes.append(txHash)
    return txHashes
def generate_pos(self, node_id, btime=None):
    """ stakes a block using generate on nodes[node_id]

    Retries on "Couldn't create new block" (staking legitimately fails until
    a valid kernel is found). After 60 failures it re-checks that the node
    can still stake; each retry advances the clock by one second (mocktime
    if btime is given, otherwise real sleep) so a new kernel can be tried.

    :param node_id: (int) index of the staking node in self.nodes.
           btime: (int or None) current mock block time.
    :return: (int or None) updated mock time, or None when btime was None.
    """
    assert_greater_than(len(self.nodes), node_id)
    rpc_conn = self.nodes[node_id]
    # Preconditions: wallet unlocked with a stakeable balance.
    ss = rpc_conn.getstakingstatus()
    assert ss["walletunlocked"]
    assert ss["stakeablecoins"] > 0
    assert ss["stakingbalance"] > 0.0
    if btime is not None:
        next_btime = btime + 60
    fStaked = False
    failures = 0
    while not fStaked:
        try:
            rpc_conn.generate(1)
            fStaked = True
        except JSONRPCException as e:
            if ("Couldn't create new block" in str(e)):
                failures += 1
                # couldn't generate block. check that this node can still stake (after 60 failures)
                if failures > 60:
                    ss = rpc_conn.getstakingstatus()
                    if not (ss["walletunlocked"] and ss["stakeablecoins"] > 0 and ss["stakingbalance"] > 0.0):
                        raise AssertionError("Node %d unable to stake!" % node_id)
                # try to stake one sec in the future
                if btime is not None:
                    btime += 1
                    set_node_times(self.nodes, btime)
                else:
                    time.sleep(1)
            else:
                raise e
    # block generated. adjust block time
    if btime is not None:
        btime = max(btime + 1, next_btime)
        set_node_times(self.nodes, btime)
        return btime
    else:
        return None
def generate_pow(self, node_id, btime=None):
    """ mines a PoW block using generate on nodes[node_id]

    :param node_id: (int) index of the mining node in self.nodes.
           btime: (int or None) current mock block time; advanced by 60s
                  on all nodes after the block when given.
    :return: (int or None) updated mock time, or None when btime was None.
    """
    assert_greater_than(len(self.nodes), node_id)
    self.nodes[node_id].generate(1)
    if btime is not None:
        btime += 60
        set_node_times(self.nodes, btime)
    return btime
def set_spork(self, node_id, sporkName, value):
    """Set spork `sporkName` to `value` via node_id (node must hold the spork key)."""
    assert_greater_than(len(self.nodes), node_id)
    return self.nodes[node_id].spork(sporkName, value)

def get_spork(self, node_id, sporkName):
    """Return the current value of spork `sporkName` as seen by node_id."""
    assert_greater_than(len(self.nodes), node_id)
    return self.nodes[node_id].spork("show")[sporkName]

def activate_spork(self, node_id, sporkName):
    """Turn the spork on by setting it to the activation sentinel time."""
    return self.set_spork(node_id, sporkName, SPORK_ACTIVATION_TIME)

def deactivate_spork(self, node_id, sporkName):
    """Turn the spork off by setting it to the deactivation sentinel time."""
    return self.set_spork(node_id, sporkName, SPORK_DEACTIVATION_TIME)

def is_spork_active(self, node_id, sporkName):
    """Return whether node_id currently considers spork `sporkName` active."""
    assert_greater_than(len(self.nodes), node_id)
    return self.nodes[node_id].spork("active")[sporkName]
def get_mn_lastseen(self, node, mnTxHash):
    """Return the 'lastseen' time of masternode mnTxHash as seen by node, or -1 if unknown."""
    mnData = node.listmasternodes(mnTxHash)
    if len(mnData) == 0:
        return -1
    return mnData[0]["lastseen"]

def get_mn_status(self, node, mnTxHash):
    """Return the status string of masternode mnTxHash as seen by node, or "" if unknown."""
    mnData = node.listmasternodes(mnTxHash)
    if len(mnData) == 0:
        return ""
    # A collateral hash must match exactly one masternode entry.
    assert_equal(len(mnData), 1)
    return mnData[0]["status"]
def advance_mocktime(self, secs):
    """Advance the shared mocktime by `secs` seconds on all nodes."""
    self.mocktime += secs
    set_node_times(self.nodes, self.mocktime)
    time.sleep(1)

def wait_until_mnsync_finished(self):
    """Block until every node reports masternode-sync completion (asset code 999).

    Polls mnsync "status" per node, nudging mocktime forward between rounds;
    raises AssertionError if sync has not completed within ~45 seconds.
    """
    SYNC_FINISHED = [999] * self.num_nodes
    synced = [-1] * self.num_nodes
    time.sleep(2)
    timeout = time.time() + 45
    while synced != SYNC_FINISHED and time.time() < timeout:
        for i in range(self.num_nodes):
            if synced[i] != SYNC_FINISHED[i]:
                synced[i] = self.nodes[i].mnsync("status")["RequestedMasternodeAssets"]
        if synced != SYNC_FINISHED:
            self.advance_mocktime(2)
            time.sleep(5)
    if synced != SYNC_FINISHED:
        raise AssertionError("Unable to complete mnsync: %s" % str(synced))
def wait_until_mn_status(self, status, mnTxHash, _timeout, orEmpty=False, with_ping_mns=None):
    """Poll all nodes until each reports masternode mnTxHash with `status`.

    :param status: (string) expected masternode status (e.g. "ENABLED").
           mnTxHash: (string) collateral txid identifying the masternode.
           _timeout: (int) max seconds to wait before raising AssertionError.
           orEmpty: (bool) also accept the masternode disappearing ("").
           with_ping_mns: (list) masternode nodes to ping between polls.
    Raises AssertionError when the timeout elapses first.
    """
    # Avoid the shared mutable default-argument pitfall.
    if with_ping_mns is None:
        with_ping_mns = []
    nodes_status = [None] * self.num_nodes

    def node_synced(i):
        return nodes_status[i] == status or (orEmpty and nodes_status[i] == "")

    def all_synced():
        for i in range(self.num_nodes):
            if not node_synced(i):
                return False
        return True

    time.sleep(2)
    timeout = time.time() + _timeout
    while not all_synced() and time.time() < timeout:
        for i in range(self.num_nodes):
            if not node_synced(i):
                nodes_status[i] = self.get_mn_status(self.nodes[i], mnTxHash)
        if not all_synced():
            time.sleep(2)
            self.send_pings(with_ping_mns)
    if not all_synced():
        # (fixed duplicated word "get get" in the original message)
        strErr = "Unable to get status \"%s\" on all nodes for mnode %s. Current: %s" % (
            status, mnTxHash, str(nodes_status))
        raise AssertionError(strErr)
def wait_until_mn_enabled(self, mnTxHash, _timeout, _with_ping_mns=[]):
    # Convenience wrapper: wait for "ENABLED" status on all nodes.
    self.wait_until_mn_status("ENABLED", mnTxHash, _timeout, with_ping_mns=_with_ping_mns)

def wait_until_mn_preenabled(self, mnTxHash, _timeout, _with_ping_mns=[]):
    # Convenience wrapper: wait for "PRE_ENABLED" status on all nodes.
    self.wait_until_mn_status("PRE_ENABLED", mnTxHash, _timeout, with_ping_mns=_with_ping_mns)

def wait_until_mn_vinspent(self, mnTxHash, _timeout, _with_ping_mns=[]):
    # Convenience wrapper: wait for "VIN_SPENT" (or the entry disappearing, orEmpty=True).
    self.wait_until_mn_status("VIN_SPENT", mnTxHash, _timeout, orEmpty=True, with_ping_mns=_with_ping_mns)

def controller_start_masternode(self, mnOwner, masternodeAlias):
    """Start masternode `masternodeAlias` from controller wallet mnOwner; assert success."""
    ret = mnOwner.startmasternode("alias", "false", masternodeAlias, True)
    assert_equal(ret["result"], "success")
    time.sleep(1)

def send_pings(self, mnodes):
    """Send a masternode ping from each node in mnodes; 'too early' replies are tolerated."""
    for node in mnodes:
        sent = node.mnping()["sent"]
        if sent != "YES" and "Too early to send Masternode Ping" not in sent:
            raise AssertionError("Unable to send ping: \"sent\" = %s" % sent)
        time.sleep(1)
def stake_and_sync(self, node_id, num_blocks):
    """Stake num_blocks PoS blocks on node_id, syncing all nodes after each."""
    for i in range(num_blocks):
        self.mocktime = self.generate_pos(node_id, self.mocktime)
        self.sync_blocks()
        time.sleep(1)

def stake_and_ping(self, node_id, num_blocks, with_ping_mns=[]):
    # stake blocks and send mn pings in between
    for i in range(num_blocks):
        self.stake_and_sync(node_id, 1)
        if len(with_ping_mns) > 0:
            self.send_pings(with_ping_mns)
def setupMasternode(self,
                    mnOwner,
                    miner,
                    masternodeAlias,
                    mnOwnerDirPath,
                    mnRemotePos,
                    masternodePrivKey):
    """Fund, configure and lock the collateral for a new masternode.

    :param mnOwner: controller wallet node that will own the collateral.
           miner: node used to fund the owner and confirm the transaction.
           masternodeAlias: (string) alias written to masternode.conf.
           mnOwnerDirPath: (string) owner datadir holding masternode.conf.
           mnRemotePos: (int) index of the remote mn node (for its p2p port).
           masternodePrivKey: (string) masternode privkey for the conf line.
    :return: (string) txid of the 10000-coin collateral transaction.
    """
    self.log.info("adding balance to the mn owner for " + masternodeAlias + "..")
    mnAddress = mnOwner.getnewaddress(masternodeAlias)
    # send to the owner the collateral tx cost
    collateralTxId = miner.sendtoaddress(mnAddress, Decimal('10000'))
    # confirm and verify reception
    self.stake_and_sync(self.nodes.index(miner), 1)
    assert_greater_than_or_equal(mnOwner.getbalance(), Decimal('10000'))
    assert_greater_than(mnOwner.getrawtransaction(collateralTxId, 1)["confirmations"], 0)
    self.log.info("all good, creating masternode " + masternodeAlias + "..")
    # get the collateral output using the RPC command
    mnCollateralOutputIndex = -1
    for mnc in mnOwner.getmasternodeoutputs():
        if collateralTxId == mnc["txhash"]:
            mnCollateralOutputIndex = mnc["outputidx"]
            break
    assert_greater_than(mnCollateralOutputIndex, -1)
    self.log.info("collateral accepted for "+ masternodeAlias +". Updating masternode.conf...")
    # verify collateral confirmed
    # conf line format: alias ip:port mnPrivKey collateralTxId collateralOutputIdx
    confData = "%s %s %s %s %d" % (
        masternodeAlias, "127.0.0.1:" + str(p2p_port(mnRemotePos)),
        masternodePrivKey, collateralTxId, mnCollateralOutputIndex)
    destinationDirPath = mnOwnerDirPath
    destPath = os.path.join(destinationDirPath, "masternode.conf")
    with open(destPath, "a+") as file_object:
        file_object.write("\n")
        file_object.write(confData)
    # lock the collateral
    mnOwner.lockunspent(False, [{"txid": collateralTxId, "vout": mnCollateralOutputIndex}])
    # return the collateral id
    return collateralTxId
### ------------------------------------------------------
class ComparisonTestFramework(t_notesTestFramework):
    """Test framework for doing p2p comparison testing

    Sets up some t_notesd binaries:
    - 1 binary: test binary
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries"""

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        # NOTE(review): uses optparse-style add_option — assumes the framework's
        # parser exposes this API; confirm against the main() option parser.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "t_notesd"),
                          help="t_notesd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "t_notesd"),
                          help="t_notesd binary to use for reference nodes (if any)")

    def setup_network(self):
        extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        # A subclass may override extra_args before setup.
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args,
                       binary=[self.options.testbinary] +
                       [self.options.refbinary] * (self.num_nodes - 1))
        self.start_nodes()
class SkipTest(Exception):
    """This exception is raised to skip a test.

    The reason is passed to Exception (so str(e)/tracebacks show it) and
    also kept in the legacy ``message`` attribute for existing callers.
    """
    def __init__(self, message):
        # Without this super() call, str(e) would be empty.
        super().__init__(message)
        self.message = message
'''
t_notesTestFramework extensions
'''
class t_notesTier2TestFramework(t_notesTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 5
self.extra_args = [[],
["-listen", "-externalip=127.0.0.1"],
[],
["-listen", "-externalip=127.0.0.1"],
["-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi"]]
self.enable_mocktime()
self.ownerOnePos = 0
self.remoteOnePos = 1
self.ownerTwoPos = 2
self.remoteTwoPos = 3
self.minerPos = 4
self.masternodeOneAlias = "mnOne"
self.masternodeTwoAlias = "mntwo"
self.mnOnePrivkey = "9247iC59poZmqBYt9iDh9wDam6v9S1rW5XekjLGyPnDhrDkP4AK"
self.mnTwoPrivkey = "92Hkebp3RHdDidGZ7ARgS4orxJAGyFUPDXNqtsYsiwho1HGVRbF"
# Updated in setup_2_masternodes_network() to be called at the start of run_test
self.ownerOne = None # self.nodes[self.ownerOnePos]
self.remoteOne = None # self.nodes[self.remoteOnePos]
self.ownerTwo = None # self.nodes[self.ownerTwoPos]
self.remoteTwo = None # self.nodes[self.remoteTwoPos]
self.miner = None # self.nodes[self.minerPos]
self.mnOneTxHash = ""
self.mnTwoTxHash = ""
def send_3_pings(self):
self.advance_mocktime(30)
self.send_pings([self.remoteOne, self.remoteTwo])
self.stake(1, [self.remoteOne, self.remoteTwo])
self.advance_mocktime(30)
self.send_pings([self.remoteOne, self.remoteTwo])
time.sleep(2)
def stake(self, num_blocks, with_ping_mns=[]):
self.stake_and_ping(self.minerPos, num_blocks, with_ping_mns)
def controller_start_all_masternodes(self):
self.controller_start_masternode(self.ownerOne, self.masternodeOneAlias)
self.controller_start_masternode(self.ownerTwo, self.masternodeTwoAlias)
self.wait_until_mn_preenabled(self.mnOneTxHash, 40)
self.wait_until_mn_preenabled(self.mnTwoTxHash, 40)
self.log.info("masternodes started, waiting until both get enabled..")
self.send_3_pings()
self.wait_until_mn_enabled(self.mnOneTxHash, 120, [self.remoteOne, self.remoteTwo])
self.wait_until_mn_enabled(self.mnTwoTxHash, 120, [self.remoteOne, self.remoteTwo])
self.log.info("masternodes enabled and running properly!")
def advance_mocktime_and_stake(self, secs_to_add):
self.advance_mocktime(secs_to_add - 60 + 1)
self.mocktime = self.generate_pos(self.minerPos, self.mocktime)
time.sleep(2)
def setup_2_masternodes_network(self):
    """Bring up the 5-node topology used by masternode tests: two
    owner/remote masternode pairs plus a miner.  Mines and stakes the
    initial chain, funds and configures both masternode collaterals,
    initializes the remote masternodes, waits for tier-two sync, and
    finally starts both masternodes from their controllers.
    """
    # Resolve the node handles that were declared as None placeholders
    # alongside the *Pos indices.
    self.ownerOne = self.nodes[self.ownerOnePos]
    self.remoteOne = self.nodes[self.remoteOnePos]
    self.ownerTwo = self.nodes[self.ownerTwoPos]
    self.remoteTwo = self.nodes[self.remoteTwoPos]
    self.miner = self.nodes[self.minerPos]
    # Datadirs of the two owner nodes; their regtest subdirs are handed to
    # setupMasternode below (presumably where the masternode config is
    # written -- confirm against setupMasternode).
    ownerOneDir = os.path.join(self.options.tmpdir, "node0")
    ownerTwoDir = os.path.join(self.options.tmpdir, "node2")
    self.log.info("generating 259 blocks..")
    # First mine 250 PoW blocks
    for i in range(250):
        self.mocktime = self.generate_pow(self.minerPos, self.mocktime)
    self.sync_blocks()
    # Then start staking
    self.stake(9)
    self.log.info("masternodes setup..")
    # setup first masternode node, corresponding to nodeOne
    self.mnOneTxHash = self.setupMasternode(
        self.ownerOne,
        self.miner,
        self.masternodeOneAlias,
        os.path.join(ownerOneDir, "regtest"),
        self.remoteOnePos,
        self.mnOnePrivkey)
    # setup second masternode node, corresponding to nodeTwo
    self.mnTwoTxHash = self.setupMasternode(
        self.ownerTwo,
        self.miner,
        self.masternodeTwoAlias,
        os.path.join(ownerTwoDir, "regtest"),
        self.remoteTwoPos,
        self.mnTwoPrivkey)
    self.log.info("masternodes setup completed, initializing them..")
    # now both are configured, let's activate the masternodes
    self.stake(1)
    # Short real-time pause plus a mock-time nudge, presumably to let the
    # collateral confirmations settle -- TODO confirm.
    time.sleep(3)
    self.advance_mocktime(10)
    remoteOnePort = p2p_port(self.remoteOnePos)
    remoteTwoPort = p2p_port(self.remoteTwoPos)
    # Hand each remote node its masternode key and reachable address so it
    # can act as the masternode daemon.
    self.remoteOne.initmasternode(self.mnOnePrivkey, "127.0.0.1:"+str(remoteOnePort))
    self.remoteTwo.initmasternode(self.mnTwoPrivkey, "127.0.0.1:"+str(remoteTwoPort))
    # wait until mnsync complete on all nodes
    self.stake(1)
    self.wait_until_mnsync_finished()
    self.log.info("tier two synced! starting masternodes..")
    # Now everything is set, can start both masternodes
    self.controller_start_all_masternodes()
| test/functional/test_framework/test_framework.py | 53,858 | Test framework for doing p2p comparison testing
Sets up some t_notesd binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries
This exception is raised to skip a test
Base class for a t_notes test script.
Individual t_notes test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods.
Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method
Initialize a pre-mined blockchain for use by the test.
Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
Instantiate TestNode objects
Override this method to add command-line options to the test
Clones cache subdir from node 1 to nodes from 'from_num' to MAX_NODES
Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
Sets mocktime to Tuesday, October 31, 2017 6:21:20 PM GMT (1572546080)
stakes a block using generate on nodes[node_id]
stakes a block using generate on nodes[node_id]
get prevouts (map) for each utxo in a list
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxos.
utxo_list: (JSON list) utxos returned from listunspent used as input
:return: prevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint)
to (amount, prevScript, timeBlockFrom).
Join the (previously split) network halves together.
Main function. This should not be overridden by the subclass test scripts.
makes a list of CTransactions each spending an input from spending PrevOuts to an output to_pubKey
:param node_id: (int) index of the CTestNode used as rpc connection. Must own spendingPrevOuts.
spendingPrevouts: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint)
to (amount, prevScript, timeBlockFrom).
to_pubKey (bytes) recipient public key
:return: block_txes: ([CTransaction] list)
Stop and start a test node
Tests must override this method to define test logic
Tests must override this method to change default values for number of nodes, topology, etc
Override this method to customize blockchain setup
Override this method to customize test network topology
Override this method to customize test node setup
auxiliary function used by spend_utxo / spend_utxos
spend amount from previously unspent output to a provided address
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
utxo: (JSON) returned from listunspent used as input
recipient: (string) destination address (new one if not provided)
:return: txhash: (string) tx hash if successful, empty string otherwise
spend utxos to provided list of addresses or 10 new generate ones.
:param node_id: (int) index of the CTestNode used as rpc connection. Must own the utxo.
utxo_list: (JSON list) returned from listunspent used as input
recipient: (string, optional) destination address (new one if not provided)
fMultiple: (boolean, optional, default=false) spend each utxo on a different tx
:return: txHashes: (string list) list of hashes of completed txs
Split the network of four nodes into nodes 0/1 and 2/3.
manually stakes a block selecting the coinstake input from a list of candidates
:param node_id: (int) index of the CTestNode used as rpc connection. Must own stakeableUtxos.
nVersion: (int) version of the block being produced (7 or 8)
nHeight: (int) height of the block being produced
prevHash: (string) hex string of the previous block hash
prevModifier (string) hex string of the previous block stake modifier
finalsaplingroot (string) hex string of the previous block sapling root (blocks V8)
stakeableUtxos: ({bytes --> (int, bytes, int)} dictionary)
maps CStake "uniqueness" (i.e. serialized COutPoint)
to (amount, prevScript, timeBlockFrom).
startTime: (int) epoch time to be used as blocktime (iterated in solve_stake)
privKeyWIF: (string) private key to be used for staking/signing
If empty string, it will be used the pk from the stake input
(dumping the sk from rpc_conn). If None, then the DUMMY_KEY will be used.
vtx: ([CTransaction] list) transactions to add to block.vtx
fDoubleSpend: (bool) whether any tx in vtx is allowed to spend the coinstake input
:return: block: (CBlock) block produced, must be manually relayed
Calls stake_block appending to the current tip
Start a t_notesd
Start multiple t_notesds
Stop a t_notesd test node
Stop multiple t_notesd test nodes
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
Wait until everybody has the same transactions in their memory
pools
Base class for RPC testing.
!/usr/bin/env python3 Copyright (c) 2014-2017 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Wait for up to 600 seconds for the RPC server to respond Set up temp directory and start logging Methods to override in subclass test scripts. Connect the nodes as a "chain". This allows us to split the network between nodes 1 and 2 to get two halves that can work on competing chains. Topology looks like this: node0 <-- node1 <-- node2 <-- node3 If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To ensure block propagation, all nodes will establish outgoing connections toward node0. See fPreferredDownload in net_processing. If further outbound connections are needed, they can be added at the beginning of the test with e.g. connect_nodes(self.nodes[1], 2) Public helper methods. These can be accessed by the subclass test scripts. Check wallet version If one node failed to start, stop the others Issue RPC to stop nodes Wait for nodes to stop node must have shutdown Check that each peer has at least one connection Check that each peer has at least one connection Private helper methods. These should not be accessed by the subclass test scripts. Add logger and logging handlers Create file handler to log all messages Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int Format logs the same as t_notesd's debug.log with microprecision (so log files can be concatenated and sorted) add the handlers to the logger Overwrite port/rpcport in t-notes.conf Overwrite port/rpcport in t-notes.conf nodes directories exist. 
check if the first one has the .incomplete flagfile migrate old cache dir remove everything except pow subdir no cachedir found Add .incomplete flagfile (removed at the end during clean_cache_subdir) Wait for RPC connections to be ready Copy cache for nodes 5 to MAX_NODES POW Cache Create a 200-block-long chain; each of the 4 first nodes gets 25 mature blocks and 25 immature. Note: To preserve compatibility with older versions of initialize_chain, only 4 nodes will generate coins. blocks are created with timestamps 1 minutes apart starting from 331 minutes in the past Create cache directories, run t_notesds: Mine the blocks Must sync before next peer starts generating blocks Shut them down, and clean up cache directories: Copy requested cache to tempdir T_Notes Specific TestFramework skip immature coins sign tx add signed tx to the list Get start time to stake Create empty block with coinbase Find valid kernel hash - iterates stakeableUtxos, then block.nTime Use dummy key replace coinstake output script Use pk of the input. Ask sk from rpc_conn Use the provided privKeyWIF (cold staking). export the corresponding private key to sign block Sign coinstake TX and add it to the block Add coinstake to the block Add provided transactions to the block. Don't add tx doublespending the coinstake input, unless fDoubleSpend=True Get correct MerkleRoot and rehash block sign block with block signing key and return it !TODO: update this if the block contains sapling txes If no recipient is given, create a new one If fMultiple=True send one tx for each utxo Otherwise make a single tx with all the inputs couldn't generate block. check that this node can still stake (after 60 failures) try to stake one sec in the future block generated. 
adjust block time stake blocks and send mn pings in between send to the owner the collateral tx cost confirm and verify reception get the collateral output using the RPC command verify collateral confirmed lock the collateral return the collateral id ------------------------------------------------------ Updated in setup_2_masternodes_network() to be called at the start of run_test self.nodes[self.ownerOnePos] self.nodes[self.remoteOnePos] self.nodes[self.ownerTwoPos] self.nodes[self.remoteTwoPos] self.nodes[self.minerPos] First mine 250 PoW blocks Then start staking setup first masternode node, corresponding to nodeOne setup second masternode node, corresponding to nodeTwo now both are configured, let's activate the masternodes wait until mnsync complete on all nodes Now everything is set, can start both masternodes | 10,359 | en | 0.785993 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.