text
stringlengths 2
999k
|
|---|
import asyncio
import string
from abc import ABC
from datetime import date, datetime
from random import SystemRandom
from typing import List, Optional
import pytest
from arango import ArangoClient
from arango.database import StandardDatabase
from arango.typings import Json
from networkx import DiGraph, MultiDiGraph
from core.analytics import AnalyticsEventSender, CoreEvent, InMemoryEventSender
from core.db.async_arangodb import AsyncArangoDB
from core.db.graphdb import ArangoGraphDB, GraphDB, EventGraphDB
from core.db.model import QueryModel, GraphUpdate
from core.error import ConflictingChangeInProgress, NoSuchChangeError, InvalidBatchUpdate
from core.model.adjust_node import NoAdjust
from core.model.graph_access import GraphAccess, EdgeType, Section
from core.model.model import Model, ComplexKind, Property, Kind, SyntheticProperty
from core.model.typed_model import from_js, to_js
from core.query.model import Query, P, Navigation
from core.query.query_parser import parse_query
from core.types import JsonElement
from core.util import AccessJson, utc, value_in_path, AccessNone
# noinspection PyUnresolvedReferences
from tests.core.analytics import event_sender
class BaseResource(ABC):
    """Smallest common denominator of the test resources: an identifier plus a kind."""

    def __init__(self, identifier: str) -> None:
        # Identifiers are normalized to strings regardless of the input type.
        self.identifier = str(identifier)

    # Subclasses are expected to override this with their concrete kind name.
    def kind(self) -> str:
        pass
class Foo(BaseResource):
    """Test resource of kind "foo" with a few typed attributes."""

    def __init__(
        self,
        identifier: str,
        name: Optional[str] = None,
        some_int: int = 0,
        some_string: str = "hello",
        # NOTE: this default is evaluated once at import time, so every Foo
        # created without an explicit now_is shares the same timestamp. The
        # merge tests rely on repeated create_graph() calls producing
        # byte-identical node content (expecting GraphUpdate(0, 0, 0, 0, 0, 0)),
        # so do not "fix" this into a per-instance utc() call.
        now_is: datetime = utc(),
        ctime: Optional[datetime] = None,
    ) -> None:
        super().__init__(identifier)
        self.name = name
        self.some_int = some_int
        self.some_string = some_string
        self.now_is = now_is
        self.ctime = ctime

    def kind(self) -> str:
        return "foo"
class Bla(BaseResource):
    """Test resource of kind "bla"; used as the leaf nodes of the generated graphs."""

    def __init__(
        self,
        identifier: str,
        name: Optional[str] = None,
        # NOTE: evaluated once at import time; like Foo.now_is this keeps
        # repeated graph creations identical within one test run.
        now: date = date.today(),
        f: int = 23,
        g: Optional[List[int]] = None,
    ) -> None:
        super().__init__(identifier)
        self.name = name
        self.now = now
        self.f = f
        # The fallback list is created per instance, avoiding shared mutable state.
        self.g = g if g is not None else list(range(0, 5))

    def kind(self) -> str:
        return "bla"
def create_graph(bla_text: str, width: int = 10) -> MultiDiGraph:
    """Build a test graph: root -> collector -> sub_root -> `width` foos, each with `width` blas.

    The collector node is flagged as a replace node; every bla carries `bla_text` as name.
    """
    graph = MultiDiGraph()

    def put_edge(src: str, dst: str, edge_type: str = EdgeType.dependency) -> None:
        # The edge key encodes (from, to, type) so edges of different types can coexist.
        graph.add_edge(src, dst, GraphAccess.edge_key(src, dst, edge_type), edge_type=edge_type)

    def put_node(uid: str, kind: str, node: Optional[Json] = None, replace: bool = False) -> None:
        reported = {**(node if node else to_json(Foo(uid))), "kind": kind}
        graph.add_node(
            uid,
            id=uid,
            kinds=[kind],
            reported=reported,
            desired={"node_id": uid},
            metadata={"node_id": uid},
            replace=replace,
        )

    # root -> collector -> sub_root -> **rest
    put_node("root", "graph_root")
    put_node("collector", "cloud", replace=True)
    put_node("sub_root", "foo")
    put_edge("root", "collector")
    put_edge("collector", "sub_root")
    for outer in range(width):
        outer_id = str(outer)
        put_node(outer_id, "foo")
        put_edge("sub_root", outer_id)
        for inner in range(width):
            inner_id = f"{outer}_{inner}"
            put_node(inner_id, "bla", node=to_json(Bla(inner_id, name=bla_text)))
            put_edge(outer_id, inner_id)
    return graph
def create_multi_collector_graph(width: int = 3) -> MultiDiGraph:
    """Build a two-cloud hierarchy: cloud -> account -> region -> parent -> child.

    Regions are flagged replace=True (they become the merge roots); every child
    also gets a delete edge pointing back up to its parent level.
    """
    graph = MultiDiGraph()

    def add_edge(from_node: str, to_node: str, edge_type: str = EdgeType.dependency) -> None:
        key = GraphAccess.edge_key(from_node, to_node, edge_type)
        graph.add_edge(from_node, to_node, key, edge_type=edge_type)

    def add_node(node_id: str, kind: str, replace: bool = False) -> str:
        reported = {**to_json(Foo(node_id)), "id": node_id, "name": node_id, "kind": kind}
        graph.add_node(
            node_id,
            id=node_id,
            reported=reported,
            desired={},
            metadata={},
            hash="123",
            replace=replace,
            kind=kind,
            kinds=[kind],
            kinds_set={kind},
        )
        return node_id

    root = add_node("root", "graph_root")
    for cloud_num in range(0, 2):
        cloud = add_node(f"cloud_{cloud_num}", "cloud")
        add_edge(root, cloud)
        for account_num in range(0, 2):
            aid = f"{cloud_num}:{account_num}"
            account = add_node(f"account_{aid}", "account")
            add_edge(cloud, account)
            # delete edges run opposite to the dependency direction
            add_edge(account, cloud, EdgeType.delete)
            for region_num in range(0, 2):
                rid = f"{aid}:{region_num}"
                # regions are the subgraph roots used for merging (replace=True)
                region = add_node(f"region_{rid}", "region", replace=True)
                add_edge(account, region)
                add_edge(region, account, EdgeType.delete)
                for parent_num in range(0, width):
                    pid = f"{rid}:{parent_num}"
                    parent = add_node(f"parent_{pid}", "parent")
                    add_edge(region, parent)
                    add_edge(parent, region, EdgeType.delete)
                    for child_num in range(0, width):
                        cid = f"{pid}:{child_num}"
                        child = add_node(f"child_{cid}", "child")
                        add_edge(parent, child)
                        add_edge(child, parent, EdgeType.delete)
    return graph
@pytest.fixture
def foo_kinds() -> List[Kind]:
    """Kind definitions for the tests: base, foo/bla, plus the cloud hierarchy kinds."""
    base = ComplexKind(
        "base",
        [],
        [
            Property("identifier", "string", required=True),
            Property("kind", "string", required=True),
            Property("ctime", "datetime"),
        ],
    )
    foo = ComplexKind(
        "foo",
        ["base"],
        [
            Property("name", "string"),
            Property("some_int", "int32"),
            Property("some_string", "string"),
            Property("now_is", "datetime"),
            Property("ctime", "datetime"),
            # synthetic property: age is derived from ctime at query time
            Property("age", "trafo.duration_to_datetime", False, SyntheticProperty(["ctime"])),
        ],
    )
    bla = ComplexKind(
        "bla",
        ["base"],
        [
            Property("name", "string"),
            Property("now", "date"),
            Property("f", "int32"),
            Property("g", "int32[]"),
        ],
    )
    # hierarchy kinds all inherit foo's properties
    cloud = ComplexKind("cloud", ["foo"], [])
    account = ComplexKind("account", ["foo"], [])
    region = ComplexKind("region", ["foo"], [])
    parent = ComplexKind("parent", ["foo"], [])
    child = ComplexKind("child", ["foo"], [])
    return [base, foo, bla, cloud, account, region, parent, child]
@pytest.fixture
def foo_model(foo_kinds: List[Kind]) -> Model:
    """Model assembled from the foo_kinds fixture."""
    model = Model.from_kinds(foo_kinds)
    return model
@pytest.fixture
def test_db() -> StandardDatabase:
    """Handle to a local ArangoDB "test" database, creating the user/db on first use."""
    # Initialize the client for ArangoDB.
    client = ArangoClient(hosts="http://localhost:8529")
    # bootstrap via the system database (assumes root/root credentials on the local instance)
    system = client.db("_system", username="root", password="root")
    if not system.has_user("test"):
        system.create_user("test", "test", True)
    if not system.has_database("test"):
        system.create_database("test", [{"username": "test", "password": "test", "active": True}])
    # Connect to "test" database as "test" user.
    return client.db("test", username="test", password="test")
@pytest.fixture
async def graph_db(test_db: StandardDatabase) -> ArangoGraphDB:
    """Graph DB on namespace "ns" with a fresh schema and no in-progress updates."""
    async_db = AsyncArangoDB(test_db)
    graph_db = ArangoGraphDB(async_db, "ns", NoAdjust())
    await graph_db.create_update_schema()
    # clear any in-progress change markers left over from previous runs
    await async_db.truncate(graph_db.in_progress)
    return graph_db
@pytest.fixture
async def filled_graph_db(graph_db: ArangoGraphDB, foo_model: Model) -> ArangoGraphDB:
    """Graph DB wiped and pre-populated with create_graph("yes or no")."""
    # drop any cached model definitions so foo_model is authoritative
    if await graph_db.db.has_collection("model"):
        graph_db.db.collection("model").truncate()
    await graph_db.wipe()
    await graph_db.merge_graph(create_graph("yes or no"), foo_model)
    return graph_db
@pytest.fixture
async def event_graph_db(filled_graph_db: ArangoGraphDB, event_sender: AnalyticsEventSender) -> EventGraphDB:
    """Filled graph DB wrapped so that every mutation emits an analytics event."""
    wrapped = EventGraphDB(filled_graph_db, event_sender)
    return wrapped
async def load_graph(db: GraphDB, model: Model, base_id: str = "sub_root") -> DiGraph:
    """Load the whole subtree below the foo node identified by base_id as a graph."""
    query = Query.by("foo", P("identifier") == base_id).traverse_out(0, Navigation.Max)
    return await db.query_graph(QueryModel(query.on_section("reported"), model))
@pytest.mark.asyncio
async def test_update_merge_batched(graph_db: ArangoGraphDB, foo_model: Model, test_db: StandardDatabase) -> None:
    """Batched merges stage changes in temp collections until committed or aborted."""
    md = foo_model
    await graph_db.wipe()
    # random batch id so reruns do not collide with stale batch state
    batch_id = "".join(SystemRandom().choice(string.ascii_letters) for _ in range(12))
    g = create_graph("yes or no")
    # empty database: all changes are written to a temp table
    assert await graph_db.merge_graph(g, foo_model, batch_id, True) == (
        ["collector"],
        GraphUpdate(112, 1, 0, 112, 0, 0),
    )
    # nothing is visible in the main collections before the commit
    assert len((await load_graph(graph_db, md)).nodes) == 0
    # not allowed to commit an unknown batch
    with pytest.raises(NoSuchChangeError):
        await graph_db.commit_batch_update("does_not_exist")
    # commit the batch and see the changes reflected in the database
    await graph_db.commit_batch_update(batch_id)
    assert len((await load_graph(graph_db, md)).nodes) == 111
    # ensure that all temp tables are removed
    assert len(list(filter(lambda c: c["name"].startswith("temp_"), test_db.collections()))) == 0
    # create a new batch that gets aborted: make sure all temp tables are gone
    batch_id = "will_be_aborted"
    await graph_db.merge_graph(g, foo_model, batch_id, True)
    await graph_db.abort_update(batch_id)
    assert len(list(filter(lambda c: c["name"].startswith("temp_"), test_db.collections()))) == 0
@pytest.mark.asyncio
async def test_merge_graph(graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """merge_graph computes the minimal set of node/edge inserts, updates and deletes."""
    await graph_db.wipe()

    def create(txt: str, width: int = 10) -> MultiDiGraph:
        return create_graph(txt, width=width)

    # the single replace node reported as merge root
    p = ["collector"]
    # empty database: all nodes and all edges have to be inserted, the root node is updated and the link to root added
    assert await graph_db.merge_graph(create("yes or no"), foo_model) == (p, GraphUpdate(112, 1, 0, 112, 0, 0))
    # exactly the same graph is updated: expect no changes
    assert await graph_db.merge_graph(create("yes or no"), foo_model) == (p, GraphUpdate(0, 0, 0, 0, 0, 0))
    # all bla entries have different content: expect 100 node updates, but no inserts or deletions
    assert await graph_db.merge_graph(create("maybe"), foo_model) == (p, GraphUpdate(0, 100, 0, 0, 0, 0))
    # the width of the graph is reduced: expect nodes and edges to be removed
    assert await graph_db.merge_graph(create("maybe", width=5), foo_model) == (p, GraphUpdate(0, 0, 80, 0, 0, 80))
    # going back to the previous graph: the same amount of nodes and edges is inserted
    assert await graph_db.merge_graph(create("maybe"), foo_model) == (p, GraphUpdate(80, 0, 0, 80, 0, 0))
    # updating with the same data again, does not perform any changes
    assert await graph_db.merge_graph(create("maybe"), foo_model) == (p, GraphUpdate(0, 0, 0, 0, 0, 0))
@pytest.mark.asyncio
async def test_merge_multi_graph(graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Merging a multi-collector graph reports one merge root per replace (region) node."""
    await graph_db.wipe()
    # nodes:
    # 2 collectors + 4 accounts + 8 regions + 24 parents + 72 children => 110 nodes to insert
    # 1 root which changes => 1 node to update
    # edges:
    # 110 dependency, 108 delete connections (missing: collector -> root) => 218 edge inserts
    nodes, info = await graph_db.merge_graph(create_multi_collector_graph(), foo_model)
    assert info == GraphUpdate(110, 1, 0, 218, 0, 0)
    # 8 regions are flagged replace=True => 8 merge roots
    assert len(nodes) == 8
    # doing the same thing again should do nothing
    nodes, info = await graph_db.merge_graph(create_multi_collector_graph(), foo_model)
    assert info == GraphUpdate(0, 0, 0, 0, 0, 0)
    assert len(nodes) == 8
@pytest.mark.asyncio
async def test_mark_update(filled_graph_db: ArangoGraphDB) -> None:
    """Parallel updates are allowed on siblings but rejected on overlapping subtrees."""
    db = filled_graph_db
    # make sure all changes are empty
    await db.db.truncate(db.in_progress)
    # change on 00 is allowed
    assert await db.mark_update(["00"], ["0", "sub_root", "root"], "update 00", False) is None
    # change on 01 is allowed
    assert await db.mark_update(["01"], ["0", "sub_root", "root"], "update 01", True) is None
    # same change id which tries to update the same subgraph root
    with pytest.raises(InvalidBatchUpdate):
        assert await db.mark_update(["01"], ["0", "sub_root", "root"], "update 01", True) is None
    # change on 0 is rejected, since there are changes "below" this node
    with pytest.raises(ConflictingChangeInProgress):
        await db.mark_update(["0"], ["sub_root"], "update 0 under node sub_root", False)
    # change on sub_root is rejected, since there are changes "below" this node
    with pytest.raises(ConflictingChangeInProgress):
        await db.mark_update(["sub_root"], ["root"], "update under node sub_root", False)
    # clean up for later tests
    await db.db.truncate(db.in_progress)
@pytest.mark.asyncio
async def test_query_list(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """query_list streams matching documents for traversal and is() queries."""
    # all blas below foo "9" that carry the default f == 23
    blas = Query.by("foo", P("identifier") == "9").traverse_out().filter("bla", P("f") == 23)
    async with await filled_graph_db.query_list(QueryModel(blas.on_section("reported"), foo_model)) as gen:
        result = [from_js(x["reported"], Bla) async for x in gen]
        assert len(result) == 10
    foos_or_blas = parse_query("is([foo, bla])")
    async with await filled_graph_db.query_list(QueryModel(foos_or_blas.on_section("reported"), foo_model)) as gen:
        result = [x async for x in gen]
        assert len(result) == 111  # 113 minus 1 graph_root, minus one cloud
@pytest.mark.asyncio
async def test_query_not(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Negated terms return the complement of the matching node set."""
    # select everything that is not foo --> should be blas
    blas = Query.by(Query.mk_term("foo").not_term())
    async with await filled_graph_db.query_list(QueryModel(blas.on_section("reported"), foo_model)) as gen:
        result = [from_js(x["reported"], Bla) async for x in gen]
        # 100 blas + graph_root + cloud node (neither is of kind foo)
        assert len(result) == 102
@pytest.mark.asyncio
async def test_query_graph(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Graph queries return the traversed structure; tags land only on matched nodes."""
    graph = await load_graph(filled_graph_db, foo_model)
    assert len(graph.edges) == 110
    assert len(graph.nodes.values()) == 111
    # filter data and tag result, and then traverse to the end of the graph in both directions
    around_me = Query.by("foo", P("identifier") == "9").tag("red").traverse_inout(start=0)
    graph = await filled_graph_db.query_graph(QueryModel(around_me.on_section("reported"), foo_model))
    assert len(set(graph.nodes)) == 12
    assert GraphAccess.root_id(graph) == "sub_root"
    assert list(graph.successors("sub_root"))[0] == "9"
    assert set(graph.successors("9")) == {f"9_{x}" for x in range(0, 10)}
    for node_id, node in graph.nodes.data(True):
        if node_id == "9":
            assert node["metadata"]["query_tag"] == "red"
        else:
            # BUG FIX: the tag is stored under "query_tag" (see the branch above);
            # the original checked "tag", which made this assertion vacuously true.
            assert "query_tag" not in node["metadata"]
@pytest.mark.asyncio
async def test_query_aggregate(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Aggregation queries group and count, including combined group variables."""
    agg_query = parse_query("aggregate(kind: count(identifier) as instances): is(foo)").on_section("reported")
    async with await filled_graph_db.query_aggregation(QueryModel(agg_query, foo_model)) as gen:
        # 10 foos + sub_root
        assert [x async for x in gen] == [{"group": {"reported.kind": "foo"}, "instances": 11}]
    agg_combined_var_query = parse_query(
        'aggregate("test_{kind}_{some_int}_{does_not_exist}" as kind: count(identifier) as instances): is("foo")'
    ).on_section("reported")
    async with await filled_graph_db.query_aggregation(QueryModel(agg_combined_var_query, foo_model)) as g:
        # unknown variables render as empty strings in the combined group name
        assert [x async for x in g] == [{"group": {"kind": "test_foo_0_"}, "instances": 11}]
@pytest.mark.asyncio
async def test_query_with_merge(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """merge_with_ancestors merges ancestor sections into every result document."""
    query = parse_query('(merge_with_ancestors="foo as foobar,bar"): is("bla")')
    async with await filled_graph_db.query_list(QueryModel(query, foo_model)) as cursor:
        async for bla in cursor:
            js = AccessJson(bla)
            assert "bar" in js.reported  # key exists
            assert "bar" in js.desired  # key exists
            assert "bar" in js.metadata  # key exists
            assert js.reported.bar.is_none  # bla is not a parent of this node
            assert js.desired.bar.is_none  # bla is not a parent of this node
            assert js.metadata.bar.is_none  # bla is not a parent of this node
            assert js.reported.foobar is not None  # foobar is merged into reported
            assert js.desired.foobar is not None  # foobar is merged into desired
            assert js.metadata.foobar is not None  # foobar is merged into metadata
            # make sure the correct parent is merged (foobar(1) -> bla(1_xxx))
            assert js.reported.identifier.startswith(js.reported.foobar.identifier)
            assert js.reported.identifier.startswith(js.desired.foobar.node_id)
            assert js.reported.identifier.startswith(js.metadata.foobar.node_id)
@pytest.mark.asyncio
async def test_query_merge(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Inline merge queries attach traversal and aggregation results to each document."""
    q = parse_query(
        "is(foo) --> is(bla) { "
        "foo.bar.parents[]: <-[1:]-, "
        "foo.child: -->, "
        "walk: <-- -->, "
        "bla.agg: aggregate(sum(1) as count): <-[0:]- "
        "}"
    )
    async with await filled_graph_db.query_list(QueryModel(q, foo_model), with_count=True) as cursor:
        assert cursor.count() == 100
        async for bla in cursor:
            b = AccessJson(bla)
            assert b.reported.kind == "bla"
            # all ancestors up to the graph root
            assert len(b.foo.bar.parents) == 4
            for parent in b.foo.bar.parents:
                assert parent.reported.kind in ["foo", "cloud", "graph_root"]
            assert b.walk.reported.kind == "bla"
            # blas are leaves, so the outbound merge yields nothing
            assert b.foo.child == AccessNone()
            assert b.bla.agg == [{"count": 5}]
@pytest.mark.asyncio
async def test_query_with_clause(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """with() clauses filter nodes by properties of their neighborhood, including nested ones."""

    async def query(q: str) -> List[Json]:
        agg_query = parse_query(q)
        async with await filled_graph_db.query_list(QueryModel(agg_query.on_section("reported"), foo_model)) as cursor:
            return [bla async for bla in cursor]

    assert len(await query("is(bla) with(any, <-- is(foo))")) == 100
    assert len(await query('is(bla) with(any, <-- is(foo) and identifier=~"1")')) == 10
    assert len(await query("is(bla) with(empty, <-- is(foo))")) == 0
    assert len(await query("is(bla) with(any, <-- is(bla))")) == 0
    assert len(await query("is(bla) with(empty, <-- is(bla))")) == 100
    assert len(await query('is(bla) with(count==1, <-- is(foo) and identifier=~"1")')) == 10
    assert len(await query('is(bla) with(count==2, <-- is(foo) and identifier=~"1")')) == 0
    # nested with: the inner clause is evaluated on the predecessor
    assert len(await query("is(bla) with(any, <-- with(any, <-- is(foo)))")) == 100
@pytest.mark.asyncio
async def test_no_null_if_undefined(graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Sections absent in the imported graph stay absent in queried documents (no null padding)."""
    await graph_db.wipe()
    # imported graph should not have any desired or metadata sections
    graph = create_graph("test", 0)
    for _, node in graph.nodes(True):
        del node["desired"]
        del node["metadata"]
    await graph_db.merge_graph(graph, foo_model)
    async with await graph_db.query_list(QueryModel(parse_query("all"), foo_model)) as cursor:
        async for elem in cursor:
            assert "reported" in elem
            assert "desired" not in elem
            assert "metadata" not in elem
@pytest.mark.asyncio
async def test_get_node(filled_graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """get_node returns typed documents with synthetic properties rendered."""
    # load sub_root as foo
    sub_root = to_foo(await filled_graph_db.get_node(foo_model, "sub_root"))
    assert sub_root is not None
    assert isinstance(sub_root, Foo)
    # load node 7 as foo
    node_7_json = await filled_graph_db.get_node(foo_model, "7")
    node_7 = to_foo(node_7_json)
    assert node_7 is not None
    assert isinstance(node_7, Foo)
    # make sure that all synthetic properties are rendered (the age should not be older than 1 second => 0s or 1s)
    assert node_7_json[Section.reported]["age"] in ["0s", "1s"]  # type: ignore
    # load node 1_2 as bla
    node_1_2 = to_bla(await filled_graph_db.get_node(foo_model, "1_2"))
    assert node_1_2 is not None
    assert isinstance(node_1_2, Bla)
@pytest.mark.asyncio
async def test_insert_node(graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """create_node returns the stored document and persists it under the given parent."""
    await graph_db.wipe()
    json = await graph_db.create_node(foo_model, "some_new_id", to_json(Foo("some_new_id", "name")), "root")
    assert to_foo(json).identifier == "some_new_id"
    # the node is retrievable after creation
    assert to_foo(await graph_db.get_node(foo_model, "some_new_id")).identifier == "some_new_id"
@pytest.mark.asyncio
async def test_update_node(graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """update_node patches a single section and returns the updated document."""
    await graph_db.wipe()
    await graph_db.create_node(foo_model, "some_other", to_json(Foo("some_other", "foo")), "root")
    json = await graph_db.update_node(foo_model, "some_other", {"name": "bla"}, "reported")
    assert to_foo(json).name == "bla"
    # the change is persisted, not only reflected in the returned document
    assert to_foo(await graph_db.get_node(foo_model, "some_other")).name == "bla"
@pytest.mark.asyncio
async def test_update_nodes(graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Bulk update_nodes patches sections on multiple nodes; None removes a section."""

    def expect(jsons: List[Json], path: List[str], value: JsonElement) -> None:
        # every returned document must carry `value` at `path`
        for js in jsons:
            v = value_in_path(js, path)
            assert v is not None
            assert v == value

    await graph_db.wipe()
    await graph_db.create_node(foo_model, "id1", to_json(Foo("id1", "foo")), "root")
    await graph_db.create_node(foo_model, "id2", to_json(Foo("id2", "foo")), "root")
    change1 = {"desired": {"test": True}}
    result1 = [a async for a in graph_db.update_nodes(foo_model, {"id1": change1, "id2": change1})]
    assert len(result1) == 2
    expect(result1, ["desired", "test"], True)
    change2 = {"metadata": {"test": True}}
    result2 = [a async for a in graph_db.update_nodes(foo_model, {"id1": change2, "id2": change2})]
    assert len(result2) == 2
    expect(result2, ["metadata", "test"], True)
    change3 = {"desired": {"test": True}, "metadata": {"test": True}, "reported": {"name": "test"}}
    result3 = [a async for a in graph_db.update_nodes(foo_model, {"id1": change3, "id2": change3})]
    assert len(result3) == 2
    expect(result3, ["desired", "test"], True)
    expect(result3, ["metadata", "test"], True)
    expect(result3, ["reported", "name"], "test")
    change4 = {"desired": None, "metadata": None}
    result4 = [a async for a in graph_db.update_nodes(foo_model, {"id1": change4, "id2": change4})]
    assert len(result4) == 2
    # BUG FIX: the original asserted `"desired" not in result4`, which tested
    # membership in the result *list* and was vacuously true; the sections must
    # be absent from each returned document.
    for js in result4:
        assert "desired" not in js
        assert "metadata" not in js
@pytest.mark.asyncio
async def test_delete_node(graph_db: ArangoGraphDB, foo_model: Model) -> None:
    """Leaf nodes can be deleted; deleting a node with children is rejected."""
    await graph_db.wipe()
    await graph_db.create_node(foo_model, "sub_root", to_json(Foo("sub_root", "foo")), "root")
    await graph_db.create_node(foo_model, "some_other_child", to_json(Foo("some_other_child", "foo")), "sub_root")
    await graph_db.create_node(foo_model, "born_to_die", to_json(Foo("born_to_die", "foo")), "sub_root")
    await graph_db.delete_node("born_to_die")
    assert await graph_db.get_node(foo_model, "born_to_die") is None
    # a node that still has children must not be deletable
    with pytest.raises(AttributeError) as not_allowed:
        await graph_db.delete_node("sub_root")
    assert str(not_allowed.value) == "Can not delete node, since it has 1 child(ren)!"
@pytest.mark.asyncio
async def test_events(event_graph_db: EventGraphDB, foo_model: Model, event_sender: InMemoryEventSender) -> None:
    """Every mutating operation emits exactly one analytics event, in call order."""
    await event_graph_db.create_node(foo_model, "some_other", to_json(Foo("some_other", "foo")), "root")
    await event_graph_db.update_node(foo_model, "some_other", {"name": "bla"}, "reported")
    await event_graph_db.delete_node("some_other")
    await event_graph_db.merge_graph(create_graph("yes or no", width=1), foo_model)
    await event_graph_db.merge_graph(create_graph("maybe", width=1), foo_model, "batch1", True)
    # make sure all events will arrive
    await asyncio.sleep(0.1)
    # ensure the correct count and order of events
    assert [a.kind for a in event_sender.events] == [
        CoreEvent.NodeCreated,
        CoreEvent.NodeUpdated,
        CoreEvent.NodeDeleted,
        CoreEvent.GraphMerged,
        CoreEvent.BatchUpdateGraphMerged,
    ]
def to_json(obj: BaseResource) -> Json:
    """Serialize a resource via to_js, injecting its kind (to_js overrides win)."""
    serialized = to_js(obj)
    return {"kind": obj.kind(), **serialized}
def to_bla(json: Json) -> Bla:
    """Deserialize the reported section of a node document into a Bla."""
    reported = json["reported"]
    return from_js(reported, Bla)
def to_foo(json: Json) -> Foo:
    """Deserialize the reported section of a node document into a Foo."""
    reported = json["reported"]
    return from_js(reported, Foo)
|
from django.http import HttpResponse
def index(request):
    """Site index view: responds with a static greeting."""
    response = HttpResponse("Ola, mundo!")
    return response
|
# Copyright (C) 2013 Cisco Systems Inc.
# All rights reserved
#$Id: eor_utils.py,v 1.427 2013/06/24 23:56:03 venksrin Exp $
#ident $Source: /cvsroot/eor/systest/lib/eor_utils.py,v $ $Revision: 1.427 $
# Best Practices for get() functions:
# 1. Use class rex as much as possible for standard regular expressions
# 2. Use underscore in keys wherever white-space appears in the output header
# 3. Add author name, description of function, sample usage examples and return value
# 4. Use python documentation format for #3 above, so that the documentation for all the functions can be pulled out easily
from nxapi_utils import NXAPITransport
import re
import collections
import string
import subprocess
import shlex
import sys, socket
import datetime
import time
MASKS=['0.0.0.0','128.0.0.0','192.0.0.0','224.0.0.0','240.0.0.0','248.0.0.0','252.0.0.0','254.0.0.0','255.0.0.0','255.128.0.0','255.192.0.0','255.224.0.0','255.240.0.0','255.248.0.0','255.252.0.0', '255.254.0.0', '255.255.0.0', '255.255.128.0', '255.255.192.0', '255.255.224.0', '255.255.240.0', '255.255.248.0', '255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128', '255.255.255.192', '255.255.255.224', '255.255.255.240', '255.255.255.248', '255.255.255.252', '255.255.255.254', '255.255.255.255']
def runNXAPIConf(cmd):
    """Run a configuration command over NXAPI.

    Returns (output, msg, code) -- note the reordering relative to
    NXAPITransport.send_cmd_int, which yields (output, code, msg).
    """
    result, status, message = NXAPITransport.send_cmd_int(cmd, "cli_conf")
    return result, message, status
def runNXAPIShow(cmd):
    """Run a show command over NXAPI and return its output.

    A trailing "| xml" requests structured output: the pipe is stripped from
    the command and the cli_show message type is used; otherwise plain ascii
    output is requested.
    """
    if "| xml" in cmd:
        trimmed = cmd[:cmd.index("| xml")]
        output, code, msg = NXAPITransport.send_cmd_int(trimmed, "cli_show")
    else:
        output, code, msg = NXAPITransport.send_cmd_int(cmd, "cli_show_ascii")
    return output
def runVshCmdEx(cmd, _shell=False, _stdout=None):
    """Compatibility wrapper: execute cmd as an NXAPI conf command.

    _shell and _stdout are accepted for signature compatibility and ignored.
    Returns (output, error, status).
    """
    out, err, status = runNXAPIConf(cmd)
    return out, err, status
def cli_ex(cmd):
    """Execute a show command and return its output."""
    result = runNXAPIShow(cmd)
    return result
class rex:
    """Library of reusable regular-expression fragments for parsing NX-OS CLI output.

    All members are plain string constants meant to be interpolated into larger
    patterns (they are not pre-compiled).
    """

    # interface naming: long and short forms of every supported interface type
    INTERFACE_TYPE="[Ff]ast[Ee]thernet|[Ff][Ee]th|[Gg]igabit[Ee]thernet|[Gg]ig[Ee]|[Ee]thernet|[Ee]th|[Tt]unnel ?|[Ll]oopback ?|[Pp]ort-channel ?|[Oo]verlay ?|[Nn]ull|[Mm]gmt|[Vv]lan ?|[Pp]o ?|[Ll]o ?|[Oo]vl ?|[Vv][Ll]|[Rr]epl|[Rr]eplicator|[Ff]as|[Ss]up-eth"
    INTERFACE_NUMBER="[0-9]+/[0-9]+/[0-9]+|[0-9]+/[0-9]+|[0-9]+/[0-9]+\.[0-9]+|[0-9]+\.[0-9]+|[0-9]+|[0-9]+/[0-9]+/[0-9]+"
    # INTERFACE_NAME="(?:{0})(?:{1})|[Nn]ull".format(INTERFACE_TYPE,INTERFACE_NUMBER)
    INTERFACE_NAME='(?:(?:{0})(?:{1})|(?:[Nn]ull))'.format(INTERFACE_TYPE,INTERFACE_NUMBER)
    INTERFACE_RANGE='(?:(?:{0}-[0-9]+|{0}-{0}|{0}),?)+'.format(INTERFACE_NAME)
    # Broadcom fast-path ("xe") interface forms
    BCM_FP_INTERFACE='([Xx]e([0-9]+))'
    BCM_FP_INTERFACE_RANGE='[Xx]e([0-9]+)-[Xx]e([0-9]+)'
    PHYSICAL_INTERFACE_TYPE="[Ff]ast[Ee]thernet|[Ff][Ee]th|[Gg]igabit[Ee]thernet|[Gg]ig[Ee]|[Gg]i|[Ee]thernet|[Ee]th"
    PHYSICAL_INTERFACE_NUMBER="[0-9]+/[0-9]+/[0-9]+|[0-9]+/[0-9]+|[0-9]+"
    PHYSICAL_INTERFACE_NAME="(?:{0})(?:{1})".format(PHYSICAL_INTERFACE_TYPE,PHYSICAL_INTERFACE_NUMBER)
    PHYSICAL_INTERFACE_RANGE='(?:(?:{0}-[0-9]+|{0}-{0}|{0}),?)+'.format(PHYSICAL_INTERFACE_NAME)
    DEVICE_TYPE='EOR|sTOR|N7K|N5K|N3K|itgen|fanout|UNKNOWN|NA'
    FEX_MODEL='N2148T|N2232P|N2232TM-E|N2248TP-E|N2248T|NB22FJ|NB22HP'
    FEX_INTERFACE_TYPE='{0}[0-9][0-9][0-9]/[0-9]+/[0-9]+'.format(PHYSICAL_INTERFACE_TYPE)
    SWITCH_NAME = '[0-9A-Za-z_-]+'
    #VLAN_RANGE = '[0-9]+(?:\-[0-9]+)?'
    HEX="[0-9a-fA-F]+"
    HEX_VAL="[x0-9a-fA-F]+"
    MACDELIMITER="[\.:\-]"
    # Following will match the following combinations
    # Aa.Bb.Cc.Dd.Ee.Ff
    # Aa-Bb-Cc-Dd-Ee-Ff
    # Aa:Bb:Cc:Dd:Ee:Ff
    # AaBb.CcDd.EeFf
    # AaBb-CcDd-EeFf
    # AaBb:CcDd:EeFf
    MACADDR=HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+MACDELIMITER+HEX+HEX+"|"+HEX+HEX+HEX+HEX+MACDELIMITER+HEX+HEX+HEX+HEX+MACDELIMITER+HEX+HEX+HEX+HEX
    IPv4_ADDR="[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"
    IPv6_ADDR="[0-9A-Fa-f]+:[0-9A-Fa-f:]+"
    LINK_LOCAL_IPv6_ADDR="fe80::[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+:[0-9A-Fa-f]+"
    IP_ADDRESS="(?:(?:{0})|(?:{1}))".format(IPv4_ADDR,IPv6_ADDR)
    NETADDR ='{0}/[0-9]+'.format(IPv4_ADDR)
    NUM="[0-9]+"
    BOOL="[01]"
    DECIMAL_NUM="[0-9\.]+"
    ALPHA="[a-zA-Z]+"
    ALPHAUPPER="[A-Z]+"
    ALPHALOWER="[a-z]+"
    ALPHASPECIAL="[a-zA-Z_\-\.#/]+"
    ALPHANUM="[a-zA-Z0-9]+"
    ALPHANUMSPECIAL="[a-zA-Z0-9\-\._/]+"
    SYSMGR_SERVICE_NAME = "[a-zA-Z0-9\-\._ ]+"
    VRF_NAME="[a-zA-Z0-9_\-#]+"
    ALL="?:[.\s]+"
    #
    # Number and time formats
    #
    VLAN_RANGE='(?:(?:{0}-[0-9]+|{0}-{0}|{0}),?)+'.format(NUM)
    DATE = '[0-9]+\-[0-9]+\-[0-9]+'
    U_TIME="[0-9]+\.[0-9]+"
    CLOCK_TIME="[0-9]+[0-9]+:[0-9]+[0-9]+:[0-9]+[0-9]+"
    HH_MM_SS="[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}"
    # NOTE(review): "$U_TIME" / "$CLOCK_TIME" look like leftover Tcl-style
    # interpolation; this constant does not embed U_TIME/CLOCK_TIME -- confirm
    # before relying on it.
    TIME="(?:$U_TIME|$CLOCK_TIME)"
    MONTH="Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"
    YEAR="[12]+[0-9][0-9][0-9]"
    UPTIME="(?:\d+[dwmy]\d+[hdwm]|\d+:\d+:\d+|\d+\.\d+)"
    XPTIME="(?:\d+:\d+:\d+|\d+\.\d+|never)"
    # linecard / fabric-card status and model strings
    LC_STATUS='(?:pwr-?denied|err-?pwd-?dn|pwr-?cycle?d|upgrading|powered-?up|powered-?dn|failure|initializing|testing|ok)'
    LC_MODEL='(?:N7K-F2-?48X[PT]-?\d+[E]*| +|Cortina-Test-LC|N9k-X9636PQ)'
    FC_MODEL='(?:N7K-C[0-9]+-FAB-?\d+|N/A| +)'
    LC_MODULE_TYPE='(?:[0-9]+/[0-9]+ Gbps (?:BASE-T )?Ethernet Module|Cortina-Test-LC|Snowbird|Seymour)'
    FC_MODULE_TYPE='(?:Fabric Module(?: [0-9]+)?|Sierra|Shasta)'
    VLAN_STATUS='active|suspended|act.lshut'
    #Verify_list defined for stimuli classes
    VERIFY_LIST=['none','all','traffic','l2_unicast_pi','l3_unicast_pi','l2_multicast_pi','l3_multicast_pi','l2_unicast_pd','l3_unicast_pd','l2_multicast_pd','l3_multicast_pd','system','exception','vpc_consistency']
    TRIGGER_VERIFY_LIST=['traffic','none','all']
# To be deprecated, use strTolist instead
# Usage: strtolist('1,2,3')
#        strtolist('1 2 3')
#        strtolist('1, 2, 3')
# All three will return the list ['1', '2', '3']
def strtolist(inputstr, retainint=False):
    """Split a string (or any stringable value) into a list of items.

    Comma-separated input takes precedence over whitespace-separated input;
    surrounding brackets, quotes and spaces are stripped from each item. With
    retainint=True purely numeric items are converted to int.
    """
    text = str(inputstr).strip("[]")
    by_comma = text.split(",")
    # Two or more comma-separated fields means comma is the delimiter,
    # otherwise fall back to whitespace splitting.
    parts = by_comma if len(by_comma) >= 2 else text.split()
    result = []
    for raw in parts:
        item = raw.strip(" '").strip('"')
        if retainint and item.isdigit():
            result.append(int(item))
        else:
            result.append(item)
    return result
def normalizeInterfaceName(log, interface):
    """Normalize interface names to their canonical short forms (Eth/Vlan/Po/Lo).

    Accepts a str, list, tuple, or dict (keys are normalized); any other type
    is returned unchanged. Lists are updated in place and returned; tuples and
    dicts are rebuilt. `log` is unused but kept for interface compatibility.
    """
    # (pattern alternatives, canonical replacement), applied in this order
    rules = [
        ('[Ee]thernet|[Ee]th|[Ee]t', r'Eth\1'),
        ('[Vv]lan|[Vv]l', r'Vlan\1'),
        ('[Pp]ort-channel|[Pp]ortchannel|[Pp]o', r'Po\1'),
        ('[Ll]oopback|[Ll]oop-back|[Ll]o', r'Lo\1'),
    ]

    def _normalize(name):
        # apply every rule; the captured interface number is preserved
        for pattern, replacement in rules:
            name = re.sub(r'(?:{0})((?:{1}))'.format(pattern, rex.INTERFACE_NUMBER), replacement, name)
        return name

    if isinstance(interface, str):
        return _normalize(interface)
    if isinstance(interface, list):
        # BUG FIX: the original located elements via list.index(), which always
        # hits the first occurrence (corrupting lists with duplicates, and O(n)
        # per element) and shadowed the builtin `int` as a loop variable.
        # enumerate keeps the in-place update semantics without either problem.
        for position, name in enumerate(interface):
            interface[position] = _normalize(name)
        return interface
    if isinstance(interface, tuple):
        return tuple(_normalize(name) for name in interface)
    if isinstance(interface, dict):
        # a new dict is built and returned (the original is not mutated),
        # matching the original behavior
        return {_normalize(key): value for key, value in interface.items()}
    return interface
def convertListToDict(table,columns=[],keys=None,keytype="tuple"):
    """Convert tabular CLI output (a list, or a list of row tuples) into an OrderedDict.

    With `keys`: first-level dict keys are taken from the named key columns
    (joined with spaces, or as a tuple when keytype=="tuple" and several keys
    are given); the remaining columns form a nested dict, or a single value
    when exactly one non-key column remains. Without `keys`: several flat
    table/column layouts are handled (see the inline examples). Values and key
    parts that look like interface names are normalized via
    normalizeInterfaceName.

    NOTE(review): `columns=[]` is a mutable default argument; it is only read
    here, but callers should not mutate it.
    """
    # Returns dictionary based on given list & columns
    # If it is a list, each column is a key
    # If it is a list of lists, then first level keys are passed keys argument
    # and columns is second level key
    returnDict = collections.OrderedDict()
    if keys:
        keyIndexes = []
        # duck-typed check: a string of key names is split into a list
        if "split" in dir(keys):
            keys=keys.split()
        for key in keys:
            keyIndexes.append(columns.index(key))
        # valueIndex is the single non-key column, if there is exactly one
        valueIndex=-1
        if len(columns) - len(keys) == 1:
            for i in range(len(columns)):
                if not i in keyIndexes:
                    valueIndex=i
                    break
        for row in table:
            key=""
            keyitems=[]
            initial=True
            for keyIndex in keyIndexes:
                interface=""
                # normalize the key part only when the whole field is an interface name
                temp=re.match(rex.INTERFACE_NAME,row[keyIndex])
                if temp and temp.group(0) == row[keyIndex]:
                    interface=normalizeInterfaceName("",row[keyIndex])
                if initial:
                    if interface == "":
                        key = key + row[keyIndex]
                    else:
                        key = key + interface
                    initial=False
                else:
                    if interface == "":
                        key = key + " " + row[keyIndex]
                    else:
                        key = key + " " + interface
                if interface == "":
                    keyitems.append(row[keyIndex])
                else:
                    keyitems.append(interface)
            if keytype == "tuple" and len(keys) > 1:
                key=tuple(keyitems)
            returnDict[key] = collections.OrderedDict()
            if valueIndex == -1:
                # nested dict: one entry per non-key column
                for i in range(len(columns)):
                    if not i in keyIndexes:
                        temp=re.match(rex.INTERFACE_NAME,row[i].strip())
                        if temp and temp.group(0) == row[i].strip():
                            returnDict[key][columns[i]]=normalizeInterfaceName("",row[i].strip())
                        else:
                            returnDict[key][columns[i]] = row[i].strip()
            else:
                # single value column: map key directly to the value
                temp=re.match(rex.INTERFACE_NAME,row[valueIndex].strip())
                if temp and temp.group(0) == row[valueIndex].strip():
                    returnDict[key]=normalizeInterfaceName("",row[valueIndex].strip())
                else:
                    returnDict[key] = row[valueIndex]
    else:
        #Single level dictionary need to handle 6 different use cases
        #eor_utils.convertListToDict(['x','y','z'],['a','b','c'])
        #eor_utils.convertListToDict([],['a','b','c'])
        #eor_utils.convertListToDict(['x','y'],['a','b','c'])
        #eor_utils.convertListToDict([('x','y','z')],['a','b','c'])
        #eor_utils.convertListToDict([('x','y'),('c','d')],['a','b'])
        #eor_utils.convertListToDict([('x','y'),('c','d')])
        if len(table):
            if len(columns) == len(table) and not re.search('tuple',str(type(table[0]))):
                # flat list: columns[i] -> table[i]
                for key in columns:
                    temp=re.match(rex.INTERFACE_NAME,table[columns.index(key)])
                    if temp and temp.group(0) == table[columns.index(key)]:
                        returnDict[key]=normalizeInterfaceName("",table[columns.index(key)])
                    else:
                        returnDict[key]=table[columns.index(key)]
            elif len(table) == 1 and len(table[0]) == len(columns) and re.search('tuple',str(type(table[0]))):
                # single row tuple: columns[i] -> table[0][i]
                for key in columns:
                    temp=re.match(rex.INTERFACE_NAME,table[0][columns.index(key)])
                    if temp and temp.group(0) == table[0][columns.index(key)]:
                        returnDict[key]=normalizeInterfaceName("",table[0][columns.index(key)])
                    else:
                        returnDict[key]=table[0][columns.index(key)]
            elif (len(columns) == 2 or len(columns) == 0)and re.search('tuple',str(type(table[0]))):
                # list of 2-tuples: first element -> second element
                for row in table:
                    if len(row) == 2:
                        temp=re.match(rex.INTERFACE_NAME,row[1])
                        if temp and temp.group(0) == row[1]:
                            returnDict[row[0]]=normalizeInterfaceName("",row[1])
                        else:
                            returnDict[row[0]]=row[1]
            else:
                # unsupported shape: return an empty dict
                return collections.OrderedDict()
    return returnDict
def getUnwrappedBuffer(buffer, delimiter=" "):
    """Merge continuation lines (lines beginning with whitespace) into the
    preceding line, so every logical record of CLI output occupies one line.

    For example, a port-channel summary whose member-port list wraps onto an
    indented next line is joined back into a single line, which keeps the
    callers' regular expressions simple and independent of terminal wrapping.
    Wrapped content is appended using `delimiter`.
    """
    cleaned = re.sub("\r", "", buffer)
    unwrapped = ""
    pending = ""  # line currently being assembled
    for raw_line in cleaned.split("\n"):
        continuation = re.match("[ \t]+(.*)", raw_line)
        if continuation:
            # Indented line: glue its content onto the pending line.
            pending = pending + delimiter + continuation.group(1)
        else:
            # New record: flush the previous one (if any) and start fresh.
            if pending != "":
                unwrapped = unwrapped + pending + "\n"
            pending = re.sub("[\r\n]+", "", raw_line)
    if pending != "":
        unwrapped = unwrapped + pending + "\n"
    return unwrapped
def getVlanDict(vlan):
    """Parse `show vlan id <vlan>` into {vlan_id: {Name, Status, Ports}}."""
    showoutput = cli_ex("show vlan id " + vlan)
    # Unwrap the port list (wrapped members are rejoined with ", ").
    unwrapped = getUnwrappedBuffer(showoutput, ", ")
    pattern = ("(" + rex.NUM + ")[ \t]+(" + rex.ALPHANUM + ")[ \t]+("
               + rex.VLAN_STATUS + ")[ \t]+(.*)")
    vlanmemberlist = re.findall(pattern, unwrapped, flags=re.I | re.M)
    return convertListToDict(vlanmemberlist, ['VLAN', 'Name', 'Status', 'Ports'], ['VLAN'])
"""This script should not contain anything other than enums"""
class IfType():
    """Enumeration of interface types as plain integer constants."""
    Ethernet = 1
    PortChannel = 2
    Internal = 3
    Cpu = 4
def replace_output(_lines, _find_word, _replace_word):
    """Return a new list of lines with every whole-word occurrence of
    _find_word replaced by _replace_word (word boundaries via regex \\b)."""
    word_pattern = re.compile(r'\b%s\b' % (_find_word))
    return [word_pattern.sub(_replace_word, line) for line in _lines]
class createHwTableObject(object):
    """ Class to parse the broadcom table outputs and convert to dictionary format. Expects the
    input as 'Index: <Row>' where the <Row> is in key value pairs separated by commas"""

    def __init__(self, bcm_cmd_dump):
        """Parse bcm_cmd_dump into self.table: {index: {field: value}}.

        Rows containing "d chg", rows without a ':' separator, and the
        "Private image version" banner are skipped. Values are stripped of a
        trailing CR and the surrounding '<...>' brackets before being split
        into comma-separated key=value pairs.
        """
        self.table = collections.OrderedDict()
        table_rows = bcm_cmd_dump.split('\n')
        for row in table_rows:
            if "d chg" in row:
                continue
            if ":" not in row:
                continue
            if "Private image version" in row:
                continue
            # Fixed: the original executed row.split(': ') twice (duplicated
            # statement) and raised ValueError for values containing ': ';
            # split only on the first separator.
            (row_key, row_value) = row.split(': ', 1)
            value_row = row_value.rstrip('\r').lstrip('<').rstrip('>')
            self.table[row_key] = collections.OrderedDict()
            for data_params in value_row.split(','):
                if len(data_params) == 0:
                    continue
                # maxsplit=1 keeps values that themselves contain '='.
                (data_key, data_value) = data_params.split('=', 1)
                self.table[row_key][data_key] = data_value
def getSpanningTreeVlanPortStateDict(vlan):
    """Return per-interface spanning-tree state parsed from
    `show spanning-tree <vlan>`.

    Returns an OrderedDict keyed by interface with vlan/role/state/cost/
    prio.nbr/type fields, or None when no port-state lines are matched.
    """
    cmd = "show spanning-tree " + vlan
    showoutput = cli_ex(cmd)
    stplist = re.findall("^([^ \t]+)[ \s]+([^ \t]+)[ \s]+([A-Za-z]+)[ \s]+([0-9]+)[ \s]+\
    ([^ \t]+)[ \s]+([^ \t]+)[ \s\r\n]+", showoutput, flags=re.I | re.M)
    if stplist:
        # if vlan port state is found
        stpdict = convertListToDict(stplist, ['vlan', 'role', 'state', 'cost', 'prio.nbr', 'type'])
        # Fixed: the original logged via parserutils_lib.argsToCommandOptions(
        # args, arggrammar, ...) -- none of those names exist in this scope,
        # so the log line raised NameError whenever a state was found.
        log.info(" STP state for " + vlan + " is : " + str(stpdict))
        return stpdict
def getShowSpanningTreeDict( vlan ):
    """Parse `show spanning-tree vlan <vlan>` into a nested dict.

    Returns {vlan_or_mst_id: {stp_mode, root (bool), root_info, bridge_info,
    Interface_info}}. Both the per-VLAN and per-MST sections of the output
    are parsed with the same set of patterns.
    """
    show_stp_dict=collections.OrderedDict()
    # Define the Regexp Patterns to Parse ..
    root_params_pat_non_root='\s+Root ID\s+Priority\s+([0-9]+)\r\n\s+Address\s+({0})\r\n\s+Cost\s+([0-9]+)\r\nPort\s+([0-9]+)\s+\(([a-zA-Z0-9\-]+)\)\r\n\s+Hello Time\s+([0-9]+)\s+sec\s+Max\s+Age\s+([0-9]+)\s+sec\s+Forward\s+Delay\s+([0-9]+)\s+sec\r\n'.format(rex.MACADDR)
    root_params_pat_root='\s+Root ID\s+Priority\s+([0-9]+)\r\n\s+Address\s+({0})\r\n\s+This bridge is the root\r\n\s+Hello Time\s+([0-9]+)\s+sec\s+Max\s+Age\s+([0-9]+)\s+sec\s+Forward\s+Delay\s+([0-9]+)\s+sec\r\n'.format(rex.MACADDR)
    # Fixed: the Max Age group was '([0-9+)' (missing ']'), a malformed
    # character class that prevented this pattern from matching as intended.
    bridge_params_pat='\s+Bridge ID\s+Priority\s+([0-9]+)\s+\(priority\s+([0-9]+)\s+sys-id-ext ([0-9]+)\)\r\n\s+Address\s+({0})\r\n\s+Hello\s+Time\s+([0-9]+)\s+sec\s+Max\s+Age\s+([0-9]+)\s+sec\s+Forward Delay\s+([0-9]+) sec\r\n'.format(rex.MACADDR)
    #interface_params_pat='-------\r\n({0})\s+([a-zA-Z]+)\s+([A-Z]+)\s+([0-9]+)\s+([0-9]+).([0-9]+)\s+([\(\)a-zA-Z0-9\s]+)\r'.format(rex.INTERFACE_NAME)
    interface_params_pat='({0})\s+([a-zA-Z]+)\s+([A-Z]+)[\*\s]+([0-9]+)\s+([0-9]+).([0-9]+)\s+'.format(rex.INTERFACE_NAME)
    # Build the command to be executed based on the arguments passed ..
    cmd = 'show spanning-tree '
    cmd = cmd + 'vlan ' + str(vlan)
    show_stp=cli_ex(cmd)
    # Split the output of STP based on VLAN
    show_stp_vlan_split=show_stp.split('VLAN')
    # Iterate over every VLAN block and build the show_stp_dict
    for stp_vlan in show_stp_vlan_split:
        if re.search( '^([0-9]+)', stp_vlan ):
            #removed backslash r
            match=re.search( '^([0-9]+)\n\s+Spanning tree enabled protocol ([a-z]+)', stp_vlan, re.I )
            vlan_id = int(match.group(1))
            stp_mode = match.group(2)
            show_stp_dict[vlan_id]={}
            show_stp_dict[vlan_id]['stp_mode']=stp_mode
            if re.search( root_params_pat_root, stp_vlan, re.I ):
                root_info=re.findall( root_params_pat_root, stp_vlan, re.I )
                show_stp_dict[vlan_id]['root_info']=convertListToDict( root_info, ['Priority','Address', \
                    'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
                show_stp_dict[vlan_id]['root']=True
            else:
                root_info=re.findall( root_params_pat_non_root, stp_vlan, re.I )
                show_stp_dict[vlan_id]['root_info']=convertListToDict( root_info, ['Priority','Address','Cost', \
                    'Port','Hello Time','Max Age','Forward Delay'], ['Priority','Address','Cost', 'Port'])
                show_stp_dict[vlan_id]['root']=False
            bridge_info=re.findall( bridge_params_pat, stp_vlan, re.I )
            # Fixed: bridge_info was computed but root_info was passed here.
            show_stp_dict[vlan_id]['bridge_info']=convertListToDict( bridge_info, ['Priority','Address', \
                'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
            intf_info=re.findall( interface_params_pat, stp_vlan, re.I )
            show_stp_dict[vlan_id]['Interface_info']=convertListToDict( intf_info, [ 'Interface', 'Role', 'Status', \
                'Cost', 'Prio', 'Nbr' ] , [ 'Interface' ] )
    # Split the output of STP based on MST
    show_stp_mst_split=show_stp.split('MST')
    for mst_id in show_stp_mst_split:
        if re.search( '^([0-9]+)', mst_id):
            #removed backslash r
            match=re.search( '^([0-9]+)\n\s+Spanning tree enabled protocol ([a-z]+)', mst_id, re.I )
            # NOTE(review): the MST section keys the dict on the *vlan*
            # argument, not on match.group(1) -- confirm this is intended.
            mst = vlan
            stp_mode = match.group(2)
            show_stp_dict[mst]={}
            show_stp_dict[mst]['stp_mode']=stp_mode
            if re.search( root_params_pat_root, mst_id, re.I ):
                root_info=re.findall( root_params_pat_root, mst_id, re.I )
                show_stp_dict[mst]['root_info']=convertListToDict( root_info, ['Priority','Address', \
                    'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
                show_stp_dict[mst]['root']=True
            else:
                root_info=re.findall( root_params_pat_non_root, mst_id, re.I )
                show_stp_dict[mst]['root_info']=convertListToDict( root_info, ['Priority','Address','Cost', \
                    'Port','Hello Time','Max Age','Forward Delay'], ['Priority','Address','Cost', 'Port'])
                show_stp_dict[mst]['root']=False
            bridge_info=re.findall( bridge_params_pat, mst_id, re.I )
            # Fixed: bridge_info was computed but root_info was passed here.
            show_stp_dict[mst]['bridge_info']=convertListToDict( bridge_info, ['Priority','Address', \
                'Hello Time','Max Age','Forward Delay'], ['Priority','Address'])
            intf_info=re.findall( interface_params_pat, mst_id, re.I )
            show_stp_dict[mst]['Interface_info']=convertListToDict( intf_info, [ 'Interface', 'Role', 'Status', \
                'Cost', 'Prio', 'Nbr' ] , [ 'Interface' ] )
    return show_stp_dict
def pprint_table(out, table):
    """Prints out a table of data, padded for alignment
    @param out: Output stream (file-like object)
    @param table: The table to print. A list of lists.
    Each row must have the same number of columns.

    Column widths come from get_max_width() and numeric cells are rendered
    with format_num() (both defined elsewhere in this module).
    """
    col_paddings = []
    for i in range(len(table[0])):
        col_paddings.append(get_max_width(table, i))
    for row in table:
        # Fixed: the original used the Python 2-only "print >> out" statement
        # (a SyntaxError on Python 3). out.write() with explicit single-space
        # separators reproduces the same output on both versions.
        cells = [row[0].ljust(col_paddings[0] + 1)]  # left column
        for i in range(1, len(row)):  # rest of the cols
            cells.append(format_num(row[i]).rjust(col_paddings[i] + 2))
        out.write(" ".join(cells) + "\n")
def validateIP(ip):
    """Return 0 when ip is a valid IPv4 address per socket.inet_aton,
    otherwise 1 (shell-style status codes, kept for existing callers)."""
    try:
        socket.inet_aton(ip)
    except socket.error:
        return 1
    return 0
def convertIP(ip):
    """Convert a dotted-quad IPv4 string to a "0x"-prefixed hex string,
    two hex digits per octet (e.g. "10.0.0.1" -> "0x0a000001").

    Fixed idiom: the original built the list via a side-effecting list
    comprehension ([hexIP.append(...) for x in ...]); use a generator
    expression with join instead.
    """
    return "0x" + "".join(hex(int(octet))[2:].zfill(2) for octet in ip.split('.'))
class createEventHistoryTableObject(object):
    """ Class to parse the event history outputs and convert to dictionary format. Expects the
    input as 'Index: <Row>' where the <Row> is in key value pairs separated by commas"""
    def __init__( self, event_history_dump ):
        # Builds self.table: a list of event sequences, where each sequence
        # is a list of {key: value} dicts parsed from ESQ_REQ/ESQ_RSP rows.
        # State machine: esq_req_rsp accumulates the current record,
        # esq_start the current sequence, esqs the completed sequences;
        # req_rsp is True while the current record has been flushed.
        import re
        # Matches lines like "at 123456 usecs after Mon Jan  1 00:00:00 2018".
        time_format = "at %f usecs after %a %b %d %H:%M:%S %Y"
        self.table=[]
        table_rows=event_history_dump.split('\n')
        new = {}  # NOTE(review): never used -- candidate for removal
        esq_req_rsp = {}
        esqs = []
        esq_start = []
        req_rsp = True
        for row in table_rows:
            if "FSM" in row:
                continue
            if ":" not in row:
                continue
            # The next four markers all terminate the current sequence with
            # identical handling: flush the pending record, then the sequence.
            if "Previous state:" in row:
                if req_rsp == False:
                    esq_start.append(esq_req_rsp)
                    req_rsp = True
                    esq_req_rsp = {}
                if len(esq_start) > 0:
                    esqs.append(esq_start)
                    esq_start = []
                continue
            if "Triggered event:" in row:
                if req_rsp == False:
                    esq_start.append(esq_req_rsp)
                    req_rsp = True
                    esq_req_rsp = {}
                if len(esq_start) > 0:
                    esqs.append(esq_start)
                    esq_start = []
                continue
            if "Next state:" in row:
                if req_rsp == False:
                    esq_start.append(esq_req_rsp)
                    req_rsp = True
                    esq_req_rsp = {}
                if len(esq_start) > 0:
                    esqs.append(esq_start)
                    esq_start = []
                continue
            if "ESQ_START" in row:
                if req_rsp == False:
                    esq_start.append(esq_req_rsp)
                    req_rsp = True
                    esq_req_rsp = {}
                if len(esq_start) > 0:
                    esqs.append(esq_start)
                    esq_start = []
                continue
            if "ESQ_REQ" in row or "ESQ_RSP" in row:
                # Start of a new record: stash the previous one, if non-empty.
                old = esq_req_rsp
                esq_req_rsp = {}
                if len(old) > 0:
                    esq_start.append(old)
                req_rsp = True
            if "usecs after" in row:
                # Timestamp line: the second comma-separated field is parsed
                # into a datetime; the raw row is kept as TIME_STRING.
                y = row.split(',')[1].strip()
                t = datetime.datetime.strptime(y, time_format)
                esq_req_rsp['TIME'] = t
                esq_req_rsp['TIME_STRING'] = row
            # Fold every "key: value" pair on the row into the current record.
            kvpairs = row.split(',')
            for val in kvpairs:
                x = val.strip(' ').strip('\r').split(':')
                if len(x) != 2:
                    continue
                (tk, tv)=val.split(':')
                row_key = tk.strip(' ')
                row_value = tv.strip(' ')
                req_rsp = False
                esq_req_rsp[row_key]=row_value
        # Flush whatever record/sequence is still pending at end of input.
        if req_rsp == False:
            esq_start.append(esq_req_rsp)
        esqs.append(esq_start)
        self.table = esqs
|
# Experiment specification for debugging changepoint (CP) kernel search.
# NOTE(review): Experiment, ff.* (model/kernel/likelihood factories) and
# np are defined elsewhere in this project -- not visible in this chunk.
# The search_operators grammar rewrites kernel expressions:
#   A -> A + B, A -> B, A -> None, A -> CP(d, A).
Experiment(description='For debugging changepoints',
           data_dir='../data/debug/',
           max_depth=3,
           random_order=False,
           k=1,
           debug=False,
           local_computation=True,
           n_rand=1,
           sd=2,
           jitter_sd=0.1,
           max_jobs=300,
           verbose=False,
           make_predictions=True,
           skip_complete=False,
           results_dir='../results/debug-changepoint/',
           iters=250,
           base_kernels='SE',
           random_seed=1,
           period_heuristic=5,
           period_heuristic_type='min',
           subset=True,
           subset_size=250,
           full_iters=10,
           bundle_size=5,
           additive_form=False,
           mean='ff.MeanZero()', # Starting model
           kernel='ff.NoiseKernel()', # Starting kernel
           lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
           score='pl2',
           search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),\
                             ('A', 'B', {'A': 'kernel', 'B': 'base'}),\
                             ('A', ('None',), {'A': 'kernel'}),\
                             ('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'})])
|
from django.contrib import admin
from . import models
# Register your models here.
@admin.register(models.Image)
class ImageAdmin(admin.ModelAdmin):
    """Admin configuration for Image objects."""

    # Fixed: Django's attribute is `list_display_links` (plural); the
    # original `list_display_link` was silently ignored by ModelAdmin.
    list_display_links = (
        'location',
        'caption',
    )
    search_fields = (
        'location',
        'caption',
    )
    list_filter = (
        'location',
        'creator',
    )
    list_display = (
        'file',
        'location',
        'caption',
        'creator',
        'created_at',
        'updated_at',
    )
@admin.register(models.Like)
class LikeAdmin(admin.ModelAdmin):
    """Admin configuration for Like objects."""
    list_display = (
        'creator',
        'image',
    )
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for Comment objects."""
    list_display = (
        'message',
        'creator',
        'image',
        'created_at',
        'updated_at',
    )
|
#!/usr/bin/env python
import threading
import unittest
import psycopg2
from psycopg2.extensions import (
ISOLATION_LEVEL_SERIALIZABLE, STATUS_BEGIN, STATUS_READY)
import tests
class TransactionTests(unittest.TestCase):
    """Exercise commit/rollback semantics on a SERIALIZABLE connection using
    two temporary tables linked by a deferrable foreign key."""

    def setUp(self):
        # One fresh connection per test; TEMPORARY tables vanish with it.
        self.conn = psycopg2.connect(tests.dsn)
        self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
        curs = self.conn.cursor()
        curs.execute('''
            CREATE TEMPORARY TABLE table1 (
              id int PRIMARY KEY
            )''')
        # The constraint is set to deferrable for the commit_failed test
        curs.execute('''
            CREATE TEMPORARY TABLE table2 (
              id int PRIMARY KEY,
              table1_id int,
              CONSTRAINT table2__table1_id__fk
                FOREIGN KEY (table1_id) REFERENCES table1(id) DEFERRABLE)''')
        curs.execute('INSERT INTO table1 VALUES (1)')
        curs.execute('INSERT INTO table2 VALUES (1, 1)')
        self.conn.commit()

    def tearDown(self):
        self.conn.close()

    def test_rollback(self):
        # Test that rollback undoes changes
        curs = self.conn.cursor()
        curs.execute('INSERT INTO table2 VALUES (2, 1)')
        # Rollback takes us from BEGIN state to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.conn.rollback()
        self.assertEqual(self.conn.status, STATUS_READY)
        curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
        self.assertEqual(curs.fetchall(), [])

    def test_commit(self):
        # Test that commit stores changes
        curs = self.conn.cursor()
        curs.execute('INSERT INTO table2 VALUES (2, 1)')
        # Rollback takes us from BEGIN state to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.conn.commit()
        self.assertEqual(self.conn.status, STATUS_READY)
        # Now rollback and show that the new record is still there:
        self.conn.rollback()
        curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
        self.assertEqual(curs.fetchall(), [(2, 1)])

    def test_failed_commit(self):
        # Test that we can recover from a failed commit.
        # We use a deferred constraint to cause a failure on commit.
        curs = self.conn.cursor()
        curs.execute('SET CONSTRAINTS table2__table1_id__fk DEFERRED')
        curs.execute('INSERT INTO table2 VALUES (2, 42)')
        # The commit should fail, and move the cursor back to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.assertRaises(psycopg2.IntegrityError, self.conn.commit)
        self.assertEqual(self.conn.status, STATUS_READY)
        # The connection should be ready to use for the next transaction:
        curs.execute('SELECT 1')
        self.assertEqual(curs.fetchone()[0], 1)
class DeadlockSerializationTests(unittest.TestCase):
    """Test deadlock and serialization failure errors.

    Modernized: the Python 2-only "except E, exc" syntax is replaced with
    "except E as exc", which is valid on Python 2.6+ and Python 3.
    """

    def connect(self):
        """Open a fresh SERIALIZABLE connection to the test database."""
        conn = psycopg2.connect(tests.dsn)
        conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
        return conn

    def setUp(self):
        self.conn = self.connect()
        curs = self.conn.cursor()
        # Drop table if it already exists
        try:
            curs.execute("DROP TABLE table1")
            self.conn.commit()
        except psycopg2.DatabaseError:
            self.conn.rollback()
        try:
            curs.execute("DROP TABLE table2")
            self.conn.commit()
        except psycopg2.DatabaseError:
            self.conn.rollback()
        # Create sample data
        curs.execute("""
            CREATE TABLE table1 (
              id int PRIMARY KEY,
              name text)
            """)
        curs.execute("INSERT INTO table1 VALUES (1, 'hello')")
        curs.execute("CREATE TABLE table2 (id int PRIMARY KEY)")
        self.conn.commit()

    def tearDown(self):
        curs = self.conn.cursor()
        curs.execute("DROP TABLE table1")
        curs.execute("DROP TABLE table2")
        self.conn.commit()
        self.conn.close()

    def test_deadlock(self):
        """Two transactions lock table1/table2 in opposite order; exactly one
        must fail with TransactionRollbackError."""
        self.thread1_error = self.thread2_error = None
        step1 = threading.Event()
        step2 = threading.Event()

        def task1():
            try:
                conn = self.connect()
                curs = conn.cursor()
                curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
                step1.set()
                step2.wait()
                curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
                self.thread1_error = exc
                step1.set()
            conn.close()

        def task2():
            try:
                conn = self.connect()
                curs = conn.cursor()
                step1.wait()
                curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
                step2.set()
                curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
                self.thread2_error = exc
                step2.set()
            conn.close()

        # Run the threads in parallel. The "step1" and "step2" events
        # ensure that the two transactions overlap.
        thread1 = threading.Thread(target=task1)
        thread2 = threading.Thread(target=task2)
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()

        # Exactly one of the threads should have failed with
        # TransactionRollbackError:
        self.assertFalse(self.thread1_error and self.thread2_error)
        error = self.thread1_error or self.thread2_error
        self.assertTrue(isinstance(
            error, psycopg2.extensions.TransactionRollbackError))

    def test_serialisation_failure(self):
        """A stale read followed by a conflicting concurrent update must make
        exactly one transaction fail with TransactionRollbackError."""
        self.thread1_error = self.thread2_error = None
        step1 = threading.Event()
        step2 = threading.Event()

        def task1():
            try:
                conn = self.connect()
                curs = conn.cursor()
                curs.execute("SELECT name FROM table1 WHERE id = 1")
                curs.fetchall()
                step1.set()
                step2.wait()
                curs.execute("UPDATE table1 SET name='task1' WHERE id = 1")
                conn.commit()
            except psycopg2.DatabaseError as exc:
                self.thread1_error = exc
                step1.set()
            conn.close()

        def task2():
            try:
                conn = self.connect()
                curs = conn.cursor()
                step1.wait()
                curs.execute("UPDATE table1 SET name='task2' WHERE id = 1")
                conn.commit()
            except psycopg2.DatabaseError as exc:
                self.thread2_error = exc
                step2.set()
            conn.close()

        # Run the threads in parallel. The "step1" and "step2" events
        # ensure that the two transactions overlap.
        thread1 = threading.Thread(target=task1)
        thread2 = threading.Thread(target=task2)
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()

        # Exactly one of the threads should have failed with
        # TransactionRollbackError:
        self.assertFalse(self.thread1_error and self.thread2_error)
        error = self.thread1_error or self.thread2_error
        self.assertTrue(isinstance(
            error, psycopg2.extensions.TransactionRollbackError))
class QueryCancellationTests(unittest.TestCase):
    """Tests for query cancellation."""

    def setUp(self):
        self.conn = psycopg2.connect(tests.dsn)
        self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)

    def test_statement_timeout(self):
        curs = self.conn.cursor()
        # Set a low statement timeout, then sleep for a longer period.
        # statement_timeout is in milliseconds; pg_sleep takes seconds, so
        # the 50 s sleep is guaranteed to exceed the 10 ms limit.
        curs.execute('SET statement_timeout TO 10')
        self.assertRaises(psycopg2.extensions.QueryCanceledError,
                          curs.execute, 'SELECT pg_sleep(50)')
def test_suite():
    """Return the suite of all tests in this module (used by the runner)."""
    return unittest.TestLoader().loadTestsFromName(__name__)

if __name__ == "__main__":
    unittest.main()
|
#%%
import traceback
import threading
import queue
import cv2
import time
from .cvtrace import PathManager, GcodeWriter
class TracerWorker(threading.Thread):
    """Background worker thread that drives the camera-based trace routine.

    Communication is message-based: tuples are posted to self.inbox, with the
    command name as the first element ("start_trace", "cancel", "continue",
    "position", "shutdown"). `parent` is the OctoPrint plugin instance that
    provides settings, printer access, logging and image capture.
    """
    def __init__(self, parent):
        # Daemon but we'll still try to shut down nicely
        threading.Thread.__init__(self, daemon=True)
        self.inbox = queue.Queue()
        self.parent = parent
        self.xyz = None  # last reported tool position (x, y, z) from M114

    def run(self):
        """Main loop: block on the inbox and dispatch commands until
        "shutdown" is received."""
        while True:
            message = self.inbox.get() # wait until message is available
            cmd = message[0]
            self.log_info(f"Got command {cmd}")
            if cmd == "start_trace":
                self.trace_routine()
                self.log_info("Worker done with trace_routine")
            elif cmd == "cancel":
                self.log_info("Got cancel request while not running, ignoring")
            elif cmd == "continue":
                self.log_info("Got continue request while not running, ignoring")
            elif cmd == "position":
                self.log_info("Got position request while not running, ignoring")
            elif cmd == "shutdown":
                self.log_info("Got shutdown request while not running")
                break
            else:
                self.log_info(f"Worker got unrecognized command: {cmd}")
        self.log_info("Done running")

    def delay_command(self):
        """Return a G4 dwell command for the configured scan delay (seconds
        in settings, milliseconds in g-code)."""
        delay_ms = round(self.parent._settings.getFloat(["scan_delay"]) * 1000)
        return f"G4 P{delay_ms}"

    def wait(self, delay):
        """Wait up to `delay` seconds for the printer to acknowledge.

        Sends a dwell + M117 marker, then consumes inbox messages until
        "continue" arrives. "position" messages update self.xyz (first one
        wins). "cancel", a timeout, or "shutdown" abort the trace by raising;
        on "shutdown" the request is re-posted so run() also terminates.
        """
        self.commands("G4 P1")
        self.commands("M117 TRACER")
        while True:
            try:
                message = self.inbox.get(timeout=delay)
            except queue.Empty:
                message = ("timeout",) # timed out waiting for message
            cmd = message[0]
            if cmd == "cancel":
                self.log_info("Worker received cancel request")
                raise Exception("cancel")
            elif cmd == "timeout":
                self.log_info("Worker encountered timeout waiting")
                raise Exception("timeout")
            elif cmd == "shutdown":
                self.log_info("Worker received shutdown request")
                self.post(("shutdown",))
                raise Exception("shutdown")
            elif cmd == "continue":
                self.log_info("Worker continuing")
                return
            elif cmd == "position":
                if self.xyz is None:
                    self.xyz = (message[1], message[2], message[3])
                else:
                    self.log_info("Worker got unexpected xyz, ignoring")
                # repeat loop, process more messages
            else:
                self.log_info(f"Worker got unexpected command {cmd}, ignoring")
                # repeat loop, process more messages

    def trace_routine(self):
        """Capture images along the path suggested by PathManager, extract
        contours, and save the generated g-code via the parent's lfs.

        Any exception (including the cancel/timeout/shutdown exceptions from
        wait()) aborts the routine; the error is logged, not propagated.
        """
        try:
            data_folder = self.parent.get_plugin_data_folder()
            # Ask for the current position (M114) in absolute mode (G90),
            # then wait for the "position" message to fill self.xyz.
            self.xyz = None
            self.commands("M114")
            self.commands("G90")
            self.wait(5)
            if self.xyz is None:
                self.log_info("Worker did not get xyz, aborting")
                return
            xyz = self.xyz # keep local copy
            pm = PathManager()
            img = self.parent.getImage()
            imgcount = 1
            cv2.imwrite(f"{data_folder}/img{imgcount}.png", img)
            st, nextx, nexty = pm.addCapture(img, xyz[0], xyz[1])
            # Keep moving/capturing while PathManager asks for more frames.
            while st == "more":
                if imgcount > 50:
                    self.log_info("exceeded capture limit, aborting")
                    return
                self.log_info(f"Capture more, next at {nextx} {nexty}")
                self.commands(f"G1 X{nextx:.2f} Y{nexty:.2f} F800")
                self.commands(self.delay_command())
                self.wait(5)
                img = self.parent.getImage()
                imgcount = imgcount + 1
                cv2.imwrite(f"{data_folder}/img{imgcount}.png", img)
                st, nextx, nexty = pm.addCapture(img, nextx, nexty)
            self.log_info("finalizing")
            # contour settings
            cset = dict(
                tool_offs_x = self.parent._settings.getFloat(["tool_offs_x"]),
                tool_offs_y = self.parent._settings.getFloat(["tool_offs_y"]),
                tool_diam = self.parent._settings.getFloat(["tool_diam"]),
                cut_offset = self.parent._settings.getFloat(["cut_offset"]),
                cut_hole = self.parent._settings.get_boolean(["cut_hole"]),
                cut_climb = self.parent._settings.get_boolean(["cut_climb"]),
            )
            # for get_contours, settings must have tool_offs_x, tool_offs_y,
            # tool_diam, cut_offset, cut_hole (bool), cut_climb (bool)
            contours_xy = pm.get_contours(cset, outfolder=data_folder)
            self.log_info("completed contour extraction")
            # g-code generation settings
            gset = dict(
                cut_depth = self.parent._settings.getFloat(["cut_depth"]),
                cut_feedrate = self.parent._settings.getFloat(["cut_feedrate"]),
            )
            # NOTE(review): only the first contour is written out -- confirm
            # multi-contour traces are intentionally ignored.
            gw = GcodeWriter(contours_xy[0], gset)
            now_str = time.strftime('%Y-%m-%d_%H_%M_%S')
            self.parent.lfs.add_file(f"trace_{now_str}.gcode", gw)
            self.log_info("done")
        except Exception:
            self.log_info(f"Worker got exception {traceback.format_exc()}, aborting")

    def commands(self, gcode):
        """Forward a g-code command (or list) to the printer via the parent."""
        self.log_info(f"sending gcode to parent: {gcode}")
        self.parent._printer.commands(gcode)

    def log_info(self, string):
        """Log through the parent plugin's logger."""
        self.parent._logger.info(string)

    def post(self, message):
        # someone else wants to post to my inbox
        self.inbox.put(message)
|
# Copyright © 2017 Ondrej Martinsky, All rights reserved
# http://github.com/omartinsky/pybor
def assertRaisesMessage(exception_class, lambda_function, message_substring):
    """Assert that calling lambda_function raises exception_class and that
    message_substring occurs in the exception's first argument.

    Fixed: the original returned silently when lambda_function raised
    nothing, so a non-raising call passed the "assert raises" check.
    """
    try:
        lambda_function()
    except exception_class as ex:
        msg = ex.args[0]
        if message_substring not in msg:
            raise BaseException("Unexpected exception string '%s'" % msg)
        return
    raise BaseException("Expected %s to be raised" % exception_class)
def assert_type(obj, expected_type, allowNone=False):
    """Assert that obj is an instance of expected_type (None is accepted
    when allowNone is True)."""
    acceptable = isinstance(obj, expected_type) or (allowNone and obj is None)
    assert acceptable, "Unexpected type %s" % str(type(obj))
def enum_values(enum_class):
    """Return the name -> member mapping of the given Enum class."""
    return enum_class._member_map_
def enum_values_as_string(enum_class):
    """Return the member names of enum_class as one comma-separated string."""
    member_names = [name for name in enum_values(enum_class)]
    return ','.join(member_names)
def enum_from_string(enum_class, string_representation):
    """Look up an enum member by name, raising with the list of valid names
    when the lookup fails."""
    members = enum_class._member_map_
    if string_representation not in members:
        raise BaseException("Unable to convert '%s' to %s. Possible values are: %s" %
                            (string_representation, str(enum_class), enum_values_as_string(enum_class)))
    return members[string_representation]
def assert_equal(l, r):
    """Assert l == r, reporting both values in the failure message."""
    if not (l == r):
        raise AssertionError("%s != %s" % (str(l), str(r)))
def coalesce(*arg):
    """Return the first argument that is not None, or None when all are
    None (or no arguments are given)."""
    return next((candidate for candidate in arg if candidate is not None), None)
|
# -*- coding: utf-8 -*-
"""
Utilities to enable exception reraising across the master commands
"""
from __future__ import absolute_import, print_function, unicode_literals
# Import salt libs
import salt.exceptions
import salt.utils.event
# Import 3rd-party libs
from salt.ext.six.moves import builtins as exceptions
def raise_error(name=None, args=None, message=""):
    """
    Raise an exception whose class is looked up by `name`.

    Resolution order: salt.exceptions, then the builtin exceptions, falling
    back to salt.exceptions.SaltException. The exception is constructed from
    *args when given, otherwise from `message`. An empty name means
    "Exception".
    """
    exc_name = name or "Exception"
    if hasattr(salt.exceptions, exc_name):
        exc_class = getattr(salt.exceptions, exc_name)
    elif hasattr(exceptions, exc_name):
        exc_class = getattr(exceptions, exc_name)
    else:
        exc_class = getattr(salt.exceptions, "SaltException")
    if args is None:
        raise exc_class(message)
    raise exc_class(*args)
def pack_exception(exc):
    """Serialize an exception into a plain dict suitable for the event bus.

    Exceptions providing a pack() method define their own payload; all other
    exceptions are reduced to {"message", "args"}.
    """
    if hasattr(exc, "pack"):
        return exc.pack()
    # NOTE(review): relies on exc.__unicode__(), which plain Python 3
    # exceptions do not define -- confirm every exception reaching this
    # branch provides it (salt's exception base classes do).
    return {"message": exc.__unicode__(), "args": exc.args}
def fire_exception(exc, opts, job=None, node="minion"):
    """
    Fire raw exception across the event bus
    """
    # NOTE(review): `job` is normalized here but never used below --
    # presumably kept for API compatibility; confirm before removing.
    if job is None:
        job = {}
    event = salt.utils.event.SaltEvent(node, opts=opts, listen=False)
    event.fire_event(pack_exception(exc), "_salt_error")
|
from flask import url_for
import meowbot
from meowbot.triggers import SimpleResponseCommand, trigger_registry, BaseCommand
from meowbot.conditions import IsCommand
from meowbot.context import CommandContext
class Help(SimpleResponseCommand):
    """Implements `help`: list all commands, or show help for one command."""

    condition = IsCommand(["help"])
    help = "`help`: shows all commands, or help for a particular command"

    def get_message_args(self, context: CommandContext):
        """Return Slack message kwargs: help text for the named command when
        an argument is given, otherwise an attachment listing every public
        command."""
        if context.args:
            name = context.args[0].lower()
            # Scan the WHOLE registry before concluding the command is
            # invalid; the "not a valid command" reply must only be produced
            # when no registered trigger matched.
            for trigger in trigger_registry:
                if (
                    issubclass(trigger, BaseCommand)
                    and not getattr(trigger, "private", False)
                    and (name in trigger.condition._aliases)
                ):
                    return {"text": trigger().get_help(context)}
            return {"text": f"`{name}` is not a valid command"}
        # No argument: build the full, sorted command listing.
        commands = {
            trigger.condition._name: trigger
            for trigger in trigger_registry
            if issubclass(trigger, BaseCommand)
            and isinstance(trigger.condition, IsCommand)
            and not getattr(trigger, "private", False)
        }
        attachment = {
            "pretext": "Available commands are:",
            "fallback": ", ".join(sorted(commands)),
            "fields": [
                {"value": commands[name]().get_help(context)}
                for name in sorted(commands)
            ],
            "footer": "<{}|meowbot {}> | For more help, join "
            "#meowbot_control".format(
                url_for("main.index", _external=True), meowbot.__version__
            ),
            "footer_icon": url_for(
                "static", filename="meowbot_thumb.jpg", _external=True
            ),
        }
        return {"attachments": [attachment], "thread_ts": context.event.ts}
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for the base class for calibration-type experiments."""
from test.base import QiskitExperimentsTestCase
from qiskit_experiments.library import QubitSpectroscopy
from qiskit_experiments.calibration_management.calibrations import Calibrations
from qiskit_experiments.calibration_management.base_calibration_experiment import (
BaseCalibrationExperiment,
)
class TestBaseCalibrationClass(QiskitExperimentsTestCase):
    """Tests for base calibration experiment classes."""

    def test_class_order(self):
        """Test warnings when the BaseCalibrationExperiment is not the first parent."""

        class CorrectOrder(BaseCalibrationExperiment, QubitSpectroscopy):
            """A class with the correct order should not produce warnings.."""

            def __init__(self):
                """A dummy class for parent order testing."""
                super().__init__(Calibrations(coupling_map=[]), 0, [0, 1, 2])

        CorrectOrder()

        with self.assertWarns(Warning):
            # pylint: disable=unused-variable
            # NOTE(review): the warning fires at class-definition time (no
            # instantiation below), so it is presumably emitted from a
            # subclass hook in BaseCalibrationExperiment -- confirm.
            class WrongOrder(QubitSpectroscopy, BaseCalibrationExperiment):
                """Merely defining this class is enough to raise the warning."""

                def __init__(self):
                    """A dummy class for parent order testing."""
                    super().__init__(Calibrations(coupling_map=[]), 0, [0, 1, 2])
|
import os
import torch
import torch.utils.data as data
from PIL import Image
import tifffile as tiff
import os
import cv2
import numpy as np
import os.path
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
class VisionDataset(data.Dataset):
    """Base dataset class: stores a root path and resolves the transform
    configuration (either a joint `transforms` callable or separate
    `transform`/`target_transform` callables, never both).

    Subclasses must implement __getitem__ and __len__.
    """
    _repr_indent = 4

    def __init__(
            self,
            root: str,
            transforms: Optional[Callable] = None,
            transform: Optional[Callable] = None,
            target_transform: Optional[Callable] = None,
    ) -> None:
        # Fixed: torch._six.string_classes was removed in modern PyTorch
        # (>= 1.13); on Python 3 it was simply (str,), so isinstance(root,
        # str) is the equivalent, future-proof check.
        if isinstance(root, str):
            root = os.path.expanduser(root)
        self.root = root

        has_transforms = transforms is not None
        has_separate_transform = transform is not None or target_transform is not None
        if has_transforms and has_separate_transform:
            raise ValueError("Only transforms or transform/target_transform can "
                             "be passed as argument")

        # for backwards-compatibility
        self.transform = transform
        self.target_transform = target_transform

        if has_separate_transform:
            transforms = StandardTransform(transform, target_transform)
        self.transforms = transforms

    def __getitem__(self, index: int) -> Any:
        raise NotImplementedError

    def __len__(self) -> int:
        raise NotImplementedError

    def __repr__(self) -> str:
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if hasattr(self, "transforms") and self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return '\n'.join(lines)

    def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
        # Indent a transform's multi-line repr under the given heading.
        lines = transform.__repr__().splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])

    def extra_repr(self) -> str:
        """Extra lines for __repr__; subclasses may override."""
        return ""
class StandardTransform(object):
    """Bundle independent input/target transforms behind a single callable."""

    def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
        self.transform = transform
        self.target_transform = target_transform

    def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
        """Apply each configured transform; unconfigured slots pass values
        through unchanged."""
        if self.transform is not None:
            input = self.transform(input)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return input, target

    def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
        """Indent a transform's multi-line repr under the given heading."""
        repr_lines = transform.__repr__().splitlines()
        continuation_indent = " " * len(head)
        formatted = ["{}{}".format(head, repr_lines[0])]
        for extra_line in repr_lines[1:]:
            formatted.append("{}{}".format(continuation_indent, extra_line))
        return formatted

    def __repr__(self) -> str:
        parts = [self.__class__.__name__]
        if self.transform is not None:
            parts += self._format_transform_repr(self.transform,
                                                 "Transform: ")
        if self.target_transform is not None:
            parts += self._format_transform_repr(self.target_transform,
                                                 "Target transform: ")
        return '\n'.join(parts)
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    """Checks if a file is an allowed extension.
    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions to consider (lowercase)
    Returns:
        bool: True if the filename ends with one of given extensions
    """
    lowered = filename.lower()
    return lowered.endswith(extensions)
def is_image_file(filename: str) -> bool:
    """Return True when *filename* carries one of the known image extensions.

    Delegates to :func:`has_file_allowed_extension` with the module-level
    ``IMG_EXTENSIONS`` tuple.
    """
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(
    directory: str,
    class_to_idx: Dict[str, int],
    extensions: Optional[Tuple[str, ...]] = None,
    is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
    """Collect (path, class_index) samples from a class-per-subdirectory tree.

    Args:
        directory (str): root dataset directory
        class_to_idx (Dict[str, int]): maps class name to class index
        extensions (optional): allowed file extensions; mutually exclusive
            with ``is_valid_file``.
        is_valid_file (optional): predicate deciding whether a path is a
            valid (non-corrupt) sample; mutually exclusive with ``extensions``.

    Raises:
        ValueError: when both or neither of ``extensions`` / ``is_valid_file``
            are given.

    Returns:
        List[Tuple[str, int]]: samples of the form (path_to_sample, class)
    """
    directory = os.path.expanduser(directory)
    # Exactly one of the two filtering mechanisms must be supplied.
    if (extensions is None) == (is_valid_file is None):
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    if extensions is not None:
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
    is_valid_file = cast(Callable[[str], bool], is_valid_file)
    samples = []
    for target_class in sorted(class_to_idx):
        class_index = class_to_idx[target_class]
        target_dir = os.path.join(directory, target_class)
        if not os.path.isdir(target_dir):
            continue
        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    samples.append((path, class_index))
    return samples
class DatasetFolder(VisionDataset):
    """Generic loader for samples arranged one class per subdirectory: ::

        root/class_x/xxx.ext
        root/class_x/xxy.ext
        root/class_y/123.ext
        root/class_y/[...]/asd932_.ext

    Args:
        root (string): Root directory path.
        loader (callable): loads a sample given its path.
        extensions (tuple[string]): allowed file extensions; mutually
            exclusive with ``is_valid_file``.
        transform (callable, optional): transform applied to each sample,
            e.g. ``transforms.RandomCrop`` for images.
        target_transform (callable, optional): transform applied to each target.
        is_valid_file (callable, optional): predicate deciding whether a path
            is a valid (non-corrupt) sample; mutually exclusive with
            ``extensions``.

    Attributes:
        classes (list): class names, sorted alphabetically.
        class_to_idx (dict): maps class name to class index.
        samples (list): (sample path, class_index) tuples.
        targets (list): class_index for each sample.
    """

    def __init__(
        self,
        root: str,
        loader: Callable[[str], Any],
        extensions: Optional[Tuple[str, ...]] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        is_valid_file: Optional[Callable[[str], bool]] = None,
    ) -> None:
        super(DatasetFolder, self).__init__(root, transform=transform,
                                            target_transform=target_transform)
        classes, class_to_idx = self._find_classes(self.root)
        samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file)
        if not samples:
            # Fail fast with a descriptive message rather than yield an empty dataset.
            msg = "Found 0 files in subfolders of: {}\n".format(self.root)
            if extensions is not None:
                msg += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(msg)
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [label for _, label in samples]

    @staticmethod
    def make_dataset(
        directory: str,
        class_to_idx: Dict[str, int],
        extensions: Optional[Tuple[str, ...]] = None,
        is_valid_file: Optional[Callable[[str], bool]] = None,
    ) -> List[Tuple[str, int]]:
        # Thin wrapper around the module-level make_dataset helper.
        return make_dataset(directory, class_to_idx, extensions=extensions, is_valid_file=is_valid_file)

    def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]:
        """Scan *dir* for immediate subdirectories.

        Returns:
            tuple: (classes, class_to_idx) where classes are directory names
            relative to *dir* and class_to_idx maps each name to its index.

        Ensures:
            No class is a subdirectory of another (only one level is scanned).
        """
        classes = sorted(entry.name for entry in os.scandir(dir) if entry.is_dir())
        class_to_idx = {name: index for index, name in enumerate(classes)}
        return classes, class_to_idx

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return ``(sample, target)`` for *index*, with transforms applied."""
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target

    def __len__(self) -> int:
        return len(self.samples)
# File extensions recognised as images by is_image_file / ImageFolder.
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def image_loader(path: str) -> "np.ndarray":
    """Load an image with tifffile and return it as a float32 numpy array.

    NOTE(review): the original annotation declared ``Image.Image`` (PIL), but
    the function returns ``tiff.imread(...)`` cast to float32, which is a
    numpy array — the annotation has been corrected accordingly.
    """
    img = tiff.imread(path).astype(np.float32)
    return img
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
    """Load *path* via accimage, falling back to image_loader on IOError."""
    import accimage
    try:
        img = accimage.Image(path)
    except IOError:
        # Potentially a decoding problem; fall back to the default loader.
        return image_loader(path)
    return img
def default_loader(path: str) -> Any:
    """Dispatch to the loader matching torchvision's configured image backend."""
    from torchvision import get_image_backend
    loader = accimage_loader if get_image_backend() == 'accimage' else image_loader
    return loader(path)
class ImageFolder(DatasetFolder):
    """Dataset for images stored one class per directory: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/cat/123.png
        root/cat/[...]/asd932_.png

    Args:
        root (string): Root directory path.
        transform (callable, optional): transform applied to each loaded PIL
            image, e.g. ``transforms.RandomCrop``.
        target_transform (callable, optional): transform applied to each target.
        loader (callable, optional): loads an image given its path.
        is_valid_file (callable, optional): predicate deciding whether a path
            is a valid image file (used to skip corrupt files).

    Attributes:
        classes (list): class names, sorted alphabetically.
        class_to_idx (dict): maps class name to class index.
        imgs (list): (image path, class_index) tuples.
    """

    def __init__(
        self,
        root: str,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        loader: Callable[[str], Any] = default_loader,
        is_valid_file: Optional[Callable[[str], bool]] = None,
    ):
        # DatasetFolder forbids passing both mechanisms: when a custom
        # validity predicate is given, disable the extension filter.
        extensions = IMG_EXTENSIONS if is_valid_file is None else None
        super(ImageFolder, self).__init__(root, loader, extensions,
                                          transform=transform,
                                          target_transform=target_transform,
                                          is_valid_file=is_valid_file)
        self.imgs = self.samples
|
from rest_framework.renderers import JSONRenderer
class DatatablesRenderer(JSONRenderer):
    """DRF renderer emitting the JSON envelope expected by jQuery DataTables.

    Wraps the view's payload into ``data`` / ``recordsTotal`` /
    ``recordsFiltered`` / ``draw`` keys and strips fields the client did not
    request via ``columns[i][data]`` query parameters.
    """
    media_type = 'application/json'
    format = 'datatables'
    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render `data` into JSON, returning a bytestring.
        """
        if data is None:
            return bytes()
        request = renderer_context['request']
        new_data = {}
        # view may be absent from the renderer context; hasattr on None is safe below.
        view = renderer_context.get('view')
        if 'recordsTotal' not in data:
            # pagination was not used, let's fix the data dict
            if 'results' in data:
                results = data['results']
                count = data['count'] if 'count' in data else len(results)
            else:
                results = data
                count = len(results)
            new_data['data'] = results
            # Prefer counts computed by the datatables filter backend when present.
            if view and hasattr(view, '_datatables_filtered_count'):
                count = view._datatables_filtered_count
            if view and hasattr(view, '_datatables_total_count'):
                total_count = view._datatables_total_count
            else:
                total_count = count
            new_data['recordsFiltered'] = count
            new_data['recordsTotal'] = total_count
        else:
            new_data = data
        # add datatables "draw" parameter
        new_data['draw'] = int(request.query_params.get('draw', '1'))
        serializer_class = None
        if hasattr(view, 'get_serializer_class'):
            serializer_class = view.get_serializer_class()
        elif hasattr(view, 'serializer_class'):
            serializer_class = view.serializer_class
        # Fields listed in Meta.datatables_always_serialize survive filtering.
        if serializer_class is not None and hasattr(serializer_class, 'Meta'):
            force_serialize = getattr(
                serializer_class.Meta, 'datatables_always_serialize', ()
            )
        else:
            force_serialize = ()
        self._filter_unused_fields(request, new_data, force_serialize)
        return super(DatatablesRenderer, self).render(
            new_data, accepted_media_type, renderer_context
        )
    def _filter_unused_fields(self, request, result, force_serialize):
        """Drop keys from result['data'] rows not named in columns[i][data].

        Keys starting with 'DT_Row' and keys in *force_serialize* are kept.
        Rows without .keys() (non-dicts) are left untouched.
        """
        cols = []
        i = 0
        # Collect requested column names; 'a.b' dotted paths count as 'a'.
        while True:
            col = request.query_params.get('columns[%d][data]' % i)
            if col is None:
                break
            cols.append(col.split('.').pop(0))
            i += 1
        if len(cols):
            data = result['data']
            for i, item in enumerate(data):
                try:
                    # set() snapshots the keys so popping during iteration is safe
                    keys = set(item.keys())
                except AttributeError:
                    continue
                for k in keys:
                    if (k not in cols
                            and not k.startswith('DT_Row')
                            and k not in force_serialize):
                        result['data'][i].pop(k)
|
from components.sidebar import show_sidebar
from pages.datasets import show_datasets
from pages.home import show_home
from pages.data_visualization import show_data_visualization
from pages.species import show_species
from utils.constants import NAV_HOME, NAV_DATA, NAV_VIZ, NAV_SPECIES
import streamlit as st
st.set_page_config(
    page_icon="🐧",
    page_title="Palmer Archipelago Data Study",
    layout="wide",
    initial_sidebar_state="expanded"
)

show_sidebar()

# Map each navigation constant to its page-rendering function and dispatch
# on the page stored in the session state by the sidebar.
_PAGE_HANDLERS = {
    NAV_VIZ: show_data_visualization,
    NAV_DATA: show_datasets,
    NAV_HOME: show_home,
    NAV_SPECIES: show_species,
}
page = st.session_state["page"]
handler = _PAGE_HANDLERS.get(page)
if handler is not None:
    handler()
|
import urllib.request
import urllib.error
import urllib.parse
import json
from .market import Market
class BtceUSD(Market):
    """Order-book depth fetcher for the btc-e BTC/USD market."""

    def __init__(self):
        super(BtceUSD, self).__init__("USD")
        # bitcoin central maximum call / day = 5000
        # keep 2500 for other operations
        self.update_rate = 60

    def update_depth(self):
        """Fetch the current order book from the btc-e API into self.depth."""
        response = urllib.request.urlopen('https://btc-e.com/api/2/btc_usd/depth')
        payload = json.loads(response.read().decode('utf8'))
        self.depth = self.format_depth(payload)

    def sort_and_format(self, l, reverse=False):
        """Sort raw [price, amount] pairs by price and convert to dicts."""
        l.sort(key=lambda entry: float(entry[0]), reverse=reverse)
        return [{'price': float(entry[0]), 'amount': float(entry[1])} for entry in l]

    def format_depth(self, depth):
        """Return {'asks': ..., 'bids': ...} with bids descending, asks ascending."""
        bids = self.sort_and_format(depth['bids'], True)
        asks = self.sort_and_format(depth['asks'], False)
        return {'asks': asks, 'bids': bids}
# Manual smoke test: instantiate the market and print its ticker.
# NOTE(review): get_ticker is presumably defined on the Market base class —
# not visible here; verify before relying on this entry point.
if __name__ == "__main__":
    market = BtceUSD()
    print(market.get_ticker())
|
import plotly.figure_factory as ff
import pandas as pd
import csv
# Load the sample data and show a kernel-density plot of the weight column.
df = pd.read_csv("data.csv")
weights = df['Weight(Pounds)'].tolist()
fig = ff.create_distplot([weights], ['Weight'], show_hist=False)
fig.show()
|
#!/usr/bin/env python
"""
loading of various data for VAMP project
"""
import os
import numpy as np
### for loading images to numpy arrays with PIL
from scipy import misc
from calc.common import PIX_ERR
def read_grey_image(filename):
    '''read single greyscale image

    Returns (image, message): the image array and None on success, or
    (None, error message) on failure.

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 — this
    function requires an old SciPy (with PIL); confirm the pinned version.
    '''
    mesg = None
    try:
        img = misc.imread(filename) #8bit as uint8, 16bit as int32
    except(IOError):
        mesg = "Error: Can't open file %s!"%filename
        return None, mesg
    ### check for greyscale
    if img.ndim > 2:
        mesg = "Error: file %s is not greyscale!"%filename
        return None, mesg
    ### check if the image was more than 8-bit - scipy/PIL has a bug on it
    if img.dtype == np.int32:
        # round-trip through float and back to int32 to normalise the values
        img = np.asarray(np.asfarray(img), np.int32)
    return img, mesg
def read_conf_file(filename):
    """Parse a whitespace-delimited key/value configuration file.

    Each non-blank line is split on the first run of whitespace into a key
    and a value; the value keeps any internal whitespace. Lines without a
    value part are skipped. Returns an (possibly empty) dict; an unreadable
    file yields an empty dict, matching the original best-effort behaviour.
    """
    imgcfg = {}
    try:
        # 'with' guarantees the handle is closed even if reading fails
        # (the original left the file open on a read error).
        with open(filename, 'r') as conffile:
            for line in conffile:
                parts = line.split(None, 1)
                # Skip blank or key-only lines instead of crashing on unpack.
                if len(parts) != 2:
                    continue
                key, value = parts
                imgcfg[key] = value.rstrip('\n')
    except IOError:
        return imgcfg
    return imgcfg
def preproc_images(images, orientation, crop):
    """Crop and reorient a stack of images.

    Args:
        images: array of shape (n_images, height, width).
        orientation: member of vampy.SIDES ('right', 'top', 'bottom' rotate;
            any other value leaves the orientation unchanged).
        crop: dict keyed by vampy.SIDES with relative crop amounts
            ('top', 'bottom', 'left', 'right'). The caller's dict is NOT
            modified (the original mutated it in place — fixed here).

    Returns:
        The cropped, rotated image stack with the image axis first.
    """
    # Work on a copy so the caller's crop dict is left untouched.
    crop = dict(crop)
    crop['bottom'] = images.shape[1] - crop['bottom']
    crop['right'] = images.shape[2] - crop['right']
    images = images[:, crop['top']:crop['bottom'],
                    crop['left']:crop['right']]
    ### rotate according to orientation flag
    rolled = np.rollaxis(np.rollaxis(images, 1), 2, 1)  # make first axis last
    if orientation == 'right':
        rolled = np.rot90(rolled, 2)  # rot90 rotates only 2 first axes
    if orientation == 'top':
        rolled = np.rot90(rolled, 1)
    elif orientation == 'bottom':
        rolled = np.rot90(rolled, 3)
    return np.rollaxis(rolled, 2)  # bring original first axis back from last
def read_pressures_file(filename, stage):
    """Read pressures from the file used during acquisition.

    @param filename: path of the text file readable by numpy.loadtxt
    @param stage: index of the pressure column to return

    Returns (pressures_array, None) on success, or (None, exception) when
    the file cannot be read or parsed.

    Note: the original used Python-2 ``except IOError, value`` syntax, which
    is a SyntaxError on Python 3; rewritten with ``as`` (valid on both).
    """
    try:
        pressures = np.loadtxt(filename, unpack=True)
    except (IOError, ValueError) as value:
        return None, value
    return np.asarray(pressures)[stage], None
def read_pressures_filenames(filenames, stage):
    """Extract acquisition pressures encoded in image file names.

    File names are expected to look like ``IND-P1-P2-SUB.ext`` (underscores
    are treated like dashes). Runs of identical consecutive pressures are
    collapsed to a single value.

    Args:
        filenames: iterable of image file paths.
        stage: 0 to read the first pressure field, 1 for the second.

    Returns:
        (pressures, aver, mesg): unique pressures as a numpy array, the
        number of images per pressure, and None — or (None, None, message)
        on any failure.

    Fixes vs. original: the float-conversion error path returned a 2-tuple
    ``(None, mesg)`` while every other path returns a 3-tuple; ``map``/``/``
    Python-2 semantics (``len`` of a map object, float division) are
    replaced with list comprehensions and integer division.
    """
    mesg = None
    fnames = [os.path.basename(f) for f in filenames]
    pressstrings = [os.path.splitext(f)[0] for f in fnames]
    try:
        ind, press1, press2, subind = zip(
            *[x.replace('_', '-').split('-') for x in pressstrings])
    except ValueError:
        mesg = 'Wrong filenames format!'
        return None, None, mesg
    # Images per acquisition index (integer count).
    aver = len(filenames) // len(np.unique(ind))
    if stage == 0:
        press = press1
    elif stage == 1:
        press = press2
    else:
        mesg = 'Wrong stage number supplied!'
        return None, None, mesg
    try:
        pressures = [float(p) for p in press]
    except ValueError:
        mesg = 'Wrong filenames format!'
        # Original bug: returned (None, mesg) here — now a consistent 3-tuple.
        return None, None, mesg
    # Reduce sequences of identical values to a single value each.
    pressure = [
        x for i, x in enumerate(pressures) if i == 0 or x != pressures[i - 1]]
    if aver * len(pressure) != len(pressures):
        mesg = 'Different number of images per pressure!'
        return None, None, mesg
    return np.asarray(pressure), aver, mesg
def read_tensions(filename):
    """Read a tensions data file into a small result dict.

    Expects a numpy.loadtxt-readable file with at least five columns; after
    unpacking, columns 1-2 are the dilation (value, error) and columns 3-4
    the tension (value, error).

    Returns (tensiondata, None) on success or (None, exception) on failure.

    Note: the original used Python-2 ``except IOError, value`` syntax, which
    is a SyntaxError on Python 3; rewritten with ``as`` (valid on both).
    """
    try:
        data = np.loadtxt(filename, unpack=True)
    except (IOError, ValueError) as value:
        return None, value
    tensiondata = {}
    tensiondata['tensdim'] = ('tension units', 'tension units')
    tensiondata['dilation'] = data[1:3]
    tensiondata['tension'] = data[3:5]
    return tensiondata, None
def read_geometry_simple(filename):
    """Read hand-measured geometry file
    This must be a tab-separated file (with #-comments if necessary) with 5 columns:
    pipette radius,
    position of aspirated vesicle part,
    position of pipette mouth,
    position of outside part of the vesicle,
    metric of the axis (for small tilts this is cosine of the tilt angle)
    The distances are assumed to already corrected by the tilt (as ImageJ does it),
    so that the metrics is 1 and it's error is zero.
    The single errors are taken to be sqrt(2)*PIX_ERR for pipette radius,
    and PIX_ERR for everything else.
    Returns dictionary of geometry data as accepted by analysis routines
    and a message with reason of failure if any.

    Note: the original used Python-2 ``except IOError, value`` syntax, which
    is a SyntaxError on Python 3; rewritten with ``as`` (valid on both).
    """
    try:
        data = np.loadtxt(filename, unpack=True)
    except (IOError, ValueError) as value:
        return None, value
    piprad, asp, pip, ves, metrics = data
    out = {}
    # Each entry pairs the measured values with their pixel-error estimates.
    out['piprads'] = np.asarray(
        (piprad, PIX_ERR * np.sqrt(2) * np.ones_like(piprad)))
    out['asps'] = np.asarray((asp, PIX_ERR * np.ones_like(asp)))
    out['pips'] = np.asarray((pip, PIX_ERR * np.ones_like(pip)))
    out['vess'] = np.asarray((ves, PIX_ERR * np.ones_like(ves)))
    out['metrics'] = np.asarray((metrics, np.zeros_like(metrics)))
    return out, None
def read_geometry_full():
    # TODO: not implemented — placeholder for the full (non-simple) geometry reader.
    pass
|
from django.shortcuts import HttpResponse,render
from django.db.models import Q
from apps.common.func.LanguageFunc import *
from apps.common.func.CommonFunc import *
from apps.common.config import commonWebConfig
from apps.config.services.http_confService import HttpConfService
from apps.task.services.HTTP_taskService import HTTP_taskService
from apps.config.services.businessLineService import BusinessService
from apps.config.services.modulesService import ModulesService
from apps.config.services.http_confService import HttpConfService
from apps.config.services.sourceService import SourceService
from apps.config.services.serviceConfService import ServiceConfService
from apps.interface.services.HTTP_interfaceService import HTTP_interfaceService
from apps.test_case.services.HTTP_test_caseService import HTTP_test_caseService
from apps.task.services.HTTP_task_executeService import HTTP_task_executeService
from urllib import parse
from apps.common.func.WebFunc import getServiceConf
from django.db.utils import *
import json,os
from apps.common.func.WebFunc import *
# from Redis.config.RedisDBConfig import *
from apps.version_manage.services.common_service import VersionService
from all_models.models.A0007_task import *
from apps.common.model.RedisDBConfig import *
from apps.common.decorator.permission_normal_funcitons import *
def http_teskCheck(request):
    """Render the HTTP task query/overview page.

    NOTE(review): "tesk" looks like a typo for "task"; kept because URL
    routes elsewhere presumably reference this name — confirm before renaming.
    """
    langDict = getLangTextDict(request)
    context = {}
    if not isRelease:
        context["env"] = "test"
    # Marks the active sidebar entry in the template.
    context["taskCheck"] = "current-page"
    context["checkBusinessLine"] = dbModelListToListDict(BusinessService.getAllBusinessLine())
    context["checkModules"] = dbModelListToListDict(ModulesService.getAllModules())
    # Localised page text.
    text = {}
    text["pageTitle"] = langDict["web"]["httpTaskPageHeadings_check"]
    context["text"] = text
    context["page"] = 1
    return render(request, "InterfaceTest/HTTPTask/HTTP_taskCheck.html", context)
def http_taskListCheck(request):
    """Return one page of the filtered task list as rendered HTML.

    Builds a parameterised LIKE query from the posted ``checkArr`` filter
    dict; special keys (taskFounder / module / businessLine) map to fixed
    columns, any other key is interpolated as a column name.

    NOTE(review): ``orderBy`` and the fall-through ``key`` are interpolated
    into the SQL string; they are screened by isSqlInjectable, but verify
    that screen is sufficient for ORDER BY / identifier positions.
    """
    page = request.POST.get("page")
    if isInt(page):
        page = int(page)
    else:
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("checkArr")))
    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        return HttpResponse("<script>alert('查询条件非法');</script>")
    # Current version reads tb_task; named versions read the snapshot table.
    if VersionService.isCurrentVersion(request):
        tbName = "tb_task"
        versionCondition = ""
    else:
        tbName = "tb_version_task"
        versionCondition = "and versionName='%s'" % request.session.get("version")
    execSql = "SELECT t.*,addByName userName from %s t WHERE t.state=1 %s " %(tbName,versionCondition)
    checkList = []
    # checkList collects the LIKE parameters in the same order the
    # placeholders are appended to execSql.
    for key in checkArr:
        if checkArr[key] == "":
            continue
        elif key == "taskFounder" :
            # Matches either the login name or the display name (two params).
            checkList.append("%%%s%%" % checkArr[key])
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and (t.addBy LIKE %s or t.addByName LIKE %s) """
            continue
        elif key == "module":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and t.modulesGroup LIKE %s """
            continue
        elif key == "businessLine":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and t.businessLineGroup LIKE %s """
            continue
        checkList.append("%%%s%%" % checkArr[key])
        execSql += """ and t.%s """ % key
        execSql += """ LIKE %s"""
    execSql += """ ORDER BY %s""" % orderBy
    context = pagination(sqlStr=execSql,attrList=checkList,page=page,pageNum=commonWebConfig.taskPageNum,request=request)
    return render(request,"InterfaceTest/HTTPTask/SubPages/HTTP_taskList_check_page.html",context)
def getTaskForId(request):
    """Render the task-run details page for the task id in the query string."""
    langDict = getLangTextDict(request)
    task_id = request.GET.get("id")
    if VersionService.isCurrentVersion(request):
        taskData = HTTP_taskService.getTaskForId(task_id)
    else:
        taskData = HTTP_taskService.getVersionTaskForId(task_id, VersionService.getVersionName(request))
    if not taskData:
        return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON).toJson())
    # Merge the creator's user fields into the task dict (its own id wins).
    taskDataDict = dbModelToDict(taskData)
    userDict = dbModelToDict(taskData.addBy)
    del userDict["id"]
    taskDataDict.update(userDict)
    context = {}
    context.update(getServiceConf(request))
    context["httpConf"] = HttpConfService.queryHttpConfSort(request)
    context["taskData"] = taskDataDict
    context["option"] = request.GET.get("option")
    return render(request, "InterfaceTest/HTTPTask/SubPages/task_Run_DetailsPage.html", context)
@single_add_page_permission
def taskAdd(request, context):
    """Render the HTTP task creation ("add") page.

    The permission decorator supplies *context*; this view fills it with the
    lookup lists and localised headings the template needs.
    """
    langDict = getLangTextDict(request)
    context["interfacePage"] = 1
    context["testCasePage"] = 1
    context["option"] = "add"
    if not isRelease:
        context["env"] = "test"
    # Marks the active sidebar entry in the template.
    context["taskAdd"] = "current-page"
    context["checkBusinessLine"] = dbModelListToListDict(BusinessService.getAllBusinessLine())
    context["checkModules"] = dbModelListToListDict(ModulesService.getAllModules())
    context.update(getServiceConf(request))
    # Localised page headings for the "add" variant.
    text = {}
    text["pageTitle"] = langDict["web"]["httpTaskPageHeadings_%s" % context["option"]]
    text["subPageTitle"] = langDict["web"]["httpTaskSubPageTitle_%s" % context["option"]]
    context["text"] = text
    return render(request, "InterfaceTest/HTTPTask/HTTP_taskAdd.html", context)
def queryPeopleTask(request):
    """Return the current user's task list page as an ApiReturn JSON response."""
    langDict = getLangTextDict(request)
    pageNum = int(request.GET.get("num"))
    loginName = request.session.get("loginName")
    if VersionService.isCurrentVersion(request):
        attrData = HTTP_taskService.queryPeopleTask(pageNum, commonWebConfig.queryPeopleInterface, loginName)
    else:
        attrData = HTTP_taskService.queryVersionPeopleTask(pageNum, commonWebConfig.queryPeopleInterface, loginName, VersionService.getVersionName(request))
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK, langDict["web"]["httpTestCaseSuccess"], attrData).toJson())
def httpTestCaseSelectInterfaceCheckList(request):
    """Return one page of filtered HTTP interfaces for the selection dialog.

    Builds a parameterised LIKE query from the posted ``checkArr`` dict, with
    special handling for caseFounder / module / businessLine; other keys are
    interpolated as interface-table column names.

    NOTE(review): ``orderBy`` and fall-through keys are interpolated into the
    SQL after the isSqlInjectable screen — verify that screen is sufficient.
    """
    page = request.POST.get("interfacePage")
    if isInt(page):
        page = int(page)
    else:
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("checkArr")))
    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        return HttpResponse("<script>alert('查询条件非法');</script>")
    # Current version reads the live table; named versions read the snapshot.
    if VersionService.isCurrentVersion(request):
        tbName = "tb_http_interface"
        versionCondition = ""
    else:
        tbName = "tb_version_http_interface"
        versionCondition = "and versionName='%s'" % request.session.get("version")
    execSql = "SELECT i.*,u.userName from %s i LEFT JOIN tb_user u ON i.addBy = u.loginName LEFT JOIN tb_modules m ON i.moduleId = m.id LEFT JOIN tb_business_line b ON i.businessLineId = b.id WHERE 1=1 and i.state=1 %s " % (tbName,versionCondition)
    checkList = []
    # checkList collects LIKE parameters in placeholder order.
    for key in checkArr:
        if checkArr[key] == "":
            continue
        elif key == "caseFounder":
            # Matches either the login name or the display name (two params).
            checkList.append("%%%s%%" % checkArr[key])
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and (i.addBy LIKE %s or u.userName LIKE %s) """
            continue
        elif key == "module":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and m.moduleName LIKE %s """
            continue
        elif key == "businessLine":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and b.bussinessLineName LIKE %s """
            continue
        checkList.append("%%%s%%" % checkArr[key])
        execSql += """ and i.%s """ % key
        execSql += """ LIKE %s"""
    execSql += """ ORDER BY %s""" % orderBy
    context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.taskCheckInterfaceSelectPage)
    response = render(request,"InterfaceTest/HTTPTestCase/SubPages/HTTP_TestCase_Select_interface_list_check_page.html", context)
    return response
def httpTaskSelectTestCaseCheckList(request):
    """Return one page of filtered HTTP test cases for the selection dialog.

    Mirrors httpTestCaseSelectInterfaceCheckList but queries the test-case
    tables; builds a parameterised LIKE query from the posted ``checkArr``.

    NOTE(review): ``orderBy`` and fall-through keys are interpolated into the
    SQL after the isSqlInjectable screen — verify that screen is sufficient.
    """
    page = request.POST.get("testCasePage")
    if isInt(page):
        page = int(page)
    else:
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("checkArr")))
    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        return HttpResponse("<script>alert('查询条件非法');</script>")
    # Current version reads the live table; named versions read the snapshot.
    if VersionService.isCurrentVersion(request):
        tbName = "tb_http_testcase"
        versionCondition = ""
    else:
        tbName = "tb_version_http_testcase"
        versionCondition = "and versionName='%s'" % request.session.get("version")
    execSql = "SELECT t.*,u.userName from %s t LEFT JOIN tb_user u ON t.addBy = u.loginName LEFT JOIN tb_modules m ON t.moduleId = m.id LEFT JOIN tb_business_line b ON t.businessLineId = b.id WHERE 1=1 and t.state=1 %s " %(tbName,versionCondition)
    checkList = []
    # checkList collects LIKE parameters in placeholder order.
    for key in checkArr:
        if checkArr[key] == "":
            continue
        elif key == "caseFounder":
            # Matches either the login name or the display name (two params).
            checkList.append("%%%s%%" % checkArr[key])
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and (t.addBy LIKE %s or u.userName LIKE %s) """
            continue
        elif key == "module":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and m.moduleName LIKE %s """
            continue
        elif key == "businessLine":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and b.bussinessLineName LIKE %s """
            continue
        checkList.append("%%%s%%" % checkArr[key])
        execSql += """ and t.%s """ % key
        execSql += """ LIKE %s"""
    execSql += """ ORDER BY %s""" % orderBy
    context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.interfaceSelectPageNum)
    response = render(request,"InterfaceTest/HTTPTask/SubPages/HTTP_Task_Select_TestCase_list_check_page.html", context)
    return response
@single_data_permission(TbTask,TbVersionTask)
def taskAddData(request):
    """Persist a new HTTP task from the posted JSON body.

    Saves into the current-version table or the named-version snapshot table
    depending on the session's version, and returns an ApiReturn JSON
    response. (Dead commented-out edit paths removed — editing lives in
    taskDataSaveEdit.)
    """
    taskData = json.loads(request.body)
    taskData = HTTP_taskService.taskDataToDict(request, taskData)
    # Stamp the record with the creating user from the session.
    taskData["addBy_id"] = request.session.get("loginName")
    taskData["addByName"] = request.session.get("userName")
    if VersionService.isCurrentVersion(request):
        createTask = HTTP_taskService.addTask(taskData)
    else:
        createTask = HTTP_taskService.addVersionTask(taskData, VersionService.getVersionName(request))
    if createTask.id >= 1:
        return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
    return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "任务保存失败").toJson())
@single_data_permission(TbTask,TbVersionTask)
def taskDataSaveEdit(request):
    """Save edits to an existing HTTP task and return an ApiReturn response."""
    taskData = json.loads(request.body)
    taskData = HTTP_taskService.taskDataToDict(request, taskData)
    # Record the modification time, then route to the table matching the
    # session's version (live table vs. named-version snapshot).
    taskData["modTime"] = datetime.datetime.now()
    if VersionService.isCurrentVersion(request):
        editResult = HTTP_taskService.editTask(taskData)
    else:
        editResult = HTTP_taskService.editVersionTask(taskData, VersionService.getVersionName(request))
    if editResult == 1:
        return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
    return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "任务编辑保存失败").toJson())
@single_page_permission
def operationTask(request,context):
    """Render the task add/edit page for an existing task.

    The permission decorator supplies *context*; the task's creator is looked
    up so the template can gate edit controls, and a missing/invalid id falls
    through to the 404 page.
    """
    langDict = getLangTextDict(request)
    context["option"] = request.GET.get("option")
    context["page"] = 1
    if not isRelease:
        context["env"] = "test"
    try:
        # Resolve the task's creator from the live or version snapshot table.
        if VersionService.isCurrentVersion(request):
            context["dataAddBy"] = HTTP_taskService.getTaskForId(request.GET.get("id")).addBy.loginName
        else:
            context["dataAddBy"] = HTTP_taskService.getVersionTaskForId(request.GET.get("id"),request.session.get("version")).addBy.loginName
    except Exception as e:
        print(traceback.format_exc())
        return render(request, "permission/page_404.html")
    context["checkBusinessLine"] = dbModelListToListDict(BusinessService.getAllBusinessLine())
    context["checkModules"] = dbModelListToListDict(ModulesService.getAllModules())
    context["id"] = request.GET.get("id")
    context["interfacePage"] = 1
    context["testCasePage"] = 1
    context["taskAdd"] = "current-page"
    # Localised page heading for the requested option (e.g. edit/copy).
    text = {}
    text["pageTitle"] = langDict["web"]["httpTaskPageHeadings_%s" % context["option"]]
    context["text"] = text
    context.update(getServiceConf(request))
    return render(request, "InterfaceTest/HTTPTask/HTTP_taskAdd.html", context)
def getTaskData(request):
    """Return full task details (interfaces, test cases, priority vars) as JSON.

    Two near-identical branches: the first serves the current version, the
    second a named version snapshot. Each resolves the task's interface and
    test-case id lists into full records and extracts per-service priority
    variable overrides from the highPriorityVARS text blob.
    """
    id = request.GET.get("id")
    if VersionService.isCurrentVersion(request):
        taskDataModel = HTTP_taskService.findTaskForId(id)[0]
        taskData = dbModelToDict(taskDataModel)
        serviceConf = ServiceConfService.queryServiceConfSort(request)
        highPriorityVARS = taskData["highPriorityVARS"]
        # Extract the [CONF=...]...[ENDCONF] section for each service key.
        taskData["priorityCommon"] = substr(highPriorityVARS, "[CONF=common]", "[ENDCONF]")
        taskData["confPriority"] = {}
        for i in range(0, len(serviceConf)):
            if serviceConf[i]["serviceConfKey"] not in highPriorityVARS:
                taskData["confPriority"]["priority%s" % serviceConf[i]["serviceConfKey"]] = ""
                continue
            taskData["confPriority"]["priority%s" % serviceConf[i]["serviceConfKey"]] = substr(highPriorityVARS,"[CONF=%s]" % serviceConf[i]["serviceConfKey"],"[ENDCONF]")
        # Resolve the comma-separated interface id list into full records.
        taskData["interfaceList"] = []
        if taskData["taskInterfaces"]:
            taskInterfaceList = taskData["taskInterfaces"].split(",")
            for i in range(0,len(taskInterfaceList)):
                try:
                    thisInterface = HTTP_interfaceService.getInterfaceForInterfaceId(taskInterfaceList[i])
                    if not thisInterface:
                        continue
                    taskData["interfaceList"].append(dbModelToDict(thisInterface))
                    addBy = dbModelToDict(thisInterface.addBy)
                    del addBy["id"]
                    del addBy["state"]
                    taskData["interfaceList"][i].update(addBy)
                except Exception as e:
                    continue
        # Resolve the comma-separated test-case id list into full records.
        taskData["testCaseList"] = []
        if taskData["taskTestcases"]:
            taskTestCaseList = taskData["taskTestcases"].split(",")
            for i in range(0,len(taskTestCaseList)):
                try:
                    thisTestCase = HTTP_test_caseService.getTestCaseForTestCaseId(taskTestCaseList[i])
                    taskData["testCaseList"].append(dbModelToDict(thisTestCase))
                    addBy = dbModelToDict(thisTestCase.addBy)
                    del addBy["id"]
                    del addBy["state"]
                    taskData["testCaseList"][i].update(addBy)
                except Exception as e:
                    continue
        return HttpResponse(ApiReturn(ApiReturn.CODE_OK,body=taskData).toJson())
    else:
        taskDataModel = HTTP_taskService.findVersionTaskForId(id,VersionService.getVersionName(request))[0]
        taskData = dbModelToDict(taskDataModel)
        serviceConf = ServiceConfService.queryServiceConfSort(request)
        highPriorityVARS = taskData["highPriorityVARS"]
        taskData["priorityCommon"] = substr(highPriorityVARS, "[CONF=common]", "[ENDCONF]")
        taskData["confPriority"] = {}
        for i in range(0, len(serviceConf)):
            if serviceConf[i]["serviceConfKey"] not in highPriorityVARS:
                taskData["confPriority"]["priority%s" % serviceConf[i]["serviceConfKey"]] = ""
                continue
            taskData["confPriority"]["priority%s" % serviceConf[i]["serviceConfKey"]] = substr(highPriorityVARS,"[CONF=%s]" % serviceConf[i]["serviceConfKey"],"[ENDCONF]")
        taskData["interfaceList"] = []
        if taskData["taskInterfaces"]:
            taskInterfaceList = taskData["taskInterfaces"].split(",")
            for i in range(0,len(taskInterfaceList)):
                try:
                    thisInterface = HTTP_interfaceService.getVersionInterfaceForInterfaceId(taskInterfaceList[i],VersionService.getVersionName(request))
                    if not thisInterface:
                        continue
                    taskData["interfaceList"].append(dbModelToDict(thisInterface))
                    addBy = dbModelToDict(thisInterface.addBy)
                    del addBy["id"]
                    del addBy["state"]
                    taskData["interfaceList"][i].update(addBy)
                    # print( taskData["interfaceList"][i])
                except Exception as e:
                    # print(addBy)
                    # taskData["interfaceList"][i].update(addBy)
                    # Placeholder keeps list indices aligned with the id list.
                    taskData["interfaceList"].append('')
                    continue
        taskData["testCaseList"] = []
        if taskData["taskTestcases"]:
            taskTestCaseList = taskData["taskTestcases"].split(",")
            for i in range(0,len(taskTestCaseList)):
                try:
                    thisTestCase = HTTP_test_caseService.getVersionTestCaseForTestCaseId(taskTestCaseList[i],VersionService.getVersionName(request))
                    taskData["testCaseList"].append(dbModelToDict(thisTestCase))
                    addBy = dbModelToDict(thisTestCase.addBy)
                    del addBy["id"]
                    del addBy["state"]
                    taskData["testCaseList"][i].update(addBy)
                except Exception as e:
                    # NOTE(review): this appends to interfaceList inside the
                    # test-case loop — likely a copy-paste bug (the current-
                    # version branch above just continues). Confirm intended
                    # behaviour before changing.
                    taskData["interfaceList"].append('')
                    continue
        return HttpResponse(ApiReturn(ApiReturn.CODE_OK,body=taskData).toJson())
@single_data_permission(TbTask,TbVersionTask)
def taskDel(request):
    """Delete the task identified by the ``id`` query parameter.

    Routes to the live table or the named-version snapshot depending on the
    session's version, returning an ApiReturn JSON response either way.
    """
    task_id = request.GET.get("id")
    current = VersionService.isCurrentVersion(request)
    # Look the task up first so a bad id yields a descriptive error.
    try:
        if current:
            taskData = HTTP_taskService.getTaskForId(task_id)
        else:
            taskData = HTTP_taskService.getVersionTaskById(task_id)
    except Exception as e:
        return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "参数id错误 %s" % e).toJson())
    if current:
        deleted = HTTP_taskService.delTaskForId(request, task_id) == 1
    else:
        deleted = HTTP_taskService.delVersionTaskForId(request, task_id) == 1
    if deleted:
        return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
    return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "删除失败,请联系管理员").toJson())
def taskDelTheSameCase(request):
    """Remove duplicate interface/test-case ids from a task.

    Looks up the current-version task or the historical version task
    depending on the session's version; only the task's creator may
    de-duplicate it.  Duplicates are removed while PRESERVING the original
    execution order (the previous implementation used set(), which
    scrambled the order of interfaces and test cases).
    """
    id = request.GET.get("id")
    if VersionService.isCurrentVersion(request):
        try:
            taskData = HTTP_taskService.getTaskForId(id)
        except Exception as e:
            return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "参数id错误 %s" % e).toJson())
        if request.session.get("loginName") != taskData.addBy.loginName:
            return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "只能去重自己创建的任务").toJson())
    else:
        try:
            taskData = HTTP_taskService.getVersionTaskById(id)
        except Exception as e:
            return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "参数id错误 %s" % e).toJson())
        if request.session.get("loginName") != taskData.addBy.loginName:
            return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "只能去重自己创建的任务").toJson())
    # dict.fromkeys is insertion-order preserving, so the first occurrence
    # of each id keeps its position in the comma-separated list.
    taskData.taskInterfaces = ",".join(dict.fromkeys(taskData.taskInterfaces.split(",")))
    taskData.taskTestcases = ",".join(dict.fromkeys(taskData.taskTestcases.split(",")))
    try:
        taskData.save(force_update=True)
        return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
    except Exception as e:
        return HttpResponse(ApiReturn(ApiReturn.CODE_INTERFACE_ERROR, '去重失败!').toJson())
def taskResultCheck(request):
    """Render the HTTP task execution-result page."""
    lang = getLangTextDict(request)
    context = {
        "taskExecuteResult": "current-page",
        "checkBusinessLine": dbModelListToListDict(BusinessService.getAllBusinessLine()),
        "checkModules": dbModelListToListDict(ModulesService.getAllModules()),
        "httpConf": HttpConfService.queryHttpConfSort(request),
        # Page text for the active language.
        "text": {"pageTitle": lang["web"]["httpTaskCheckPageHeadings_check"]},
        "page": 1,
    }
    if not isRelease:
        context["env"] = "test"
    return render(request, "InterfaceTest/HTTPTask/HTTP_task_ExecResult.html", context)
def getTaskResultList(request):
    """Return one page of HTTP task execution results.

    Filter values come from the posted ``checkArr`` dict and are bound as
    SQL parameters; each returned row is decorated with progress counters
    and display text before the list sub-page is rendered.

    Fixes vs. previous revision: removed the unused timer and leftover
    debug ``print`` calls, and the dynamic column name in the fallback
    branch is now injection-checked (it is spliced into the SQL text).
    """
    page = request.POST.get("page")
    if isInt(page):
        page = int(page)
    else:
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("checkArr")))
    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        return HttpResponse("<script>alert('查询条件非法');</script>")
    execSql = "SELECT t.*,addByName userName,httpConfKeyAlias alias from tb_task_execute t WHERE (t.execStatus between 1 and 2) or (t.state=1"
    checkList = []
    for key in checkArr:
        if checkArr[key] == "":
            continue
        elif key == "taskFounder":
            checkList.append("%s%%" % checkArr[key])
            execSql += """ and t.addByName LIKE %s """
        elif key == "module":
            checkList.append("%s%%" % checkArr[key])
            execSql += """ and t.modulesGroup LIKE %s """
        elif key == "businessLine":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and t.businessLineGroup LIKE %s """
        elif key == "httpConfKey":
            checkList.append("%s" % checkArr[key])
            execSql += """ and t.httpConfKeyalias = %s """
        elif key == "taskId":
            checkList.append("%s" % checkArr[key])
            execSql += """ and t.taskId = %s """
        else:
            # SECURITY: `key` becomes part of the SQL text itself (column
            # name), so it must pass the same injection check as orderBy.
            if isSqlInjectable(key):
                return HttpResponse("<script>alert('查询条件非法');</script>")
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and t.%s LIKE %s""" % (key, "%s")
    execSql += """) ORDER BY %s,%s""" % ("t.execStatus asc", orderBy)
    context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.taskPageNum)
    for pageData in context["pageDatas"]:
        # execProgressData is "total:pass:fail:error:..." joined with ':'.
        progress = pageData["execProgressData"].split(":")
        try:
            pageData["execPercent"] = "pass"
            pageData["execColor"] = "success"
            pageData["executeCount"] = (
                int(progress[1]) + int(progress[2]) + int(progress[3]))
            pageData["passCount"] = int(progress[1])
            pageData["failCount"] = int(progress[2])
            pageData["errorCount"] = int(progress[3])
            pageData["passPercent"] = int(
                (pageData["executeCount"] / int(progress[0])) * 100)
            if int(progress[2]) > 0 or int(progress[3]) > 0:
                pageData["execPercent"] = "fail"
                pageData["execColor"] = "danger"
        except ZeroDivisionError:
            # Nothing scheduled yet: the total (progress[0]) is 0.
            pageData["passPercent"] = 0
        # The pseudo version name maps to the session's real version label.
        if pageData["version"] == "CurrentVersion":
            pageData["versionText"] = request.session.get("CurrentVersion")
        else:
            pageData["versionText"] = pageData["version"]
        # Display placeholders / yes-no labels for the template.
        if pageData["execComments"] == "":
            pageData["execComments"] = "-"
        pageData["isSaveHistoryText"] = "是" if pageData["isSaveHistory"] == 1 else "否"
        pageData["isSendEmailText"] = "是" if str(pageData["isSendEmail"])[0] == "1" else "否"
    return render(request, "InterfaceTest/HTTPTask/SubPages/task_result_list_page.html", context)
def updateTaskExecuteProgressData(request):
    """Poll live progress for a set of running task executions.

    Progress is read from Redis first; when the cache entry is missing
    (ValueError) it falls back to the TbTaskExecute row.  If any execution
    has finished (status 3/10/11) or has no progress data, the client is
    told to reload the whole page instead of patching counters in place.
    """
    taskExecuteIdList = request.POST.get("taskExecuteIds").split(",")
    redisCache = RedisCache()
    resultDict = {}
    for idIndex in taskExecuteIdList:
        try:
            # Cache keys follow the "<protocol>_taskExecute_<id>" scheme.
            selfData = redisCache.get_data("%s_taskExecute_%s" % ("HTTP",idIndex))
            selfStatus = redisCache.get_data("%s_taskExecuteStatus_%s" % ("HTTP",idIndex))
        except ValueError:
            # Cache miss: read the persisted progress from the database.
            taskExecute = TbTaskExecute.objects.get(id=idIndex)
            selfData = taskExecute.execProgressData
            selfStatus = taskExecute.execStatus
        if selfData == None or int(selfStatus) == 10 or int(selfStatus) == 11 or int(selfStatus) == 3:
            # An execution has already finished — the page must be reloaded.
            return HttpResponse(ApiReturn(ApiReturn.CODE_RELOAD).toJson())
        else:
            resultDict[idIndex] = {}
            # Progress string layout: "total:pass:fail:error:..."
            execProgressDataLen = selfData.split(":")
            resultDict[idIndex]["status"] = selfStatus
            resultDict[idIndex]["execPercent"] = "pass"
            resultDict[idIndex]["execColor"] = "success"
            resultDict[idIndex]["executeCount"] = (int(execProgressDataLen[1]) + int(execProgressDataLen[2]) + int(execProgressDataLen[3]))
            resultDict[idIndex]["passCount"] = int(execProgressDataLen[1])
            resultDict[idIndex]["failCount"] = int(execProgressDataLen[2])
            resultDict[idIndex]["errorCount"] = int(execProgressDataLen[3])
            if int(execProgressDataLen[0]) == 0:
                # Avoid division by zero before anything is scheduled.
                resultDict[idIndex]["passPercent"] = 0.00
            else:
                resultDict[idIndex]["passPercent"] = int((resultDict[idIndex]["executeCount"] / int(execProgressDataLen[0])) * 100)
            if int(execProgressDataLen[2]) > 0 or int(execProgressDataLen[3]) > 0:
                # Any failure or error flips the progress bar to "fail"/red.
                resultDict[idIndex]["execPercent"] = "fail"
                resultDict[idIndex]["execColor"] = "danger"
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK,body=resultDict).toJson())
def queryPeopleTaskExecute(request):
    """Return the requesting user's task executions for the given page."""
    texts = getLangTextDict(request)
    page_num = int(request.GET.get("num"))
    login_name = request.session.get("loginName")
    rows = HTTP_task_executeService.queryPeopleTaskExecute(
        page_num, commonWebConfig.queryPeopleInterface, login_name)
    payload = ApiReturn(ApiReturn.CODE_OK, texts["web"]["httpTestCaseSuccess"], rows)
    return HttpResponse(payload.toJson())
def getTaskRestltDetail(request):
    """Return one task-execution record flattened with its HTTP conf and creator."""
    exec_id = request.GET.get("id")
    record = HTTP_task_executeService.findTaskRestltForId(exec_id)
    merged = dbModelToDict(record)
    # Fold the related httpConfKey and addBy rows into the same flat dict.
    merged.update(dbModelToDict(record.httpConfKey))
    merged.update(dbModelToDict(record.addBy))
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK, body=json.dumps(merged)).toJson())
@sql_inject_validate
def getInterfeceListDataForTask(request):
    """Render the interface list referenced by a task (details page).

    The task's comma-separated interface ids are expanded into a chain of
    ``UNION ALL`` selects via string replacement on the id list.

    NOTE(review): the ids are spliced directly into the SQL text; they come
    from the stored task row, but this construction is injection-prone —
    consider a parameterized ``IN (...)`` query.
    """
    id = request.GET.get("id")
    if VersionService.isCurrentVersion(request):
        taskDataModel = HTTP_taskService.getTaskForId(id)
        taskData = dbModelToDict(taskDataModel)
        # "a,b" -> "a' union all select ... where interfaceId = 'b"
        getInterFaceDataSql = taskData["taskInterfaces"].replace(",","' union all select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '")
        sql = "select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '%s'" % getInterFaceDataSql
    else:
        # Historical versions read from the versioned interface table and
        # additionally filter by the session's version name.
        taskDataModel = HTTP_taskService.getVersionTaskById(id)
        taskData = dbModelToDict(taskDataModel)
        getInterFaceDataSql = taskData["taskInterfaces"].replace(",","' union all select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_version_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '")
        sql = "select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_version_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '%s' and versionName='%s'" % (getInterFaceDataSql,VersionService.getVersionName(request))
    taskInterfaceListData = executeSqlGetDict(sql)
    response = render(request,"InterfaceTest/HTTPTask/SubPages/HTTP_Task_Details_Select_interface_list_check_page.html", {"pageDatas":taskInterfaceListData})
    return response
def getTestCaseListDataForTask(request):
    """Render the test-case list referenced by a task (details page).

    Mirrors getInterfeceListDataForTask but for test cases: the task's
    comma-separated case ids are expanded into chained ``UNION ALL``
    selects by string replacement.

    NOTE(review): ids are spliced directly into the SQL text (stored data,
    not user input, but still injection-prone) — consider parameterizing.
    """
    id = request.GET.get("id")
    if VersionService.isCurrentVersion(request):
        taskDataModel = HTTP_taskService.getTaskForId(id)
        taskData = dbModelToDict(taskDataModel)
        getTestCaseDataSql = taskData["taskTestcases"].replace(",","' union all select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId = '")
        sql = "select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId ='%s'" % getTestCaseDataSql
    else:
        # Historical versions use the versioned test-case table filtered by
        # the session's version name.
        taskDataModel = HTTP_taskService.getVersionTaskById(id)
        taskData = dbModelToDict(taskDataModel)
        getTestCaseDataSql = taskData["taskTestcases"].replace(",","' union all select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_version_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId = '")
        sql = "select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_version_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId ='%s' and versionName='%s'" % (getTestCaseDataSql,VersionService.getVersionName(request))
    taskTestCaseListData = executeSqlGetDict(sql)
    response = render(request,"InterfaceTest/HTTPTask/SubPages/HTTP_Task_Details_Select_TestCase_list_check_page.html", {"pageDatas":taskTestCaseListData})
    return response
def getInterfeceListData(request):
    """Render the interface list for a finished task EXECUTION.

    Unlike getInterfeceListDataForTask, the version branch is decided by
    the execution record's own stored version, not the session.

    NOTE(review): ids are spliced directly into the SQL text — see the
    injection caveat on getInterfeceListDataForTask.
    """
    id = request.GET.get("id")
    taskDataModel = HTTP_task_executeService.findTaskRestltForId(id)
    taskData = dbModelToDict(taskDataModel)
    if taskDataModel.version == "CurrentVersion":
        getInterFaceDataSql = taskData["taskInterfaces"].replace(",","' union all select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '")
        sql = "select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '%s'" % getInterFaceDataSql
    else:
        # Historical executions read the versioned table filtered by the
        # version stored on the execution record.
        getInterFaceDataSql = taskData["taskInterfaces"].replace(",","' union all select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_version_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '")
        sql = "select thi.id,thi.interfaceId,thi.title,thi.casedesc,thi.url,thi.addBy,tu.userName from tb_version_http_interface thi LEFT JOIN tb_user tu on thi.addBy = tu .loginName where interfaceId = '%s' and versionName='%s'" % (getInterFaceDataSql,taskDataModel.version)
    taskInterfaceListData = executeSqlGetDict(sql)
    response = render(request,"InterfaceTest/HTTPTask/SubPages/HTTP_Task_Details_Select_interface_list_check_page.html", {"pageDatas":taskInterfaceListData})
    return response
def getTestCaseListData(request):
    """Render the test-case list for a finished task EXECUTION.

    Version branch is decided by the execution record's stored version.

    NOTE(review): ids are spliced directly into the SQL text — see the
    injection caveat on getInterfeceListDataForTask.
    """
    id = request.GET.get("id")
    taskDataModel = HTTP_task_executeService.findTaskRestltForId(id)
    taskData = dbModelToDict(taskDataModel)
    if taskDataModel.version == "CurrentVersion":
        getTestCaseDataSql = taskData["taskTestcases"].replace(",","' union all select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId = '")
        sql = "select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId ='%s'" % getTestCaseDataSql
    else:
        getTestCaseDataSql = taskData["taskTestcases"].replace(",","' union all select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_version_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId = '")
        sql = "select tht.id,tht.caseId,tht.title,tht.casedesc,tht.stepCount,tht.addBy,tu.userName from tb_version_http_testcase tht LEFT JOIN tb_user tu on tht.addBy = tu .loginName where tht.caseId ='%s' and versionName='%s'" % (getTestCaseDataSql,taskDataModel.version)
    taskTestCaseListData = executeSqlGetDict(sql)
    response = render(request,"InterfaceTest/HTTPTask/SubPages/HTTP_Task_Details_Select_TestCase_list_check_page.html", {"pageDatas":taskTestCaseListData})
    return response
def againRunTask(request):
    """Re-run a previous task execution.

    Creates a fresh execution record, seeds its progress in Redis, then
    hands the job to the execution service over TCP.  On dispatch failure
    the execution is marked failed and the cache entries are removed.
    """
    exec_id = request.GET.get("id")
    res = HTTP_task_executeService.againRunTask(
        exec_id, request.session.get("loginName"), request.session.get("userName"))
    if res == False:
        # The underlying task no longer exists.
        return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, '任务已被删除').toJson())
    result = dbModelToDict(res)
    tcpin = '{"do":3,"TaskExecuteId":%s,"TaskExecuteEnv":"%s","TaskId":"%s","protocol":"HTTP"}' % (
        result["id"], result["httpConfKey_id"], result["taskId"])
    # Seed progress ("total:pass:fail:error:...") and status before dispatch.
    RedisCache().set_data("%s_taskExecute_%s" % ("HTTP", result["id"]), "0:0:0:0:0")
    RedisCache().set_data("%s_taskExecuteStatus_%s" % ("HTTP", result["id"]), "1")
    dispatch = send_tcp_request(tcpin)
    if dispatch.code != ApiReturn.CODE_OK:
        HTTP_task_executeService.updateFailExecute(result["id"], dispatch.message)
        RedisCache().del_data("%s_taskExecute_%s" % ("HTTP", result["id"]))
        RedisCache().del_data("%s_taskExecuteStatus_%s" % ("HTTP", result["id"]))
        return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, dispatch.message).toJson())
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
def stopTaskRun(request):
    """Ask the execution service to stop a running task execution."""
    exec_id = request.GET.get("id")
    suite_exec_id = request.GET.get("taskSuiteExecuteId")
    try:
        HTTP_task_executeService.stopTaskRun(exec_id)
        # Status 10 = stopped.
        RedisCache().set_data("%s_taskExecuteStatus_%s" % ("HTTP", exec_id), "10")
    except Exception as e:
        return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "请验证id正确性%s" % e).toJson())
    tcpin = '{"do":4,"TaskExecuteId":%s,"protocol":"HTTP","TaskSuiteExecuteId":%s}' % (exec_id, suite_exec_id)
    dispatch = send_tcp_request(tcpin)
    if dispatch.code != ApiReturn.CODE_OK:
        return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, dispatch.message).toJson())
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
def taskRunAdd(request):
    """Create and dispatch one task execution per selected HTTP conf.

    Copies the task row into an execution record (one per entry in
    httpConfKey_id), seeds its progress in Redis, then notifies the
    execution service over TCP.  If any dispatch fails, the response
    reports the failure after all confs have been attempted.
    """
    if VersionService.isCurrentVersion(request):
        taskData = dbModelToDict(HTTP_taskService.getTaskForId(request.POST.get("id")))
    else:
        taskData = dbModelToDict(HTTP_taskService.getVersionTaskForId(request.POST.get("id"),VersionService.getVersionName(request)))
    # Strip row-identity fields so the dict can seed a new execution row.
    # NOTE(review): assumes both task variants expose versionName_id — the
    # current-version branch would raise KeyError otherwise; confirm.
    del taskData["versionName_id"]
    del taskData["id"]
    taskData["protocol"] = request.POST.get("protocol")
    taskData["emailList"] = request.POST.get("emailList")
    taskData["addBy_id"] = request.session.get("loginName")
    taskData["addByName"] = request.session.get("userName")
    taskData["isSaveHistory"] = request.POST.get("isSaveHistory")
    taskData["isSendEmail"] = request.POST.get("isSendEmail")
    taskData["execComments"] = request.POST.get("execComments")
    taskData["retryCount"] = request.POST.get("retryCount")
    taskData["execBy_id"] = request.session.get("loginName")
    taskData["execByName"] = request.session.get("userName")
    taskData["version"] = VersionService.getVersionName(request)
    httpConfList = request.POST.get("httpConfKey_id").split(",")
    retmsg = 0
    for httpConfIndex in range(0,len(httpConfList)):
        taskData["httpConfKey_id"] = httpConfList[httpConfIndex]
        taskData["httpConfKeyAlias"] = TbConfigHttp.objects.filter(httpConfKey=httpConfList[httpConfIndex])[0].alias
        cres = HTTP_task_executeService.taskRunAdd(taskData)
        addDataResult = dbModelToDict(cres)
        # Seed execution progress in the cache under the
        # "<protocol>_taskExecute_<executeId>" key scheme, 12h TTL.
        RedisCache().set_data("%s_taskExecute_%s" % ("HTTP",addDataResult["id"]),"0:0:0:0:0",60*60*12)
        RedisCache().set_data("%s_taskExecuteStatus_%s" % ("HTTP",addDataResult["id"]),"1",60*60*12)
        tcpin = '{"do":3,"TaskExecuteId":%s,"TaskExecuteEnv":"%s","TaskId":"%s","protocol":"HTTP"}' % (addDataResult["id"], addDataResult["httpConfKey_id"], addDataResult["taskId"])
        retApiResult = send_tcp_request(tcpin)
        if retApiResult.code != ApiReturn.CODE_OK:
            # Dispatch failed: remember the failure and drop the cache entries.
            retmsg = 1
            RedisCache().del_data("%s_taskExecute_%s" % ("HTTP",addDataResult["id"]))
            RedisCache().del_data("%s_taskExecuteStatus_%s" % ("HTTP",addDataResult["id"]))
    if retmsg == 1:
        addUserLog(request,"任务管理->任务执行->任务执行添加成功,但是执行服务出现异常,请联系管理员","FAIL")
        return HttpResponse(ApiReturn(ApiReturn.CODE_TASK_EXCEPITON, "任务执行添加成功,但是执行服务出现异常,请联系管理员").toJson())
    addUserLog(request, "任务管理->任务执行->成功", "PASS")
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
def getSelectExecuteStatus(request):
    """Return execution counts grouped by test result."""
    query = "SELECT testResult,count(*) as count from tb_task_execute GROUP BY testResult"
    rows = executeSqlGetDict(query, [])
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK, body=rows).toJson())
def contrastTaskResult(request):
    """Compare exactly two task executions and render the contrast page."""
    id_list = request.POST.get("taskId").split(",")
    if len(id_list) != 2:
        return HttpResponse(ApiReturn(ApiReturn.CODE_ERROR, message="只能选择两个任务进行比对").toJson())
    contrast = HTTP_task_executeService.contrastTask(id_list)
    # The service signals an error by returning a dict carrying "code".
    if "code" in contrast.keys():
        return HttpResponse(ApiReturn(contrast["code"], message=contrast["message"]).toJson())
    return render(request, "InterfaceTest/HTTPTask/SubPages/taskContrast.html",
                  {"taskContrastDict": contrast})
def mergeTask(request):
    """Merge several tasks' contents into the first (main) task.

    The first id in the posted ``taskId`` list is the main task; every
    other id is a sub-task whose interfaces, test cases, business lines,
    modules, sources and email lists are appended to it.  Fails when a
    sub-task repeats the main task, a task does not exist, or the combined
    interface count exceeds the configured threshold.

    Fixes vs. previous revision: the bare ``except:`` no longer swallows
    SystemExit/KeyboardInterrupt, list-building uses ``join`` instead of
    repeated concatenation, and empty interface/test-case strings no
    longer produce empty ids in the merged lists.
    """
    taskIds = request.POST.get("taskId", "")
    taskIdList = taskIds.split(",")
    # The main task must not appear again among the sub-tasks.
    if taskIdList[0] in taskIdList[1:]:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR, message="副任务中包含主任务,不能合并").toJson())
    tbModule = TbTask.objects.get(taskId=taskIdList[0])
    businessLineList = []
    moduleList = []
    emailList = []
    sourceList = []
    interfaceParts = []
    testCaseParts = []
    interfaceNum = 0
    for taskId in taskIdList:
        try:
            tmpTaskData = HTTP_taskService.getTaskForTaskId(taskId)
        except Exception:
            return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR, message="任务不存在").toJson())
        interfaceNum += tmpTaskData.interfaceNum
        if interfaceNum > commonWebConfig.maxTaskInculedInterfaceNum:
            return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR, message="合并任务包含总接口数量超过阈值%s,合并失败" % commonWebConfig.maxTaskInculedInterfaceNum).toJson())
        businessLineList.extend(BL_MD_isList(tmpTaskData.businessLineGroup))
        moduleList.extend(BL_MD_isList(tmpTaskData.modulesGroup))
        sourceList.extend(BL_MD_isList(tmpTaskData.sourceGroup))
        if tmpTaskData.emailList != "":
            emailList.extend(tmpTaskData.emailList.split(","))
        # Skip empty strings so the joined list has no empty ids.
        if tmpTaskData.taskInterfaces:
            interfaceParts.append(tmpTaskData.taskInterfaces)
        if tmpTaskData.taskTestcases:
            testCaseParts.append(tmpTaskData.taskTestcases)
    try:
        # Business lines, modules, sources and emails are de-duplicated.
        tbModule.businessLineGroup = list(set(businessLineList))
        tbModule.modulesGroup = list(set(moduleList))
        tbModule.taskInterfaces = ",".join(interfaceParts)
        tbModule.taskTestcases = ",".join(testCaseParts)
        tbModule.interfaceNum = interfaceNum
        tbModule.emailList = ','.join(list(set(emailList)))
        tbModule.sourceGroup = list(set(sourceList))
        tbModule.save()
    except Exception as e:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR, message="任务合并出错,请联系管理员").toJson())
    return HttpResponse(ApiReturn().toJson())
def executeIdforTask(request):
    """Return the latest execution result message for a task within a day.

    If ``date`` is omitted, the window defaults to the last 24 hours;
    otherwise it is the single day starting at the given YYYY-MM-DD date.
    """
    taskId = request.GET.get("taskId", "")
    date = request.GET.get("date", "")
    # Empty date: default to [yesterday, today].
    if date == "":
        dateTo = datetime.date.today()
        dateFrom = dateTo - datetime.timedelta(days=1)
    else:
        try:
            dateFrom = datetime.datetime.strptime(date, '%Y-%m-%d')
            dateTo = dateFrom + datetime.timedelta(days=1)
        except Exception as e:
            return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR, message="date格式不正确").toJson())
    executeId = HTTP_taskService.getExecuteIdByTaskId(taskId,dateFrom,dateTo)
    if executeId:
        executeResult = dbModelToDict(executeId)
    else:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR, message="任务"+taskId+"不存在").toJson())
    # NOTE(review): the success response also uses CODE_ERROR (with an empty
    # message and the result in body) — looks like a bug, but changing it
    # would affect API consumers; confirm before switching to CODE_OK.
    return HttpResponse(ApiReturn(code=ApiReturn.CODE_ERROR, message="",body={'result':executeResult['testResultMsg']}).toJson())
|
from walt.common.thread import RPCThreadConnector
from walt.common.apilink import AttrCallRunner, AttrCallAggregator
class BlockingTasksManager(RPCThreadConnector):
    """Dispatch long-running (blocking) operations to the blocking thread.

    Each public method forwards its call through an RPC session so the
    caller thread is not blocked; results arrive via ``result_cb``.
    """
    def session(self, requester):
        # we will receive:
        # service.<func>(rpc_context, <args...>)
        # and we must forward the call as:
        # requester.<func>(<args...>)
        # the following code handles this forwarding
        # and removal of the 'rpc_context' parameter.
        runner = AttrCallRunner(requester)
        def forward_to_requester(attr, args, kwargs):
            # args[0] is the rpc_context injected by the RPC layer; drop it.
            return runner.do(attr, args[1:], kwargs)
        service = AttrCallAggregator(forward_to_requester)
        return self.local_service(service)
    def clone_image(self, requester, result_cb, *args, **kwargs):
        # Forward asynchronously; result_cb fires when the clone completes.
        self.session(requester).m_async.clone_image(*args, **kwargs).then(result_cb)
    def search_image(self, requester, result_cb, *args, **kwargs):
        self.session(requester).m_async.search_image(*args, **kwargs).then(result_cb)
    def publish_image(self, requester, result_cb, *args, **kwargs):
        self.session(requester).m_async.publish_image(*args, **kwargs).then(result_cb)
    def update_hub_metadata(self, requester, result_cb, *args, **kwargs):
        self.session(requester).m_async.update_hub_metadata(*args, **kwargs).then(result_cb)
    def pull_image(self, image_fullname, result_cb):
        # No requester context is needed for a plain pull.
        self.m_async.pull_image(image_fullname).then(result_cb)
    def stream_db_logs(self, logs_handler):
        # request the blocking task to stream db logs
        self.session(logs_handler).m_async.stream_db_logs(
            **logs_handler.params)
|
import logging
import os
from collections import namedtuple
from data_analysis_compare_sr_ud import extract_overlapping_data
# Script: filter UD v2.0 treebank files down to the sentences that overlap
# with the SR 2018 shared-task data, one output file per language and split.
logger = logging.getLogger('filtering_ud_data')
logging.basicConfig(level=logging.INFO)
# Bundle of the four file paths extract_overlapping_data needs:
# sc = shallow conll input, ss = sentences file, ud = UD treebank, out = output.
DataInfo = namedtuple('DataInfo', ['sc', 'ss', 'ud', 'out'])
DATA_SPLITS = ['train', 'dev']
# UD dataset directory names, index-aligned with the LANGS codes below.
UD_DATASETS = ['UD_Arabic', 'UD_Czech', 'UD_English', 'UD_Spanish-AnCora', 'UD_Finnish',
               'UD_French', 'UD_Italian', 'UD_Dutch', 'UD_Portuguese', 'UD_Russian-SynTagRus']
LANGS = ['ar', 'cs', 'en', 'es_ancora', 'fi',
         'fr', 'it', 'nl', 'pt', 'ru_syntagrus']
HOME = os.environ['HOME']
OUTPUT_FOLDER = '{home}/var/data/sr2018/ud_filtered'.format(home=HOME)
# Make sure one output directory per split exists before writing.
for split in DATA_SPLITS:
    d = os.path.join(OUTPUT_FOLDER, split)
    if not os.path.exists(d):
        os.makedirs(d)
# For each (dataset, language) pair and each split, build the four input/
# output paths and run the overlap extraction.
for i, dset in enumerate(UD_DATASETS):
    lang = LANGS[i]
    for split in DATA_SPLITS:
        ud_fname = '{home}/var/data/ud-treebanks-v2.0/{dataset}/{lang}-ud-{split}.conllu'.format(home=HOME,
                                                                                                dataset=dset,
                                                                                                lang=lang,
                                                                                                split=split)
        sc_fname = '{home}/var/data/sr2018/T1-input/{split}/{lang}-ud-{split}.conll'.format(home=HOME,
                                                                                            lang=lang,
                                                                                            split=split)
        ss_fname = '{home}/var/data/sr2018/Sentences/{split}/{lang}-ud-{split}_sentences.txt'.format(home=HOME,
                                                                                                     lang=lang,
                                                                                                     split=split)
        out_fname = '{output_folder}/{split}/{lang}-ud-{split}.filtered.conll'.format(output_folder=OUTPUT_FOLDER,
                                                                                      lang=lang,
                                                                                      split=split)
        data_info = DataInfo(ud=ud_fname, sc=sc_fname, ss=ss_fname, out=out_fname)
        extract_overlapping_data(args=data_info)
        logger.info('DONE: %s', lang)
        print('\n')
|
import time
import numpy as np
import tensorflow as tf
import metrics
import modeling
import optimization
# Prepare and import BERT modules
import sys
import os
# Configuration for BERT passage re-ranking (TF1 / TPUEstimator).
# Specifies the pretrained checkpoint, the GCS output directory for model
# checkpoints and eval results, and the input data directory.
INIT_CHECKPOINT = 'gs://trec_dl_passage_ranking/trained_model/model.ckpt-100000' #@param {type:"string"}
print('***** BERT Init Checkpoint: {} *****'.format(INIT_CHECKPOINT))
OUTPUT_DIR = 'gs://trec_dl_passage_ranking/output' #@param {type:"string"}
assert OUTPUT_DIR, 'Must specify an existing GCS bucket name'
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
# Input data dir (first CLI argument selects the subdirectory). Should contain
# the .tfrecord files and the supporting query-docids mapping files.
DATA_DIR = 'gs://trec_dl_passage_ranking/' + sys.argv[1] #@param {type:"string"}
print('***** Data directory: {} *****'.format(DATA_DIR))
# Train / evaluate parameters.
USE_TPU = True
DO_TRAIN = False # Whether to run training.
DO_EVAL = True # Whether to run evaluation.
TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 32
LEARNING_RATE = 1e-6
NUM_TRAIN_STEPS = 400000
NUM_WARMUP_STEPS = 40000
MAX_SEQ_LENGTH = 512
SAVE_CHECKPOINTS_STEPS = 1000
ITERATIONS_PER_LOOP = 1000
NUM_TPU_CORES = 8
BERT_CONFIG_FILE = os.path.join('gs://cloud-tpu-checkpoints/bert/uncased_L-24_H-1024_A-16/bert_config.json')
MAX_EVAL_EXAMPLES = None # Maximum number of examples to be evaluated.
NUM_EVAL_DOCS = 1000 # Number of docs per query in the dev and eval files.
METRICS_MAP = ['MAP', 'RPrec', 'NDCG', 'MRR', 'MRR@10']
# Sentinel doc id used as padding/filler in the ranking lists.
FAKE_DOC_ID = '5500000'
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
    """Creates a classification model.

    Builds a BERT encoder, takes its pooled [CLS] output, and adds a
    num_labels-way softmax classification head on top.

    Returns:
        (loss, per_example_loss, log_probs): mean cross-entropy loss,
        per-example cross-entropy, and per-class log-probabilities.
    """
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings)
    # Pooled output of the [CLS] token, shape (batch, hidden_size).
    output_layer = model.get_pooled_output()
    hidden_size = output_layer.shape[-1].value
    output_weights = tf.get_variable(
        "output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))
    output_bias = tf.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer())
    with tf.variable_scope("loss"):
        if is_training:
            # I.e., 0.1 dropout
            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
        logits = tf.matmul(output_layer, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        # One-hot cross-entropy against the integer labels.
        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator.

    The closure builds the classification graph, optionally restores
    weights from ``init_checkpoint``, and supports the TRAIN and PREDICT
    modes only (no EVAL mode).
    """
    def model_fn(features, labels, mode,
                 params): # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(
                " name = %s, shape = %s" % (name, features[name].shape))
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        (total_loss, per_example_loss, log_probs) = create_model(
            bert_config, is_training, input_ids, input_mask, segment_ids,
            label_ids,
            num_labels, use_one_hot_embeddings)
        tvars = tf.trainable_variables()
        scaffold_fn = None
        initialized_variable_names = []
        if init_checkpoint:
            # Map checkpoint variables onto the freshly-built graph by name.
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(tvars,
                                                             init_checkpoint)
            if use_tpu:
                # On TPU the restore must happen inside the Scaffold.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                            init_string)
        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(
                total_loss, learning_rate, num_train_steps, num_warmup_steps,
                use_tpu)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.PREDICT:
            # Prediction emits per-class log-probs plus the gold labels so
            # downstream ranking code can score without a second pass.
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "log_probs": log_probs,
                    "label_ids": label_ids,
                },
                scaffold_fn=scaffold_fn)
        else:
            raise ValueError(
                "Only TRAIN and PREDICT modes are supported: %s" % (mode))
        return output_spec
    return model_fn
def input_fn_builder(dataset_path, seq_length, is_training,
                     max_eval_examples=None, num_skip=0):
    """Creates an `input_fn` closure to be passed to TPUEstimator.

    Reads TFRecords of (query_ids, doc_ids, label, len_gt_titles),
    concatenates query+doc into a single BERT input (segment 0 = query,
    segment 1 = doc), and pads each batch to ``seq_length``.
    """
    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]
        output_buffer_size = batch_size * 1000
        def extract_fn(data_record):
            # Per-record parse: variable-length id sequences + scalar label.
            features = {
                "query_ids": tf.FixedLenSequenceFeature(
                    [], tf.int64, allow_missing=True),
                "doc_ids": tf.FixedLenSequenceFeature(
                    [], tf.int64, allow_missing=True),
                "label": tf.FixedLenFeature([], tf.int64),
                "len_gt_titles": tf.FixedLenFeature([], tf.int64),
            }
            sample = tf.parse_single_example(data_record, features)
            query_ids = tf.cast(sample["query_ids"], tf.int32)
            doc_ids = tf.cast(sample["doc_ids"], tf.int32)
            label_ids = tf.cast(sample["label"], tf.int32)
            len_gt_titles = tf.cast(sample["len_gt_titles"], tf.int32)
            # BERT pair encoding: segment 0 for the query, 1 for the doc.
            input_ids = tf.concat((query_ids, doc_ids), 0)
            query_segment_id = tf.zeros_like(query_ids)
            doc_segment_id = tf.ones_like(doc_ids)
            segment_ids = tf.concat((query_segment_id, doc_segment_id), 0)
            input_mask = tf.ones_like(input_ids)
            features = {
                "input_ids": input_ids,
                "segment_ids": segment_ids,
                "input_mask": input_mask,
                "label_ids": label_ids,
                "len_gt_titles": len_gt_titles,
            }
            return features
        dataset = tf.data.TFRecordDataset([dataset_path])
        dataset = dataset.map(
            extract_fn, num_parallel_calls=4).prefetch(output_buffer_size)
        if is_training:
            dataset = dataset.repeat()
            dataset = dataset.shuffle(buffer_size=1000)
        else:
            # Evaluation: optionally resume from an offset and/or cap the
            # number of examples (debugging only).
            if num_skip > 0:
                dataset = dataset.skip(num_skip)
            if max_eval_examples:
                # Use at most this number of examples (debugging only).
                dataset = dataset.take(max_eval_examples)
        # Pad every example to seq_length; drop_remainder keeps batch shapes
        # static as required by TPUs.
        dataset = dataset.padded_batch(
            batch_size=batch_size,
            padded_shapes={
                "input_ids": [seq_length],
                "segment_ids": [seq_length],
                "input_mask": [seq_length],
                "label_ids": [],
                "len_gt_titles": [],
            },
            padding_values={
                "input_ids": 0,
                "segment_ids": 0,
                "input_mask": 0,
                "label_ids": 0,
                "len_gt_titles": 0,
            },
            drop_remainder=True)
        return dataset
    return input_fn
def main(_):
  """Train and/or evaluate the BERT passage re-ranker.

  Configuration is taken from module-level constants (DO_TRAIN, DO_EVAL,
  BERT_CONFIG_FILE, DATA_DIR, OUTPUT_DIR, TPU settings, batch sizes, ...)
  rather than command-line flags.
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  if not DO_TRAIN and not DO_EVAL:
    raise ValueError(
        "At least one of `DO_TRAIN` or `DO_EVAL` must be True.")
  bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG_FILE)
  # BERT cannot attend beyond the position embeddings it was trained with.
  if MAX_SEQ_LENGTH > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (MAX_SEQ_LENGTH, bert_config.max_position_embeddings))
  tpu_cluster_resolver = None
  if USE_TPU:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        TPU_ADDRESS)
  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=OUTPUT_DIR,
      save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=ITERATIONS_PER_LOOP,
          num_shards=NUM_TPU_CORES,
          per_host_input_for_training=is_per_host))
  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=2,  # binary relevance: not-relevant / relevant
      init_checkpoint=INIT_CHECKPOINT,
      learning_rate=LEARNING_RATE,
      num_train_steps=NUM_TRAIN_STEPS,
      num_warmup_steps=NUM_WARMUP_STEPS,
      use_tpu=USE_TPU,
      use_one_hot_embeddings=USE_TPU)
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=USE_TPU,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=TRAIN_BATCH_SIZE,
      eval_batch_size=EVAL_BATCH_SIZE,
      predict_batch_size=EVAL_BATCH_SIZE)
  if DO_TRAIN:
    tf.logging.info("***** Running training *****")
    tf.logging.info(" Batch size = %d", TRAIN_BATCH_SIZE)
    tf.logging.info(" Num steps = %d", NUM_TRAIN_STEPS)
    train_input_fn = input_fn_builder(
        dataset_path=DATA_DIR + "/dataset_train.tf",
        seq_length=MAX_SEQ_LENGTH,
        is_training=True)
    estimator.train(input_fn=train_input_fn,
                    max_steps=NUM_TRAIN_STEPS)
    tf.logging.info("Done Training!")
  if DO_EVAL:
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info(" Batch size = %d", EVAL_BATCH_SIZE)
    predictions_path = OUTPUT_DIR + "/msmarco_predictions_dev.tsv"
    # Resume support: count predictions already written so those examples can
    # be skipped by the input pipeline (via num_skip below).
    total_count = 0
    if tf.gfile.Exists(predictions_path):
      with tf.gfile.Open(predictions_path, "r") as predictions_file:
        total_count = sum(1 for line in predictions_file)
        tf.logging.info(
            "{} examples already processed. Skipping them.".format(
                total_count))
    # One (query_id, doc_id) pair per line, aligned with the eval dataset.
    query_docids_map = []
    with tf.gfile.Open(
        DATA_DIR + "/query_doc_ids.txt") as ref_file:
      for line in ref_file:
        query_docids_map.append(line.strip().split("\t"))
    max_eval_examples = None
    if MAX_EVAL_EXAMPLES:
      # MAX_EVAL_EXAMPLES counts queries; each query contributes
      # NUM_EVAL_DOCS candidate documents.
      max_eval_examples = MAX_EVAL_EXAMPLES * NUM_EVAL_DOCS
    eval_input_fn = input_fn_builder(
        dataset_path=DATA_DIR + "/dataset.tf",
        seq_length=MAX_SEQ_LENGTH,
        is_training=False,
        max_eval_examples=max_eval_examples,
        num_skip=total_count)
    # ***IMPORTANT NOTE***
    # The logging output produced by the feed queues during evaluation is very
    # large (~14M lines for the dev set), which causes the tab to crash if you
    # don't have enough memory on your local machine. We suppress this
    # frequent logging by setting the verbosity to WARN during the evaluation
    # phase.
    tf.logging.set_verbosity(tf.logging.WARN)
    result = estimator.predict(input_fn=eval_input_fn,
                               yield_single_examples=True)
    start_time = time.time()
    results = []
    all_metrics = np.zeros(len(METRICS_MAP))
    example_idx = 0
    # Predictions arrive one (query, doc) pair at a time; buffer NUM_EVAL_DOCS
    # of them to score the full candidate list of one query.
    for item in result:
      results.append((item["log_probs"], item["label_ids"]))
      total_count += 1
      if len(results) == NUM_EVAL_DOCS:
        log_probs, labels = zip(*results)
        log_probs = np.stack(log_probs).reshape(-1, 2)
        labels = np.stack(labels)
        # Rank candidates by the log-probability of the "relevant" class.
        scores = log_probs[:, 1]
        pred_docs = scores.argsort()[::-1]
        # pred_docs = np.arange(len(pred_docs))
        gt = set(list(np.where(labels > 0)[0]))
        all_metrics += metrics.metrics(
            gt=gt, pred=pred_docs, metrics_map=METRICS_MAP)
        start_idx = total_count - NUM_EVAL_DOCS
        end_idx = total_count
        query_ids, doc_ids = zip(*query_docids_map[start_idx:end_idx])
        assert len(
            set(query_ids)) == 1, "Query ids must be all the same."
        query_id = query_ids[0]
        # Workaround to make mode=a work when the file was not yet created.
        mode = "w"
        if tf.gfile.Exists(predictions_path):
          mode = "a"
        with tf.gfile.Open(predictions_path, mode) as predictions_file:
          for rank, doc_idx in enumerate(pred_docs):
            doc_id = doc_ids[doc_idx]
            predictions_file.write(
                "\t".join((query_id, doc_id, str(rank + 1))) + "\n")
        example_idx += 1
        results = []
      if total_count % 10000 == 0:
        tf.logging.warn(
            "Read {} examples in {} secs. Metrics so far:".format(
                total_count, int(time.time() - start_time)))
        tf.logging.warn(" ".join(METRICS_MAP))
        tf.logging.warn(all_metrics / example_idx)
    # Once the feed queues are finished, we can set the verbosity back to
    # INFO.
    tf.logging.set_verbosity(tf.logging.INFO)
    # Average the accumulated metrics over the number of scored queries.
    all_metrics /= example_idx
    tf.logging.info("Final Metrics:")
    tf.logging.info(" ".join(METRICS_MAP))
    tf.logging.info(all_metrics)
    tf.logging.info("Done Evaluating!")
if __name__ == "__main__":
  # tf.app.run parses TF flags and then calls main(_).
  tf.app.run()
|
import time
import argparse
import os
import sys
import pptk
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, '../utils'))
from commons import check_mkdir, force_mkdir
# Command-line options: where evaluation output lives and (optionally) where
# to write renderings.
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str, default='log', help='Log dir [default: log]')
parser.add_argument('--eval_dir', type=str, default='eval', help='Eval dir [default: eval]')
parser.add_argument('--visu_dir', type=str, default=None, help='Visu dir [default: None, meaning no visu]')
FLAGS = parser.parse_args()

LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR):
    print('ERROR: log_dir %s does not exist! Please Check!' % LOG_DIR)
    exit(1)

# Renderings are placed under <log_dir>/<eval_dir>/<visu_dir>.
LOG_DIR = os.path.join(LOG_DIR, FLAGS.eval_dir)
if FLAGS.visu_dir is not None:
    VISU_DIR = os.path.join(LOG_DIR, FLAGS.visu_dir)
    force_mkdir(VISU_DIR)
# NOTE(review): VISU_DIR is only defined when --visu_dir is given; any later
# use of VISU_DIR must be guarded on FLAGS.visu_dir.
def get_palette(num_cls):
    """ Returns the color map for visualizing the segmentation mask.

    Each class index j is colored by spreading the bits of j across the
    R/G/B channels (the standard PASCAL VOC palette construction), so
    nearby class ids still get visually distinct colors.

    Args:
        num_cls: Number of classes
    Returns:
        (num_cls, 3) array; row j is the RGB color of class j.
    """
    n = num_cls
    palette = [0] * (n * 3)
    for j in range(0, n):
        lab = j
        palette[j * 3 + 0] = 0
        palette[j * 3 + 1] = 0
        palette[j * 3 + 2] = 0
        i = 0
        while lab:
            # Bit 3k+c of the label goes into the high bits of channel c.
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    # The flat list is laid out [R0, G0, B0, R1, G1, B1, ...], so rows of 3
    # are obtained with reshape(-1, 3). The previous
    # reshape(3, -1).transpose() interleaved values from different classes,
    # producing wrong and even duplicate colors (e.g. for num_cls=3, class 2
    # came out black, identical to class 0).
    palette = np.array(palette).reshape(-1, 3)
    return palette
def convert(visu_dir):
    """Render every .pts point cloud under *visu_dir* to a .png via pptk.

    For each '<name>.pts' found recursively:
      - if '<name>.label' exists, it is loaded as a boolean mask and only
        the selected points are rendered;
      - the cloud is captured to '<name>.png';
      - if '<name>.mask' exists, a second rendering colored per part (via
        get_palette) is captured into a parallel 'mask' path.
    """
    for root, dirs, files in os.walk(visu_dir):
        for file in files:
            if file.endswith('.pts'):
                pts_file = os.path.join(root, file)
                label_file = pts_file.replace('.pts', '.label')
                out_file = pts_file.replace('.pts', '.png')
                print('rendering: {}'.format(pts_file))
                with open(pts_file) as f:
                    pts = np.loadtxt(f)
                if os.path.exists(label_file):
                    print('rendering: {}'.format(label_file))
                    with open(label_file) as f:
                        # np.bool was removed in NumPy 1.24; the builtin
                        # `bool` is the supported equivalent.
                        label = np.loadtxt(f, dtype=bool)
                else:
                    label = None
                if label is not None:
                    pts = pts[label]
                # Reorder columns to (z, x, y) before viewing.
                pts = np.stack([pts[:, 2], pts[:, 0], pts[:, 1]], axis=1)
                v = pptk.viewer(pts)
                v.set(point_size=0.01, r=5, show_grid=False, show_axis=False, lookat=[.8, .8, .8])
                v.capture(out_file)
                time.sleep(0.5)  # give the async capture time to write the file
                print('saving: {}'.format(out_file))
                # print('camera LA:', v.get('lookat'))
                mask_file = pts_file.replace('.pts', '.mask')
                if os.path.exists(mask_file):
                    with open(mask_file) as f:
                        # np.int was removed in NumPy 1.24; use builtin int.
                        mask = np.loadtxt(f).astype(int)
                else:
                    mask = None
                v.close()
                if mask is not None:
                    palette = get_palette(len(np.unique(mask)))
                    v = pptk.viewer(pts, palette[mask, :])
                    v.set(point_size=0.01, r=5, show_grid=False, show_axis=False, lookat=[.8, .8, .8])
                    # NOTE(review): replaces the first occurrence of 'pts'
                    # anywhere in the path (directory names included) —
                    # presumably intended to mirror output into a 'mask'
                    # tree; confirm.
                    mask_out_file = out_file.replace('pts', 'mask')
                    if not os.path.exists(os.path.dirname(mask_out_file)):
                        os.mkdir(os.path.dirname(mask_out_file))
                    v.capture(mask_out_file)
                    time.sleep(0.5)
                    print('saving: {}'.format(mask_out_file))
                    v.close()
# Only render when a visualization directory was requested: VISU_DIR is
# defined solely inside the `if FLAGS.visu_dir is not None` branch above, so
# the unguarded call raised NameError whenever --visu_dir was omitted.
if FLAGS.visu_dir is not None:
    convert(VISU_DIR)
|
#!/usr/bin/env python
from spt3g import core, dfmux
import socket, argparse
import numpy as np
# Command-line interface: required hardware map + output file, plus options
# controlling transport, threading, alignment and channel selection.
parser = argparse.ArgumentParser(description='Record dfmux data to a NetCDF file', prog='ledgerman')
parser.add_argument('hardware_map', metavar='/path/to/hwm.yaml', help='Path to hardware map YAML file')
parser.add_argument('output', metavar='output.nc', help='Path to output NetCDF file')
parser.add_argument('-v', dest='verbose', action='store_true',
                    help='Verbose mode (print all frames)')
parser.add_argument('-u', dest='udp', action='store_true',
                    help='Use multicast UDP for data collection instead of '
                    'SCTP for compatibility with old firmware.')
parser.add_argument('-t', dest='threads', type=int, default=None,
                    help='Number of listener threads to use (SCTP only)')
parser.add_argument('-a', dest='align', action='store_true',
                    help='Align sampling. This has to happen once, but'
                    ' will break any existing DAN loops when run.')
parser.add_argument('-s', dest='system_time', action='store_true',
                    help='Replace board time with system time when data'
                    ' received. Useful if your board is in IRIG_TEST mode.')
parser.add_argument('-P', dest='physnames', action='store_true',
                    help='Use physical bolometer names rather than channel names')
parser.add_argument('-p', dest='pathstring', action='store', default=[], nargs='+',
                    help='Only record channels that match channel path string,'
                    ' e.g. 005/5/2/3/* saves data for all bolometers on crate'
                    ' 005, slot 5, mezzanine 2, module 3.')
parser.add_argument('-b', dest='state', action='store', default=[], nargs='+',
                    help='Only record bolometers that have a state that'
                    ' matches the supplied string(s), e.g. overbiased tuned')
args = parser.parse_args()
# Import pydfmux later since it can take a while
import pydfmux

core.log_notice('Initializing hardware map and boards', unit='Ledgerman')
hwm = pydfmux.load_session(open(args.hardware_map, 'r'))['hardware_map']
if hwm.query(pydfmux.IceCrate).count() > 0:
    hwm.query(pydfmux.IceCrate).resolve()

# make sure that the hardware map is consistent with what's on the IceBoards
if args.state:
    hwm.query(pydfmux.Bolometer).load_bolo_states()

if args.align:
    # Re-aligning sampling disrupts any running DAN loops, hence the warning.
    core.log_notice('Aligning board sampling, this will break any existing DAN loops!',
                    unit='Ledgerman')
    hwm.query(pydfmux.IceBoard).set_fir_stage(6)
    hwm.query(pydfmux.IceBoard).align_sampling()
core.log_notice('Beginning data acquisition', unit='Ledgerman')

# get board serial numbers only for the channels that we are recording
if args.pathstring:
    chan_map_query = hwm.channel_maps_from_pstring(args.pathstring)
else:
    chan_map_query = hwm.query(pydfmux.ChannelMapping)
if args.state:
    # Restrict to bolometers whose recorded state matches one of the
    # requested states. SQLAlchemy's membership operator is `in_`, not
    # `_in` — the previous `state._in(...)` would raise AttributeError.
    chan_map_query = chan_map_query.join(pydfmux.ChannelMapping, pydfmux.Bolometer).filter(pydfmux.Bolometer.state.in_(args.state))
# Unique board serials backing the selected channels.
serial_list = np.unique(np.array([cm.iceboard.serial for cm in chan_map_query]))
# Set up DfMux consumer
pipe = core.G3Pipeline()
builder = dfmux.DfMuxBuilder([int(serial) for serial in serial_list])

if args.udp:
    # Get the local IP(s) to use to connect to the boards by opening test
    # connections. Using a set rather than a list deduplicates the results.
    local_ips = set()
    for board in hwm.query(pydfmux.core.dfmux.IceBoard):
        testsock = socket.create_connection(('iceboard' + board.serial + '.local', 80))
        local_ips.add(testsock.getsockname()[0])
        testsock.close()
    core.log_notice('Creating listeners for %d boards on interfaces: %s' % (hwm.query(pydfmux.core.dfmux.IceBoard).count(), ', '.join(local_ips)), unit='Ledgerman')

    # Build mapping dictionary for old (64x) firmware
    v2_mapping = {'iceboard' + str(serial) + '.local': int(serial) for serial in serial_list}

    # Set up listeners per network segment and point them at the event builder
    collectors = [dfmux.DfMuxCollector(ip, builder, v2_mapping) for ip in local_ips]
else:
    # SCTP mode: distribute the boards round-robin over the listener threads
    # (hosts[i::args.threads] gives thread i every args.threads-th host).
    hosts = ['iceboard' + str(serial) + '.local' for serial in serial_list]
    if args.threads is None:
        args.threads = len(hosts)
    if args.threads > len(hosts):
        args.threads = len(hosts)
    collectors = [dfmux.DfMuxCollector(builder, hosts[i::args.threads])
                  for i in range(args.threads)]
pipe.Add(builder)

# Insert current hardware map into data stream. This is critical to get the
# board ID -> IP mapping needed to do anything useful with the data
pipe.Add(dfmux.PyDfMuxHardwareMapInjector, pydfmux_hwm=hwm, pathstring=args.pathstring, state=args.state)
if args.physnames:
    from spt3g import calibration
    # Swap the normal bolometer names for their physical_name equivalents,
    # which can make laboratory tasks simpler -- we don't care about
    # long-term data archiving here.
    pipe.Add(dfmux.PyDfMuxBolometerPropertiesInjector, pydfmux_hwm=hwm)

    class SwapPhysLogicalNames(object):
        """Buffers the Wiring and Calibration frames, then just before the
        first data frame re-emits the wiring map re-keyed by each channel's
        physical bolometer name."""
        def __init__(self):
            self.calframe = None
            self.wiringframe = None
            # Becomes True once the rewritten wiring frame has been emitted;
            # afterwards all frames pass through untouched.
            self.sent = False
        def __call__(self, frame):
            if self.sent:
                return
            if frame.type == core.G3FrameType.Wiring:
                # Swallow for now; re-emitted below with renamed keys.
                self.wiringframe = frame
                return []
            if frame.type == core.G3FrameType.Calibration:
                self.calframe = frame
                return []
            if self.calframe is None or self.wiringframe is None:
                raise Exception('Data before wiring and cal frames!')
            w = self.wiringframe['WiringMap']
            del self.wiringframe['WiringMap']
            c = self.calframe['NominalBolometerProperties']
            new_wiring = dfmux.DfMuxWiringMap()
            # NOTE(review): `iteritems()` is Python-2 style; presumably the
            # spt3g map binding provides it — confirm under Python 3.
            for k, d in w.iteritems():
                new_wiring[c[k].physical_name] = d
            self.wiringframe['WiringMap'] = new_wiring
            self.sent = True
            return [self.wiringframe, frame]
    pipe.Add(SwapPhysLogicalNames)
if args.system_time:
    # Replace the board-reported timestamp with host receive time (useful
    # when the board is in IRIG_TEST mode and its own clock is meaningless).
    def sub_system_time(frame):
        if frame.type != core.G3FrameType.Timepoint:
            return
        del frame['EventHeader']
        frame['EventHeader'] = core.G3Time.Now()
    pipe.Add(sub_system_time)
if args.verbose:
    pipe.Add(core.Dump)

# Sink: stream all collected frames to the output NetCDF file.
pipe.Add(dfmux.NetCDFDump, filename=args.output)

# Start the network collectors, then run the pipeline until interrupted.
for collector in collectors:
    collector.Start()
pipe.Run()
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import zipfile
from time import time
import requests
from batchgenerators.utilities.file_and_folder_operations import join, isfile
from nnunet.paths import network_training_output_dir
def get_available_models():
    """Return the registry of downloadable pretrained nnU-Net models.

    Maps task name -> {'description': human-readable summary of targets and
    input modalities, 'url': either one zenodo download URL (str) or a tuple
    of URLs, each pointing at a zip installable by
    install_model_from_zip_file}. An empty 'url' marks a model that has not
    been uploaded yet.
    """
    available_models = {
        "Task001_BrainTumour": {
            'description': "Brain Tumor Segmentation. \n"
                           "Segmentation targets are edema, enhancing tumor and necrosis, \n"
                           "Input modalities are 0: FLAIR, 1: T1, 2: T1 with contrast agent, 3: T2. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task001_BrainTumour.zip?download=1"
        },
        "Task002_Heart": {
            'description': "Left Atrium Segmentation. \n"
                           "Segmentation target is the left atrium, \n"
                           "Input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task002_Heart.zip?download=1"
        },
        "Task003_Liver": {
            'description': "Liver and Liver Tumor Segmentation. \n"
                           "Segmentation targets are liver and tumors, \n"
                           "Input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task003_Liver.zip?download=1"
        },
        "Task004_Hippocampus": {
            'description': "Hippocampus Segmentation. \n"
                           "Segmentation targets posterior and anterior parts of the hippocampus, \n"
                           "Input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task004_Hippocampus.zip?download=1"
        },
        "Task005_Prostate": {
            'description': "Prostate Segmentation. \n"
                           "Segmentation targets are peripheral and central zone, \n"
                           "Input modalities are 0: T2, 1: ADC. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4485926/files/Task005_Prostate.zip?download=1"
        },
        "Task006_Lung": {
            'description': "Lung Nodule Segmentation. \n"
                           "Segmentation target are lung nodules, \n"
                           "Input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task006_Lung.zip?download=1"
        },
        "Task007_Pancreas": {
            'description': "Pancreas Segmentation. \n"
                           "Segmentation targets are pancras and pancreas tumor, \n"
                           "Input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task007_Pancreas.zip?download=1"
        },
        "Task008_HepaticVessel": {
            'description': "Hepatic Vessel Segmentation. \n"
                           "Segmentation targets are hepatic vesels and liver tumors, \n"
                           "Input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task008_HepaticVessel.zip?download=1"
        },
        "Task009_Spleen": {
            'description': "Spleen Segmentation. \n"
                           "Segmentation target is the spleen, \n"
                           "Input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task009_Spleen.zip?download=1"
        },
        "Task010_Colon": {
            'description': "Colon Cancer Segmentation. \n"
                           "Segmentation target are colon caner primaries, \n"
                           "Input modalities are 0: CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task010_Colon.zip?download=1"
        },
        "Task017_AbdominalOrganSegmentation": {
            'description': "Multi-Atlas Labeling Beyond the Cranial Vault - Abdomen. \n"
                           "Segmentation targets are thirteen different abdominal organs, \n"
                           "Input modalities are 0: abdominal CT scan. \n"
                           "Also see https://www.synapse.org/#!Synapse:syn3193805/wiki/217754",
            'url': "https://zenodo.org/record/4003545/files/Task017_AbdominalOrganSegmentation.zip?download=1"
        },
        "Task024_Promise": {
            'description': "Prostate MR Image Segmentation 2012. \n"
                           "Segmentation target is the prostate, \n"
                           "Input modalities are 0: T2. \n"
                           "Also see https://promise12.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task024_Promise.zip?download=1"
        },
        "Task027_ACDC": {
            'description': "Automatic Cardiac Diagnosis Challenge. \n"
                           "Segmentation targets are right ventricle, left ventricular cavity and left myocardium, \n"
                           "Input modalities are 0: cine MRI. \n"
                           "Also see https://acdc.creatis.insa-lyon.fr/",
            'url': "https://zenodo.org/record/4003545/files/Task027_ACDC.zip?download=1"
        },
        "Task029_LiTS": {
            'description': "Liver and Liver Tumor Segmentation Challenge. \n"
                           "Segmentation targets are liver and liver tumors, \n"
                           "Input modalities are 0: abdominal CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/17094",
            'url': "https://zenodo.org/record/4003545/files/Task029_LITS.zip?download=1"
        },
        "Task035_ISBILesionSegmentation": {
            'description': "Longitudinal multiple sclerosis lesion segmentation Challenge. \n"
                           "Segmentation target is MS lesions, \n"
                           "input modalities are 0: FLAIR, 1: MPRAGE, 2: proton density, 3: T2. \n"
                           "Also see https://smart-stats-tools.org/lesion-challenge",
            'url': "https://zenodo.org/record/4003545/files/Task035_ISBILesionSegmentation.zip?download=1"
        },
        "Task038_CHAOS_Task_3_5_Variant2": {
            'description': "CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge (Task 3 & 5). \n"
                           "Segmentation targets are left and right kidney, liver, spleen, \n"
                           "Input modalities are 0: T1 in-phase, T1 out-phase, T2 (can be any of those)\n"
                           "Also see https://chaos.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task038_CHAOS_Task_3_5_Variant2.zip?download=1"
        },
        "Task048_KiTS_clean": {
            'description': "Kidney and Kidney Tumor Segmentation Challenge. "
                           "Segmentation targets kidney and kidney tumors, "
                           "Input modalities are 0: abdominal CT scan. "
                           "Also see https://kits19.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task048_KiTS_clean.zip?download=1"
        },
        "Task055_SegTHOR": {
            'description': "SegTHOR: Segmentation of THoracic Organs at Risk in CT images. \n"
                           "Segmentation targets are aorta, esophagus, heart and trachea, \n"
                           "Input modalities are 0: CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/21145",
            'url': "https://zenodo.org/record/4003545/files/Task055_SegTHOR.zip?download=1"
        },
        "Task061_CREMI": {
            'description': "MICCAI Challenge on Circuit Reconstruction from Electron Microscopy Images (Synaptic Cleft segmentation task). \n"
                           "Segmentation target is synaptic clefts, \n"
                           "Input modalities are 0: serial section transmission electron microscopy of neural tissue. \n"
                           "Also see https://cremi.org/",
            'url': "https://zenodo.org/record/4003545/files/Task061_CREMI.zip?download=1"
        },
        "Task075_Fluo_C3DH_A549_ManAndSim": {
            'description': "Fluo-C3DH-A549-SIM and Fluo-C3DH-A549 datasets of the cell tracking challenge. Segmentation target are C3DH cells in fluorescence microscopy images.\n"
                           "Input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task075_Fluo_C3DH_A549_ManAndSim.zip?download=1"
        },
        "Task076_Fluo_N3DH_SIM": {
            'description': "Fluo-N3DH-SIM dataset of the cell tracking challenge. Segmentation target are N3DH cells and cell borders in fluorescence microscopy images.\n"
                           "Input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py",
            'url': "https://zenodo.org/record/4003545/files/Task076_Fluo_N3DH_SIM.zip?download=1"
        },
        "Task082_BraTS2020": {
            'description': "Brain tumor segmentation challenge 2020 (BraTS)\n"
                           "Segmentation targets are 0: background, 1: edema, 2: enhancing tumor, 3: necrosis\n"
                           "Input modalities are 0: T1, 1: T1ce, 2: T2, 3: FLAIR (MRI images)\n"
                           "Also see https://www.med.upenn.edu/cbica/brats2020/",
            'url': (
                "https://zenodo.org/record/4635763/files/Task082_nnUNetTrainerV2__nnUNetPlansv2.1_5fold.zip?download=1",
                "https://zenodo.org/record/4635763/files/Task082_nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold.zip?download=1",
                "https://zenodo.org/record/4635763/files/Task082_nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold.zip?download=1",
                "https://zenodo.org/record/4635763/files/Task082_nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold.zip?download=1",
            )
        },
        "Task089_Fluo-N2DH-SIM_thickborder_time": {
            'description': "Fluo-N2DH-SIM dataset of the cell tracking challenge. Segmentation target are nuclei of N2DH cells and cell borders in fluorescence microscopy images.\n"
                           "Input modalities are 0: t minus 4, 0: t minus 3, 0: t minus 2, 0: t minus 1, 0: frame of interest\n"
                           "Note that the input channels are different time steps from a time series acquisition\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py\n"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task089_Fluo-N2DH-SIM_thickborder_time.zip?download=1"
        },
        "Task114_heart_MNMs": {
            'description': "Cardiac MRI short axis images from the M&Ms challenge 2020.\n"
                           "Input modalities are 0: MRI \n"
                           "See also https://www.ub.edu/mnms/ \n"
                           "Note: Labels of the M&Ms Challenge are not in the same order as for the ACDC challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task114_heart_mnms.py",
            'url': "https://zenodo.org/record/4288464/files/Task114_heart_MNMs.zip?download=1"
        },
        "Task115_COVIDSegChallenge": {
            'description': "Covid lesion segmentation in CT images. Data originates from COVID-19-20 challenge.\n"
                           "Predicted labels are 0: background, 1: covid lesion\n"
                           "Input modalities are 0: CT \n"
                           "See also https://covid-segmentation.grand-challenge.org/",
            'url': (
                "https://zenodo.org/record/4635822/files/Task115_nnUNetTrainerV2_DA3__nnUNetPlans_v2.1__3d_fullres__10folds.zip?download=1",
                "https://zenodo.org/record/4635822/files/Task115_nnUNetTrainerV2_DA3_BN__nnUNetPlans_v2.1__3d_fullres__10folds.zip?download=1",
                "https://zenodo.org/record/4635822/files/Task115_nnUNetTrainerV2_ResencUNet__nnUNetPlans_FabiansResUNet_v2.1__3d_fullres__10folds.zip?download=1",
                "https://zenodo.org/record/4635822/files/Task115_nnUNetTrainerV2_ResencUNet_DA3__nnUNetPlans_FabiansResUNet_v2.1__3d_fullres__10folds.zip?download=1",
                "https://zenodo.org/record/4635822/files/Task115_nnUNetTrainerV2_ResencUNet_DA3_BN__nnUNetPlans_FabiansResUNet_v2.1__3d_lowres__10folds.zip?download=1",
            )
        },
        "Task135_KiTS2021": {
            'description': "Kidney and kidney tumor segmentation in CT images. Data originates from KiTS2021 challenge.\n"
                           "Predicted labels are 0: background, 1: kidney, 2: tumor, 3: cyst \n"
                           "Input modalities are 0: CT \n"
                           "See also https://kits21.kits-challenge.org/",
            'url': (
                "https://zenodo.org/record/5126443/files/Task135_KiTS2021.zip?download=1",
            )
        },
    }
    return available_models
def print_available_pretrained_models():
    """Print the name and description of every downloadable pretrained model."""
    print('The following pretrained models are available:\n')
    for task_name, info in get_available_models().items():
        print('')
        print(task_name)
        print(info['description'])
def download_and_install_pretrained_model_by_name(taskname):
    """Look up *taskname* in the model registry and install its download(s).

    Raises RuntimeError for unknown task names, registry entries without an
    upload, or malformed 'url' entries.
    """
    registry = get_available_models()
    if taskname not in registry.keys():
        raise RuntimeError("\nThe requested pretrained model ('%s') is not available." % taskname)
    if len(registry[taskname]['url']) == 0:
        raise RuntimeError("The requested model has not been uploaded yet. Please check back in a few days")
    url = registry[taskname]['url']
    if isinstance(url, str):
        # Single download.
        download_and_install_from_url(url)
    elif isinstance(url, (tuple, list)):
        # Some models ship as multiple archives; install each in turn.
        for single_url in url:
            download_and_install_from_url(single_url)
    else:
        raise RuntimeError('URL for download_and_install_from_url must be either str or list/tuple of str')
def download_and_install_from_url(url):
    """Download a pretrained-model zip from *url* and unpack it into the
    results folder (network_training_output_dir).

    The archive is streamed to a temporary file in the user's home directory,
    extracted, and the temporary file is always removed afterwards.
    """
    assert network_training_output_dir is not None, "Cannot install model because network_training_output_dir is not " \
                                                   "set (RESULTS_FOLDER missing as environment variable, see " \
                                                   "Installation instructions)"
    print('Downloading pretrained model from url:', url)
    import http.client
    # Force HTTP/1.0 for this process's HTTP connections (workaround carried
    # over from the original code; presumably for servers that misbehave with
    # HTTP/1.1 keep-alive on these large downloads).
    http.client.HTTPConnection._http_vsn = 10
    http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

    import os
    home = os.path.expanduser('~')
    # Timestamp-derived name makes collisions between concurrent downloads
    # unlikely.
    random_number = int(time() * 1e7)
    tempfile = join(home, '.nnunetdownload_%s' % str(random_number))

    # The previous `except Exception as e: raise e` was a no-op re-raise;
    # `finally` alone already guarantees cleanup on both success and failure
    # while preserving the original traceback.
    try:
        download_file(url=url, local_filename=tempfile, chunk_size=8192 * 16)
        print("Download finished. Extracting...")
        install_model_from_zip_file(tempfile)
        print("Done")
    finally:
        if isfile(tempfile):
            os.remove(tempfile)
def download_file(url: str, local_filename: str, chunk_size: Optional[int] = None) -> str:
    """Stream *url* to *local_filename* in chunks and return the local path."""
    # borrowed from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
    # NOTE the stream=True parameter below
    response = requests.get(url, stream=True)
    with response as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as out_fh:
            for chunk in r.iter_content(chunk_size=chunk_size):
                # If you have chunk encoded response uncomment if
                # and set chunk_size parameter to None.
                #if chunk:
                out_fh.write(chunk)
    return local_filename
def install_model_from_zip_file(zip_file: str):
    """Extract a pretrained-model archive into network_training_output_dir."""
    archive = zipfile.ZipFile(zip_file, 'r')
    try:
        archive.extractall(network_training_output_dir)
    finally:
        archive.close()
def print_license_warning():
    """Print the dataset-license disclaimer shown before model downloads."""
    banner = '######################################################'
    print('')
    print(banner)
    print('!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!')
    print(banner)
    print("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some "
          "allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use "
          "nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!")
    print(banner)
    print('')
def download_by_name():
    """CLI entry point: download and install a pretrained model by task name."""
    import argparse
    parser = argparse.ArgumentParser(description="Use this to download pretrained models. CAREFUL: This script will "
                                                 "overwrite "
                                                 "existing models (if they share the same trainer class and plans as "
                                                 "the pretrained model")
    parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
                                                    'available task names, run nnUNet_print_available_'
                                                    'pretrained_models')
    args = parser.parse_args()
    # Show the license disclaimer before starting the download.
    print_license_warning()
    download_and_install_pretrained_model_by_name(args.task_name)
def download_by_url():
    """CLI entry point: download and install a pretrained model from a raw URL."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to download pretrained models. This script is intended to download models via url only. "
                    "If you want to download one of our pretrained models, please use nnUNet_download_pretrained_model. "
                    "CAREFUL: This script will overwrite "
                    "existing models (if they share the same trainer class and plans as "
                    "the pretrained model.")
    parser.add_argument("url", type=str, help='URL of the pretrained model')
    download_and_install_from_url(parser.parse_args().url)
def install_from_zip_entry_point():
    """CLI entry point: install a pretrained model from a local zip file."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to install a zip file containing a pretrained model.")
    parser.add_argument("zip", type=str, help='zip file')
    args = parser.parse_args()
    # Local renamed from `zip` to `zip_file` to avoid shadowing the builtin.
    zip_file = args.zip
    install_model_from_zip_file(zip_file)
def print_pretrained_model_requirements():
    """CLI entry point: print a pretrained model's description, including the
    input modalities it requires."""
    import argparse
    parser = argparse.ArgumentParser(description="Use this to see the properties of a pretrained model, especially "
                                                 "what input modalities it requires")
    parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
                                                    'available task names, run nnUNet_print_available_'
                                                    'pretrained_models')
    args = parser.parse_args()
    registry = get_available_models()
    if args.task_name not in registry.keys():
        raise RuntimeError("Invalid task name. This pretrained model does not exist. To see available task names, "
                           "run nnUNet_print_available_pretrained_models")
    print(registry[args.task_name]['description'])
if __name__ == '__main__':
    # NOTE(review): dead code — `url` is assigned but never used; presumably
    # a leftover manual-testing snippet. Confirm before removing.
    url = 'https://www.dropbox.com/s/ft54q1gi060vm2x/Task004_Hippocampus.zip?dl=1'
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListSecretsResponse(SdkResponse):
    """Response model for the list-secrets API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes listed here are masked as "****" by to_dict(); none for
    # this model.
    sensitive_list = []

    openapi_types = {
        'secrets': 'list[Secret]',
        'page_info': 'PageInfo'
    }

    attribute_map = {
        'secrets': 'secrets',
        'page_info': 'page_info'
    }

    def __init__(self, secrets=None, page_info=None):
        """ListSecretsResponse - a model defined in huaweicloud sdk"""
        super(ListSecretsResponse, self).__init__()

        self._secrets = None
        self._page_info = None
        self.discriminator = None

        # Route through the property setters only when values were supplied.
        if secrets is not None:
            self.secrets = secrets
        if page_info is not None:
            self.page_info = page_info

    @property
    def secrets(self):
        """Gets the secrets of this ListSecretsResponse.

        List of secret details.

        :return: The secrets of this ListSecretsResponse.
        :rtype: list[Secret]
        """
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        """Sets the secrets of this ListSecretsResponse.

        List of secret details.

        :param secrets: The secrets of this ListSecretsResponse.
        :type: list[Secret]
        """
        self._secrets = secrets

    @property
    def page_info(self):
        """Gets the page_info of this ListSecretsResponse.

        :return: The page_info of this ListSecretsResponse.
        :rtype: PageInfo
        """
        return self._page_info

    @page_info.setter
    def page_info(self, page_info):
        """Sets the page_info of this ListSecretsResponse.

        :param page_info: The page_info of this ListSecretsResponse.
        :type: PageInfo
        """
        self._page_info = page_info

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive; plain values pass
                # through unchanged.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListSecretsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
#!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: cloudformation
version_added: 1.0.0
short_description: Create or delete an AWS CloudFormation stack
description:
  - Launches or updates an AWS CloudFormation stack and waits for it to complete.
notes:
- CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
The version listed in the requirements is the oldest version that works with the module as a whole.
Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
options:
stack_name:
description:
- Name of the CloudFormation stack.
required: true
type: str
disable_rollback:
description:
      - If a stack fails to form, rollback will remove the stack.
default: false
type: bool
on_create_failure:
description:
- Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
choices:
- DO_NOTHING
- ROLLBACK
- DELETE
type: str
create_timeout:
description:
- The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED
type: int
template_parameters:
description:
- A list of hashes of all the template variables for the stack. The value can be a string or a dict.
- Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
default: {}
type: dict
state:
description:
- If I(state=present), stack will be created.
- If I(state=present) and if stack exists and template has changed, it will be updated.
- If I(state=absent), stack will be removed.
default: present
choices: [ present, absent ]
type: str
template:
description:
- The local path of the CloudFormation template.
- This must be the full path to the file, relative to the working directory. If using roles this may look
like C(roles/cloudformation/files/cloudformation-example.json).
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template),
I(template_body) nor I(template_url) are specified, the previous template will be reused.
type: path
notification_arns:
description:
- A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
type: str
stack_policy:
description:
- The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
for instance, allow all updates U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
type: str
tags:
description:
- Dictionary of tags to associate with stack and its resources during stack creation.
- Can be updated later, updating tags removes previous entries.
type: dict
template_url:
description:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
S3 bucket in the same region as the stack.
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
the previous template will be reused.
type: str
create_changeset:
description:
- "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
- "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
deleted immediately with no changeset."
type: bool
default: false
changeset_name:
description:
- Name given to the changeset when creating a changeset.
- Only used when I(create_changeset=true).
- By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
See the AWS Change Sets docs for more information
U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
type: str
template_format:
description:
- This parameter is ignored since Ansible 2.3 and will be removed after 2022-06-01.
- Templates are now passed raw to CloudFormation regardless of format.
type: str
role_arn:
description:
- The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
type: str
termination_protection:
description:
- Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
type: bool
template_body:
description:
- Template body. Use this to pass in the actual body of the CloudFormation template.
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
are specified, the previous template will be reused.
type: str
events_limit:
description:
- Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
default: 200
type: int
backoff_delay:
description:
- Number of seconds to wait for the next retry.
default: 3
type: int
required: False
backoff_max_delay:
description:
- Maximum amount of time to wait between retries.
default: 30
type: int
required: False
backoff_retries:
description:
- Number of times to retry operation.
- AWS API throttling mechanism fails CloudFormation module so we have to retry a couple of times.
default: 10
type: int
required: False
capabilities:
description:
- Specify capabilities that stack template contains.
- Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
type: list
elements: str
default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
author: "James S. Martin (@jsmartin)"
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
requirements: [ boto3, botocore>=1.5.45 ]
'''
EXAMPLES = '''
- name: create a cloudformation stack
amazon.aws.cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Basic role example
- name: create a stack, specify role that cloudformation assumes
amazon.aws.cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
- name: delete a stack
amazon.aws.cloudformation:
stack_name: "ansible-cloudformation-old"
state: "absent"
# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template via an URL
amazon.aws.cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template body via lookup template
amazon.aws.cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_body: "{{ lookup('template', 'cloudformation.j2') }}"
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
# When use_previous_value is set to True, the given value will be ignored and
# CloudFormation will use the value from a previously submitted template.
# If use_previous_value is set to False (default) the given value is used.
- amazon.aws.cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
template: "files/cloudformation-example.json"
template_parameters:
DBSnapshotIdentifier:
use_previous_value: True
value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot
DBName:
use_previous_value: True
tags:
Stack: "ansible-cloudformation"
# Enable termination protection on a stack.
# If the stack already exists, this will update its termination protection
- name: enable termination protection during stack creation
amazon.aws.cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
termination_protection: yes
# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
- name: enable termination protection during stack creation
amazon.aws.cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
create_timeout: 5
# Configure rollback behaviour on the unsuccessful creation of a stack allowing
# CloudFormation to clean up, or do nothing in the event of an unsuccessful
# deployment
# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
# it fails to create
- name: create stack which will delete on creation failure
amazon.aws.cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
on_create_failure: DELETE
'''
RETURN = '''
events:
type: list
description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
returned: always
sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
log:
description: Debugging logs. Useful when modifying or finding an error.
returned: always
type: list
sample: ["updating stack"]
change_set_id:
description: The ID of the stack change set if one was created
returned: I(state=present) and I(create_changeset=true)
type: str
sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
stack_resources:
description: AWS stack resources and their status. List of dictionaries, one dict per resource.
returned: state == present
type: list
sample: [
{
"last_updated_time": "2016-10-11T19:40:14.979000+00:00",
"logical_resource_id": "CFTestSg",
"physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
"resource_type": "AWS::EC2::SecurityGroup",
"status": "UPDATE_COMPLETE",
"status_reason": null
}
]
stack_outputs:
type: dict
description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
returned: state == present
sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
''' # NOQA
import json
import time
import traceback
import uuid
from hashlib import sha1
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils._text import to_bytes
from ansible.module_utils._text import to_native
from ..module_utils.core import AnsibleAWSModule
from ..module_utils.ec2 import AWSRetry
from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
from ..module_utils.ec2 import boto_exception
def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
    '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
    # Collect up to events_limit recent stack events; FAILED events are also
    # echoed into the 'log' list with their reason.
    result = {'events': [], 'log': []}
    try:
        paginator = cfn.get_paginator('describe_stack_events')
        pages = paginator.paginate(
            StackName=stack_name,
            PaginationConfig={'MaxItems': events_limit}
        )
        # Optionally narrow to the events produced by one request token.
        if token_filter is None:
            query = "StackEvents[*]"
        else:
            query = "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
        events = list(pages.search(query))
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        if 'does not exist' in error_msg:
            # missing stack, don't bail.
            result['log'].append('Stack does not exist.')
        else:
            result['log'].append('Unknown error: ' + str(error_msg))
        return result

    for event in events:
        result['events'].append(
            'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**event))
        if event['ResourceStatus'].endswith('FAILED'):
            result['log'].append(
                '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**event))
    return result
def create_stack(module, stack_params, cfn, events_limit):
    """Create a new CloudFormation stack and wait for the operation to settle.

    Fails the module when no template source is given or the API call errors;
    otherwise returns the result dict produced by stack_operation().
    """
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")

    # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
    # 'OnFailure' only apply on creation, not update.
    on_failure = module.params.get('on_create_failure')
    if on_failure is not None:
        stack_params['OnFailure'] = on_failure
    else:
        stack_params['DisableRollback'] = module.params['disable_rollback']

    timeout = module.params.get('create_timeout')
    if timeout is not None:
        stack_params['TimeoutInMinutes'] = timeout

    if module.params.get('termination_protection') is not None:
        if boto_supports_termination_protection(cfn):
            stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
        else:
            module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")

    try:
        response = cfn.create_stack(**stack_params)
        # Use stack ID to follow stack state in case of on_create_failure = DELETE
        result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit,
                                 stack_params.get('ClientRequestToken', None))
    except Exception as err:
        module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get('StackName')))
    if not result:
        module.fail_json(msg="empty result")
    return result
def list_changesets(cfn, stack_name):
    """Return the names of every change set currently recorded for the stack."""
    summaries = cfn.list_change_sets(StackName=stack_name)['Summaries']
    return [summary['ChangeSetName'] for summary in summaries]
def create_changeset(module, stack_params, cfn, events_limit):
    """Create a CloudFormation change set for an existing stack instead of
    applying the update directly.

    Skips creation when a change set with the derived name already exists,
    deletes and reports a no-op change set that contains no changes, and
    otherwise polls (up to ~10 minutes) until the change set leaves the
    CREATE_PENDING/CREATE_IN_PROGRESS states before collecting stack events.
    Returns a result dict; fails the module on API errors.
    """
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        module.fail_json(msg="Either 'template' or 'template_url' is required.")
    if module.params['changeset_name'] is not None:
        stack_params['ChangeSetName'] = module.params['changeset_name']
    # changesets don't accept ClientRequestToken parameters
    stack_params.pop('ClientRequestToken', None)
    try:
        changeset_name = build_changeset_name(stack_params)
        stack_params['ChangeSetName'] = changeset_name
        # Determine if this changeset already exists
        pending_changesets = list_changesets(cfn, stack_params['StackName'])
        if changeset_name in pending_changesets:
            warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
            result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
        else:
            cs = cfn.create_change_set(**stack_params)
            # Make sure we don't enter an infinite loop
            time_end = time.time() + 600
            while time.time() < time_end:
                try:
                    newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
                except botocore.exceptions.BotoCoreError as err:
                    module.fail_json_aws(err)
                if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
                    time.sleep(1)
                elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
                    # AWS marks an empty change set as FAILED; treat it as "no change".
                    cfn.delete_change_set(ChangeSetName=cs['Id'])
                    result = dict(changed=False,
                                  output='The created Change Set did not contain any changes to this stack and was deleted.')
                    # a failed change set does not trigger any stack events so we just want to
                    # skip any further processing of result and just return it directly
                    return result
                else:
                    break
                # Lets not hog the cpu/spam the AWS API
                time.sleep(1)
            result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
            result['change_set_id'] = cs['Id']
            result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
                                  'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
                                  'NOTE that dependencies on this stack might fail due to pending changes!']
    except Exception as err:
        error_msg = boto_exception(err)
        if 'No updates are to be performed.' in error_msg:
            result = dict(changed=False, output='Stack is already up-to-date.')
        else:
            module.fail_json_aws(err, msg='Failed to create change set')
    if not result:
        module.fail_json(msg="empty result")
    return result
def update_stack(module, stack_params, cfn, events_limit):
    """Update an existing stack with stack_params and wait for completion.

    Falls back to the previously submitted template when no new template
    source is supplied; a "no updates" response from AWS is reported as an
    unchanged result rather than a failure.
    """
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        stack_params['UsePreviousTemplate'] = True

    # if the state is present and the stack already exists, we try to update it.
    # AWS will tell us if the stack template and parameters are the same and
    # don't need to be updated.
    try:
        cfn.update_stack(**stack_params)
        result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit,
                                 stack_params.get('ClientRequestToken', None))
    except Exception as err:
        if 'No updates are to be performed.' in boto_exception(err):
            result = dict(changed=False, output='Stack is already up-to-date.')
        else:
            module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get('StackName')))
    if not result:
        module.fail_json(msg="empty result")
    return result
def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
    '''updates termination protection of a stack'''
    if not boto_supports_termination_protection(cfn):
        module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
    stack = get_stack_facts(cfn, stack_name)
    # Nothing to do when the stack is missing or already in the desired state.
    if not stack:
        return
    if stack['EnableTerminationProtection'] is desired_termination_protection_state:
        return
    try:
        cfn.update_termination_protection(
            EnableTerminationProtection=desired_termination_protection_state,
            StackName=stack_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json_aws(e)
def boto_supports_termination_protection(cfn):
    """Report whether the client offers termination protection.

    botocore grew the update_termination_protection client method in 1.7.18,
    so probing the client object is enough.
    """
    supported = hasattr(cfn, "update_termination_protection")
    return supported
def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
    '''gets the status of a stack while it is created/updated/deleted

    Polls describe_stacks every 5 seconds until the stack reaches a terminal
    state for the given operation ('CREATE', 'UPDATE', 'DELETE',
    'CREATE_CHANGESET'), then returns a result dict with 'changed',
    'output', recent 'events' and, on error, 'failed'. A stack that
    disappears is treated as successfully deleted only if it was seen at
    least once before or the operation is DELETE.
    '''
    existed = []
    while True:
        try:
            stack = get_stack_facts(cfn, stack_name)
            existed.append('yes')
        except Exception:
            # If the stack previously existed, and now can't be found then it's
            # been deleted successfully.
            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
                ret = get_stack_events(cfn, stack_name, events_limit, op_token)
                ret.update({'changed': True, 'output': 'Stack Deleted'})
                return ret
            else:
                return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
        ret = get_stack_events(cfn, stack_name, events_limit, op_token)
        if not stack:
            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
                ret = get_stack_events(cfn, stack_name, events_limit, op_token)
                ret.update({'changed': True, 'output': 'Stack Deleted'})
                return ret
            else:
                ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
                return ret
        # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
        # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
        elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
            ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
            return ret
        elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
            ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
            return ret
        # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
        elif stack['StackStatus'].endswith('_COMPLETE'):
            ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
            return ret
        elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
            ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
            return ret
        # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
        elif stack['StackStatus'].endswith('_FAILED'):
            ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
            return ret
        else:
            # this can loop forever :/
            time.sleep(5)
    return {'failed': True, 'output': 'Failed for unknown reasons.'}
def build_changeset_name(stack_params):
    """Derive a deterministic change-set name from the request parameters.

    An explicitly supplied 'ChangeSetName' wins; otherwise the sorted JSON
    form of the parameters is hashed so identical requests map to the same
    name.
    """
    if 'ChangeSetName' in stack_params:
        return stack_params['ChangeSetName']

    serialized = json.dumps(stack_params, sort_keys=True)
    digest = sha1(to_bytes(serialized, errors='surrogate_or_strict')).hexdigest()
    return 'Ansible-{0}-{1}'.format(stack_params['StackName'], digest)
def check_mode_changeset(module, stack_params, cfn):
    """Create a change set, describe it and delete it before returning check mode outputs."""
    stack_params['ChangeSetName'] = build_changeset_name(stack_params)
    # changesets don't accept ClientRequestToken parameters
    stack_params.pop('ClientRequestToken', None)
    try:
        change_set = cfn.create_change_set(**stack_params)
        description = None
        attempts_left = 60  # 60 polls x 5 s sleep = 5 minute budget
        while attempts_left:
            attempts_left -= 1
            description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
            if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
                break
            time.sleep(5)
        else:
            # never reached a terminal state within the polling window
            module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
        cfn.delete_change_set(ChangeSetName=change_set['Id'])

        reason = description.get('StatusReason')
        failed_without_changes = (
            description['Status'] == 'FAILED'
            and "didn't contain changes" in description['StatusReason']
        )
        if failed_without_changes:
            return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
        return {'changed': True, 'msg': reason, 'meta': description['Changes']}
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        module.fail_json_aws(err)
def get_stack_facts(cfn, stack_name):
    """Describe *stack_name* and return its stack record (a dict).

    Returns None when the stack does not exist; any other botocore
    validation/client error is re-raised to the caller.
    """
    try:
        stack_response = cfn.describe_stacks(StackName=stack_name)
        stack_info = stack_response['Stacks'][0]
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        if 'does not exist' in error_msg:
            # missing stack, don't bail.
            return None
        # other error, bail. Bare raise preserves the original traceback.
        raise
    # The original code re-extracted stack_response['Stacks'][0] a second time
    # behind extra guards; that branch was dead (the first extraction already
    # raised on an empty response), so it has been removed.
    return stack_info
def main():
    """Module entry point: parse parameters, build the boto3 request,
    and create/update/delete the CloudFormation stack accordingly.

    Exits via module.exit_json()/fail_json(); never returns normally.
    """
    argument_spec = dict(
        stack_name=dict(required=True),
        template_parameters=dict(required=False, type='dict', default={}),
        state=dict(default='present', choices=['present', 'absent']),
        template=dict(default=None, required=False, type='path'),
        notification_arns=dict(default=None, required=False),
        stack_policy=dict(default=None, required=False),
        disable_rollback=dict(default=False, type='bool'),
        on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
        create_timeout=dict(default=None, type='int'),
        template_url=dict(default=None, required=False),
        template_body=dict(default=None, required=False),
        template_format=dict(removed_at_date='2022-06-01', removed_from_collection='amazon.aws'),
        create_changeset=dict(default=False, type='bool'),
        changeset_name=dict(default=None, required=False),
        role_arn=dict(default=None, required=False),
        tags=dict(default=None, type='dict'),
        termination_protection=dict(default=None, type='bool'),
        events_limit=dict(default=200, type='int'),
        backoff_retries=dict(type='int', default=10, required=False),
        backoff_delay=dict(type='int', default=3, required=False),
        backoff_max_delay=dict(type='int', default=30, required=False),
        capabilities=dict(type='list', elements='str', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body'],
                            ['disable_rollback', 'on_create_failure']],
        supports_check_mode=True
    )
    # Reject capabilities outside the three values CloudFormation accepts.
    invalid_capabilities = []
    user_capabilities = module.params.get('capabilities')
    for user_cap in user_capabilities:
        if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
            invalid_capabilities.append(user_cap)
    if invalid_capabilities:
        module.fail_json(msg="Specified capabilities are invalid : %r,"
                             " please check documentation for valid capabilities" % invalid_capabilities)
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': user_capabilities,
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']
    # Template source priority: local file > inline body > URL (mutually
    # exclusive per argument_spec above).
    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as template_fh:
            stack_params['TemplateBody'] = template_fh.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []
    # can't check the policy when verifying.
    if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
        with open(module.params['stack_policy'], 'r') as stack_policy_fh:
            stack_params['StackPolicyBody'] = stack_policy_fh.read()
    template_parameters = module.params['template_parameters']
    stack_params['Parameters'] = []
    for k, v in template_parameters.items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)
            if 'value' in v:
                param['ParameterValue'] = str(v['value'])
            if 'use_previous_value' in v and bool(v['use_previous_value']):
                # UsePreviousValue and ParameterValue are mutually exclusive.
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)
            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']
    result = {}
    cfn = module.client('cloudformation')
    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(
        retries=module.params.get('backoff_retries'),
        delay=module.params.get('backoff_delay'),
        max_delay=module.params.get('backoff_max_delay')
    )
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
    stack_info = get_stack_facts(cfn, stack_params['StackName'])
    # In check mode we either answer trivially or fall through to a dry-run
    # change set; exit_json() terminates the module here.
    if module.check_mode:
        if state == 'absent' and stack_info:
            module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True, msg='New stack would be created', meta=[])
        else:
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))
    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(module, cfn, stack_params['StackName'],
                                              bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
        # format the stack output
        stack = get_stack_facts(cfn, stack_params['StackName'])
        if stack is not None:
            if result.get('stack_outputs') is None:
                # always define stack_outputs, but it may be empty
                result['stack_outputs'] = {}
            for output in stack.get('Outputs', []):
                result['stack_outputs'][output['OutputKey']] = output['OutputValue']
            stack_resources = []
            reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
            for res in reslist.get('StackResourceSummaries', []):
                stack_resources.append({
                    "logical_resource_id": res['LogicalResourceId'],
                    "physical_resource_id": res.get('PhysicalResourceId', ''),
                    "resource_type": res['ResourceType'],
                    "last_updated_time": res['LastUpdatedTimestamp'],
                    "status": res['ResourceStatus'],
                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
                })
            result['stack_resources'] = stack_resources
    elif state == 'absent':
        # absent state is different because of the way delete_stack works.
        # problem is it doesn't give an error if stack isn't found
        # so must describe the stack first
        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
                result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
                                         stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json_aws(err)
    module.exit_json(**result)
# Standard module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
|
from __future__ import absolute_import
import os
import sys
import errno
# from .osutils import mkdir_if_missing
def mkdir_if_missing(dir_path):
    """Create *dir_path* (including parents), ignoring 'already exists'."""
    try:
        os.makedirs(dir_path)
    except OSError as err:
        # Swallow only the "directory already exists" case; anything else
        # (permissions, bad path, ...) is a real error and must propagate.
        if err.errno != errno.EEXIST:
            raise
class Logger(object):
    """Tee-style logger: mirrors everything written to it to the console
    (``sys.stdout`` captured at construction time) and, optionally, to a
    log file at *fpath*.

    Usable as a context manager; streams are closed on exit.
    """

    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            # Ensure the parent directory exists before opening the file.
            mkdir_if_missing(os.path.dirname(fpath))
            self.file = open(fpath, 'w')

    def __del__(self):
        # __del__ can run after a partially-failed __init__ or during
        # interpreter shutdown; never let cleanup errors propagate here.
        try:
            self.close()
        except Exception:
            pass

    def __enter__(self):
        # Fix: return self so `with Logger(...) as log:` binds the logger
        # (the previous implementation returned None, breaking the
        # context-manager protocol).
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        """Write *msg* to the console and, if configured, the log file."""
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        """Flush both sinks; fsync the file so the log survives a crash."""
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            os.fsync(self.file.fileno())

    def close(self):
        # NOTE(review): this also closes the captured console stream
        # (typically sys.stdout). Kept for backward compatibility, but
        # confirm callers really expect stdout to be closed here.
        self.console.close()
        if self.file is not None:
            self.file.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Proactive Learning documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 2 00:27:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE: this list is extended at the bottom of this file (sphinx.ext.todo).
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Proactive Learning'
copyright = u'2015, Carlos González Goce'
author = u'Carlos González Goce'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'es'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# NOTE: overridden to True at the bottom of this file.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = u'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
html_search_language = 'es'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProactiveLearningdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'ProactiveLearning.tex', 'Proactive Learning Documentation',
   u'Carlos González Goce', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'proactivelearning', 'Proactive Learning Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, 'ProactiveLearning', 'Proactive Learning Documentation',
   author, 'ProactiveLearning', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# Custom configuration
# Better PHP lexer including symfony api calls
# More here https://github.com/fabpot/sphinx-php
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
# Add TO DO extension
extensions.append('sphinx.ext.todo')
todo_include_todos = True
|
from ...conversions import hex_to_bytes
from ...set1.challenge1_2 import xor_bytes
from ...set4.challenge28 import sha1_hash
def hmac_sha1(key, message):
    """Compute HMAC-SHA1 of *message* under *key* (RFC 2104).

    Keys longer than the 64-byte SHA-1 block size are first replaced by
    their hash; keys shorter than the block size are zero-padded.
    Returns the 20-byte digest as bytes.
    """
    block_size = 64
    if len(key) > block_size:
        # Fix: RFC 2104 replaces an over-long key with the hash of the KEY
        # (the original code hashed `message` here, producing wrong MACs
        # for keys longer than 64 bytes).
        key = sha1_hash(key)
    # have to modify key because key might be too short after hashing
    if len(key) < block_size:
        padding_amount = block_size - len(key)
        key = key + b"\0" * padding_amount
    outer_key = xor_single_byte(key, 0x5C)
    inner_key = xor_single_byte(key, 0x36)
    return sha1_hash(outer_key + sha1_hash(inner_key + message))
def xor_single_byte(message, byte_value):
    """Xor every byte of *message* with the single byte *byte_value*."""
    # Build a same-length mask of the repeated byte, then delegate to
    # the generic bytes-xor helper.
    mask = bytes([byte_value]) * len(message)
    return xor_bytes(mask, message)
def test_hmac_sha1():
    """Check hmac_sha1 against the well-known RFC 2202-style test vector."""
    # HMAC_SHA1("key", "The quick brown fox jumps over the lazy dog") = de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9
    key = b"key"
    message = b"The quick brown fox jumps over the lazy dog"
    expected_hash = hex_to_bytes("de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9")
    calculated_hash = hmac_sha1(key, message)
    correct = expected_hash == calculated_hash
    print(f"Expected: {expected_hash!r}")
    print(f"Calculated: {calculated_hash}")
    print(f"Correct? {correct}")
    assert correct
# Allow running this challenge file directly as a script.
if __name__ == "__main__":
    test_hmac_sha1()
|
### Import Libs
import pandas
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
### Importing from other files
from Supermarket_Scraping.Parameters.supermarket_parameters import (
supermarket_parameters,
supermarket_list,
path_chrome
)
from Supermarket_Scraping.Utility_Functions.General_Utilities import slow_typing
from Supermarket_Scraping.Parameters.supermarket_parameters import supermarket_parameters
### Main Function
def supermarket_sweeper(supermarkets, products, supermarket_parameters):
    """Scrape, for every (supermarket, product) pair, the cheapest and the
    second-cheapest product name and price.

    NOTE: the `supermarket_parameters` parameter shadows the module-level
    import of the same name; the argument value is the one used here.

    Returns a 4-tuple of lists:
    (main product names, reserve product names, main prices, reserve prices),
    ordered by supermarket then product.
    """
    ### Set up main list
    main_product_list = []
    main_price_list = []
    ### Set up back up list
    reserve_product_list = []
    reserve_price_list = []
    ### Set up a loop
    # One browser instance is reused for the whole sweep.
    driver = webdriver.Chrome(executable_path=path_chrome)
    # Take a supermarket
    for supermarket_name in supermarkets:
        # Take a product
        for product_name in products:
            ### Get page
            driver.get(supermarket_parameters[supermarket_name]["home_url_id"])
            ### Slow down speed for memory
            time.sleep(2)
            ### Find the search box
            element = driver.find_element_by_css_selector(
                supermarket_parameters[supermarket_name]["search_bar_id"]
            )
            element.click()
            ### Type in name of product
            slow_typing(element, product_name)
            ### Find the button to search
            submit_button = driver.find_element_by_css_selector(
                supermarket_parameters[supermarket_name]["search_button_path"]
            )
            ### Search
            submit_button.click()
            ### Pause for memory
            time.sleep(5)
            ### Click on organise by cheapest
            ### Asda version
            # Asda's sort control is a custom widget, not a <select>,
            # so it needs its own click sequence.
            if supermarket_name == "Asda":
                ### Pause
                time.sleep(1)
                ### Find the select button
                sort_opt = driver.find_element_by_css_selector(
                    supermarket_parameters["Asda"]["css_id_organise"]
                )
                ### Click button
                sort_opt.click()
                ### Pause
                time.sleep(3)
                ### Find the lowest price button
                select_opt = driver.find_elements_by_css_selector(
                    supermarket_parameters["Asda"]["css_option_text"]
                )
                ### Click the button
                # NOTE(review): index [1] is assumed to be the
                # "lowest price" option — confirm against the live page.
                select_opt[1].click()
                ### Sleep
                time.sleep(1)
                driver.find_element_by_css_selector(
                    supermarket_parameters["Asda"]["footer_button"]
                ).click()
            ### Handling standard methods
            # All other supermarkets expose a plain <select> dropdown.
            else:
                el = driver.find_element_by_css_selector(
                    supermarket_parameters[supermarket_name]["css_id_organise"]
                )
                for option in el.find_elements_by_tag_name("option"):
                    if (
                        option.text
                        == supermarket_parameters[supermarket_name]["css_option_text"]
                    ):
                        option.click()
                        break
            ### Pause for memory
            time.sleep(5)
            # Turn into Beautiful Soup
            soup = BeautifulSoup(driver.page_source, "html.parser")
            ### Modify For Morrisons Sidebar issue:
            # Restrict parsing to the main results area so sidebar
            # promotions are not picked up as results.
            if supermarket_name == "Morrisons":
                soup = soup.find(
                    supermarket_parameters["Morrisons"]["main_area_class"],
                    {"class": supermarket_parameters["Morrisons"]["main_area_name"]},
                )
            ### Extract product name
            # Results are sorted by price above, so [0] is the cheapest hit.
            main_prod = soup.find_all(
                supermarket_parameters[supermarket_name]["prod_name_type"],
                {"class": supermarket_parameters[supermarket_name]["prod_name_class"]},
            )[0]["alt"]
            ### Extract back up product name
            rese_prod = soup.find_all(
                supermarket_parameters[supermarket_name]["prod_name_type"],
                {"class": supermarket_parameters[supermarket_name]["prod_name_class"]},
            )[1]["alt"]
            ## Extract prices
            main_price = soup.find_all(
                supermarket_parameters[supermarket_name]["prod_price_type"],
                {"class": supermarket_parameters[supermarket_name]["prod_price_class"]},
            )[0].text
            rese_price = soup.find_all(
                supermarket_parameters[supermarket_name]["prod_price_type"],
                {"class": supermarket_parameters[supermarket_name]["prod_price_class"]},
            )[1].text
            # Tesco's markup repeats each price node, so the reserve price
            # lives at index [2] instead of [1]; recompute both here.
            if supermarket_name == "Tesco":
                main_price = soup.find_all(
                    supermarket_parameters[supermarket_name]["prod_price_type"],
                    {
                        "class": supermarket_parameters[supermarket_name][
                            "prod_price_class"
                        ]
                    },
                )[0].text
                rese_price = soup.find_all(
                    supermarket_parameters[supermarket_name]["prod_price_type"],
                    {
                        "class": supermarket_parameters[supermarket_name][
                            "prod_price_class"
                        ]
                    },
                )[2].text
            ### Appending all to lists
            main_product_list.append(main_prod)
            reserve_product_list.append(rese_prod)
            main_price_list.append(main_price)
            reserve_price_list.append(rese_price)
    return (
        main_product_list,
        reserve_product_list,
        main_price_list,
        reserve_price_list,
    )
# Kick off the sweep on import/run for the configured supermarkets.
supermarket_sweeper(
    supermarkets=supermarket_list,
    products=['Apple', 'Bannana'],
    supermarket_parameters=supermarket_parameters,
)
|
import abc
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import utils
class ContinualLearner(nn.Module, metaclass=abc.ABCMeta):
    '''Abstract module to add continual learning capabilities to a classifier.

    Provides Elastic Weight Consolidation (EWC): `estimate_fisher` stores the
    current parameters and a diagonal Fisher approximation as buffers, and
    `ewc_loss` returns the quadratic penalty built from those buffers.
    Subclasses must implement `forward`.
    '''
    def __init__(self):
        super().__init__()
        #----------------- EWC-specific parameters -----------------#
        self.ewc = False
        self.ewc_lambda = 5000  #-> hyperparam: how strong to weigh EWC-loss ("regularisation strength")
        self.gamma = 1.  #-> hyperparam (online EWC): decay-term for old tasks' contribution to quadratic term
        self.online = True  #-> "online" (=single quadratic term) or "offline" (=quadratic term per task) EWC
        self.fisher_n = None  #-> sample size for estimating FI-matrix (if "None", full pass over dataset)
        self.emp_FI = False  #-> if True, use provided labels to calculate FI ("empirical FI"); else predicted labels
        self.EWC_task_count = 0  #-> keeps track of number of quadratic loss terms (for "offline EWC")
        #----------------- Distillation-specific parameters -----------------#
        self.distill = False
        self.KD_temp = 2.0  #-> temperature for knowledge-distillation softmax
    #----------------- EWC-specific functions -----------------#
    def estimate_fisher(self, dataset, allowed_classes=None, collate_fn=None):
        '''After completing training on a task, estimate diagonal of Fisher Information matrix.
        [dataset]: <DataSet> to be used to estimate FI-matrix
        [allowed_classes]: <list> with class-indeces of 'allowed' or 'active' classes'''
        # Prepare <dict> to store estimated Fisher Information matrix
        est_fisher_info = {}
        for n, p in self.named_parameters():
            if p.requires_grad:
                # Buffer names may not contain '.', so flatten the name.
                n = n.replace('.', '__')
                est_fisher_info[n] = p.detach().clone().zero_()
        # Set model to evaluation mode
        mode = self.training
        self.eval()
        # Create data-loader to give batches of size 1
        data_loader = utils.get_data_loader(dataset, batch_size=1, cuda=self._is_on_cuda(), collate_fn=collate_fn)
        # Estimate the FI-matrix for [self.fisher_n] batches of size 1
        for index,(x,y) in enumerate(data_loader):
            # break from for-loop if max number of samples has been reached
            if self.fisher_n is not None:
                if index >= self.fisher_n:
                    break
            # run forward pass of model
            x = x.to(self._device())
            output = self(x) if allowed_classes is None else self(x)[:, allowed_classes]
            if self.emp_FI:
                # -use provided label to calculate loglikelihood --> "empirical Fisher":
                label = torch.LongTensor([y]) if type(y)==int else y
                if allowed_classes is not None:
                    # Map global class indices onto positions within [allowed_classes].
                    label = [int(np.where(i == allowed_classes)[0][0]) for i in label.numpy()]
                    label = torch.LongTensor(label)
                label = label.to(self._device())
            else:
                # -use predicted label to calculate loglikelihood:
                label = output.max(1)[1]
            # calculate negative log-likelihood
            negloglikelihood = F.nll_loss(F.log_softmax(output, dim=1), label)
            # Calculate gradient of negative loglikelihood
            self.zero_grad()
            negloglikelihood.backward()
            # Square gradients and keep running sum
            for n, p in self.named_parameters():
                if p.requires_grad:
                    n = n.replace('.', '__')
                    if p.grad is not None:
                        est_fisher_info[n] += p.grad.detach() ** 2
        # Normalize by sample size used for estimation
        # NOTE(review): `index` is the last enumerate index, i.e. N-1 for N
        # processed batches (and 0 -> ZeroDivisionError if only one batch is
        # processed) — confirm whether dividing by the batch count was intended.
        est_fisher_info = {n: p/index for n, p in est_fisher_info.items()}
        # Store new values in the network
        for n, p in self.named_parameters():
            if p.requires_grad:
                n = n.replace('.', '__')
                # -mode (=MAP parameter estimate)
                self.register_buffer('{}_EWC_prev_task{}'.format(n, "" if self.online else self.EWC_task_count+1),
                                     p.detach().clone())
                # -accuracy (approximated by diagonal Fisher Information matrix)
                # For online EWC, decay and fold in the previous running estimate.
                if self.online and self.EWC_task_count==1:
                    existing_values = getattr(self, '{}_EWC_estimated_fisher'.format(n))
                    est_fisher_info[n] += self.gamma * existing_values
                self.register_buffer('{}_EWC_estimated_fisher{}'.format(n, "" if self.online else self.EWC_task_count+1),
                                     est_fisher_info[n])
        # If "offline EWC", increase task-count (for "online EWC", set it to 1 to indicate EWC-loss can be calculated)
        self.EWC_task_count = 1 if self.online else self.EWC_task_count + 1
        # Set model back to its initial mode
        self.train(mode=mode)
    def ewc_loss(self):
        '''Calculate EWC-loss.'''
        if self.EWC_task_count>0:
            losses = []
            # If "offline EWC", loop over all previous tasks (if "online EWC", [EWC_task_count]=1 so only 1 iteration)
            for task in range(1, self.EWC_task_count+1):
                for n, p in self.named_parameters():
                    if p.requires_grad:
                        # Retrieve stored mode (MAP estimate) and accuracy (Fisher Information matrix)
                        n = n.replace('.', '__')
                        mean = getattr(self, '{}_EWC_prev_task{}'.format(n, "" if self.online else task))
                        fisher = getattr(self, '{}_EWC_estimated_fisher{}'.format(n, "" if self.online else task))
                        # If "online EWC", apply decay-term to the running sum of the Fisher Information matrices
                        fisher = self.gamma*fisher if self.online else fisher
                        # Calculate EWC-loss
                        losses.append((fisher * (p-mean)**2).sum())
            # Sum EWC-loss from all parameters (and from all tasks, if "offline EWC")
            return (1./2)*sum(losses)
        else:
            # EWC-loss is 0 if there are no stored mode and accuracy yet
            return torch.tensor(0., device=self._device())
    def _device(self):
        # Device of the first parameter (assumes all parameters co-located).
        return next(self.parameters()).device
    def _is_on_cuda(self):
        # Whether the first parameter lives on a CUDA device.
        return next(self.parameters()).is_cuda
    @abc.abstractmethod
    def forward(self, x):
        # Subclasses must implement the classifier forward pass.
        pass
|
# stdlib
from typing import List
from typing import Optional
from typing import Type
# third party
from nacl.signing import VerifyKey
# relative
from ....abstract.node_service_interface import NodeServiceInterface
from ..auth import service_auth
from ..node_service import ImmediateNodeServiceWithReply
from .peer_discovery_messages import PeerDiscoveryMessage
from .peer_discovery_messages import PeerDiscoveryMessageWithReply
from .peer_discovery_messages import PeerDiscoveryReplyMessage
class PeerDiscoveryService(ImmediateNodeServiceWithReply):
    """Node service that answers peer-discovery requests with an immediate reply."""

    @staticmethod
    @service_auth(guests_welcome=True)
    def process(
        node: NodeServiceInterface,
        msg: PeerDiscoveryMessage,
        verify_key: Optional[VerifyKey] = None,
    ) -> PeerDiscoveryReplyMessage:
        # this service requires no verify_key because its currently public
        payload_result = msg.payload.run(node=node, verify_key=verify_key)
        reply = PeerDiscoveryMessageWithReply(kwargs=payload_result)
        return reply.back_to(address=msg.reply_to)

    @staticmethod
    def message_handler_types() -> List[Type[PeerDiscoveryMessage]]:
        # This service handles exactly one message type.
        return [PeerDiscoveryMessage]
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_storage_class_list import V1beta1StorageClassList
class TestV1beta1StorageClassList(unittest.TestCase):
    """Unit-test stubs for the V1beta1StorageClassList model."""

    def setUp(self):
        # No shared fixtures are required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1beta1StorageClassList(self):
        """Placeholder test for V1beta1StorageClassList construction."""
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1beta1_storage_class_list.V1beta1StorageClassList()
        pass


if __name__ == '__main__':
    unittest.main()
|
"""
Run a virtual screen baseline:
starting from an initial pool, we randomly sample the next point
from the rest of the dataset, instead of synthesizing it from that pool.
This simulates a situation of virtual screening and doesn't account for
the cost of discovery of new compounds.
"""
import numpy as np
from argparse import ArgumentParser
from mols.mol_functions import get_objective_by_name
from datasets.loaders import MolSampler
from mols.mol_functions import get_objective_by_name
def parse_args():
    """Assemble the CLI for the virtual-screening baseline and parse argv."""
    specs = [
        (('-d', '--dataset'), dict(default='chembl', type=str,
                                   help='dataset: chembl or zinc250')),
        (('-s', '--seed'), dict(default=42, type=int,
                                help='sampling seed for the dataset')),
        (('-o', '--objective'), dict(default='qed', type=str,
                                     help='which objective function to use: qed or logp')),
        (('-b', '--budget'), dict(default=100, type=int,
                                  help='computational budget (# of function `evaluations`)')),
        (('-i', '--init_pool_size'), dict(default=20, type=int,
                                          help='size of initial pool')),
        (('--num_repl',), dict(default=1, type=int,
                               help='number of replications of virtual screening')),
    ]
    cli = ArgumentParser()
    for flags, options in specs:
        cli.add_argument(*flags, **options)
    return cli.parse_args()
def run_screen(init_pool_size, seed, budget, objective, dataset, iter_num):
    """Run one virtual-screening replication and return the best value found.

    Draws an initial pool of molecules, then spends the remaining budget
    sampling one random molecule at a time and tracking the best objective.
    """
    score = get_objective_by_name(objective)
    sampler = MolSampler(dataset, sampling_seed=seed + iter_num)
    pool = sampler(init_pool_size)
    # The initial pool already consumed part of the evaluation budget.
    remaining = budget - init_pool_size
    best = max(score(mol) for mol in pool)
    for _ in range(remaining):
        # pick a new point randomly
        candidate = sampler(1)[0]
        best = max(best, score(candidate))
        pool.append(candidate)
    print("Optimal value: {:.3f}".format(best))
    return best
if __name__ == "__main__":
args = parse_args()
exp_settings = vars(args)
num_repl = exp_settings.pop('num_repl')
opt_vals = []
for iter_num in range(num_repl):
opt_vals.append(run_screen(**exp_settings, iter_num=iter_num))
print("Average {} value with virtual screening: {:.3} +- std {:.3}"\
.format(exp_settings['objective'], np.mean(opt_vals), np.std(opt_vals)))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
@tvm.testing.requires_cuda
def test_thread_storage_sync():
    """Check that ThreadSync inserts a storage-sync intrinsic for a shared-scope
    intermediate buffer in a CUDA-lowered schedule."""
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")
    # A1 is staged in shared memory; A2 consumes it.
    A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
    s = te.create_schedule(A2.op)
    xo, xi = s[A2].split(A2.op.axis[0], factor=8)
    s[A2].bind(xo, te.thread_axis("blockIdx.x"))
    s[A1].compute_at(s[A2], xo)
    s[A1].set_scope("shared")
    # Lower step by step so the ThreadSync pass can be applied in isolation.
    bounds = tvm.te.schedule.InferBound(s)
    assert isinstance(bounds, tvm.container.Map)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, A2], stmt, None)
    mod = tvm.IRModule.from_expr(func)
    mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
    cuda_target = tvm.target.Target("cuda")
    mod = tvm.tir.transform.Apply(
        lambda f: f.with_attr({"global_symbol": "test", "target": cuda_target})
    )(mod._move())
    # Split out the device kernel before applying the shared-memory sync pass.
    fdevice = tvm.tir.transform.SplitHostDevice()(mod)["test_kernel0"]
    mod = tvm.IRModule.from_expr(fdevice)
    cuda_target = tvm.target.Target("cuda")
    f = tvm.tir.transform.ThreadSync("shared")(mod)["test_kernel0"]
    body_list = tvm.tir.stmt_list(f.body.body.body.body)
    # The second statement of the kernel body must be the inserted sync call.
    assert body_list[1].value.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync"))
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_thread_storage_sync()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot help command """
import asyncio
from userbot import bot, CMD_HELP
from userbot.events import geezbot_cmd
from userbot import CUSTOM_CMD as geez
# Alias used below to report how many help modules are registered.
modules = CMD_HELP
@bot.on(geezbot_cmd(outgoing=True, pattern="help(?: |$)(.*)"))
async def help(event):
    """Handle the .help command: with an argument, show that module's help
    text; without one, show the header plus the list of all modules.

    NOTE: this handler intentionally shadows the builtin `help` within
    this module.
    """
    args = event.pattern_match.group(1).lower()
    if args:
        if args in CMD_HELP:
            await event.edit(str(CMD_HELP[args]))
        else:
            # Unknown module: show the error briefly, then delete it.
            await event.edit("**Module Salah Goblokkkk!!**")
            await asyncio.sleep(18)
            await event.delete()
    else:
        await event.edit(f"**╭────────────────────**\
\n│ Help for Cangcut-userbot \
\n╰────────────────── \
\n╭────────────────────\
\n│ Untuk melihat lengkap CMD\
\n│ Contoh: {geez}help <nama module>\
\n│ Modules Aktif: {len(modules)}\
\n╰─────────────────")
        string = ""
        # Build the module listing shown under the header.
        for i in CMD_HELP:
            string += "`" + str(i)
            string += "`\t🔸 "
        await event.reply("───────────────────\n" f"🔸{string}•"
                          "\n───────────────────")
        # Keep the listing visible for a while, then clean it up.
        await asyncio.sleep(100)
        await event.delete()
|
from gcloud.storage.connection import Connection
from gcloud.credentials import get_credentials
from gcloud import storage
from gevent.local import local
from httplib2 import Http
def connect(creds):
    """Construct a connection value to Google Storage API
    The credentials are retrieved using get_credentials that checks
    the environment for the correct values.
    """
    # NOTE(review): the `creds` argument is ignored — credentials are always
    # fetched from the environment via get_credentials(). Confirm whether
    # callers expect their passed-in credentials to be used instead.
    credentials = get_credentials()
    return storage.Client(credentials=credentials,
                          http=ThreadSafeHttp(credentials))
class ThreadSafeHttp(object):
    # Scoped credentials for this wrapper; set per instance in __init__.
    __scoped_credentials = None
    # Thread-local storage holding one authorized Http object per thread.
    # NOTE(review): this is a CLASS attribute, so the same thread-local (and
    # thus the same per-thread Http) is shared by ALL instances — the first
    # instance's authorized Http wins per thread. Confirm this is intended.
    __local = local()
    def __init__(self, creds):
        # Scope the given credentials to the Storage connection's scopes.
        self.__scoped_credentials = Connection._create_scoped_credentials(
            creds, Connection.SCOPE)
    def __getattr__(self, name):
        # Lazily create an authorized Http for the current thread, then
        # delegate every attribute access to it.
        if not hasattr(self.__local, 'http'):
            self.__local.http = self.__scoped_credentials.authorize(Http())
        return getattr(self.__local.http, name)
|
import pandas as pd
import numpy as np
import click
import sys
from .runner import PyProphetLearner, PyProphetWeightApplier
from .ipf import infer_peptidoforms
from .levels_contexts import infer_peptides, infer_proteins, infer_genes, subsample_osw, reduce_osw, merge_osw, backpropagate_oswr
from .export import export_tsv, export_score_plots
from .export_compound import export_compound_tsv
from .filter import filter_sqmass
from .data_handling import (transform_pi0_lambda, transform_threads, transform_subsample_ratio, check_sqlite_table)
from functools import update_wrapper
import sqlite3
from tabulate import tabulate
from hyperopt import hp
try:
    # `profile` is injected as a builtin by kernprof/line_profiler when
    # profiling; referencing it detects whether we run under a profiler.
    profile
except NameError:
    # Not profiling: make @profile a no-op decorator so annotated code runs.
    def profile(fun):
        return fun
# Root click group; `chain=True` lets several sub-commands be chained in one
# invocation. The docstring below is shown verbatim as the CLI help text.
@click.group(chain=True)
@click.version_option()
def cli():
    """
    PyProphet: Semi-supervised learning and scoring of OpenSWATH results.
    Visit http://openswath.org for usage instructions and help.
    """
# PyProphet semi-supervised learning and scoring
@cli.command()
# # File handling
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--out', 'outfile', type=click.Path(exists=False), help='PyProphet output file.')
# Semi-supervised learning
@click.option('--classifier', default='LDA', show_default=True, type=click.Choice(['LDA', 'XGBoost']), help='Either a "LDA" or "XGBoost" classifier is used for semi-supervised learning.')
@click.option('--xgb_autotune/--no-xgb_autotune', default=False, show_default=True, help='XGBoost: Autotune hyperparameters.')
@click.option('--apply_weights', type=click.Path(exists=True), help='Apply PyProphet score weights file instead of semi-supervised learning.')
@click.option('--xeval_fraction', default=0.5, show_default=True, type=float, help='Data fraction used for cross-validation of semi-supervised learning step.')
@click.option('--xeval_num_iter', default=10, show_default=True, type=int, help='Number of iterations for cross-validation of semi-supervised learning step.')
@click.option('--ss_initial_fdr', default=0.15, show_default=True, type=float, help='Initial FDR cutoff for best scoring targets.')
@click.option('--ss_iteration_fdr', default=0.05, show_default=True, type=float, help='Iteration FDR cutoff for best scoring targets.')
@click.option('--ss_num_iter', default=10, show_default=True, type=int, help='Number of iterations for semi-supervised learning step.')
@click.option('--ss_main_score', default="var_xcorr_shape", show_default=True, type=str, help='Main score to start semi-supervised-learning.')
@click.option('--ss_score_filter', default='', help='Specify scores which should used for scoring. In addition specific predefined profiles can be used. For example for metabolomis data use "metabolomics". Please specify any additional input as follows: "var_ms1_xcorr_coelution,var_library_corr,var_xcorr_coelution,etc."')
# Statistics
@click.option('--group_id', default="group_id", show_default=True, type=str, help='Group identifier for calculation of statistics.')
@click.option('--parametric/--no-parametric', default=False, show_default=True, help='Do parametric estimation of p-values.')
@click.option('--pfdr/--no-pfdr', default=False, show_default=True, help='Compute positive false discovery rate (pFDR) instead of FDR.')
@click.option('--pi0_lambda', default=[0.1,0.5,0.05], show_default=True, type=(float, float, float), help='Use non-parametric estimation of p-values. Either use <START END STEPS>, e.g. 0.1, 1.0, 0.1 or set to fixed value, e.g. 0.4, 0, 0.', callback=transform_pi0_lambda)
@click.option('--pi0_method', default='bootstrap', show_default=True, type=click.Choice(['smoother', 'bootstrap']), help='Either "smoother" or "bootstrap"; the method for automatically choosing tuning parameter in the estimation of pi_0, the proportion of true null hypotheses.')
@click.option('--pi0_smooth_df', default=3, show_default=True, type=int, help='Number of degrees-of-freedom to use when estimating pi_0 with a smoother.')
@click.option('--pi0_smooth_log_pi0/--no-pi0_smooth_log_pi0', default=False, show_default=True, help='If True and pi0_method = "smoother", pi0 will be estimated by applying a smoother to a scatterplot of log(pi0) estimates against the tuning parameter lambda.')
@click.option('--lfdr_truncate/--no-lfdr_truncate', show_default=True, default=True, help='If True, local FDR values >1 are set to 1.')
@click.option('--lfdr_monotone/--no-lfdr_monotone', show_default=True, default=True, help='If True, local FDR values are non-decreasing with increasing p-values.')
@click.option('--lfdr_transformation', default='probit', show_default=True, type=click.Choice(['probit', 'logit']), help='Either a "probit" or "logit" transformation is applied to the p-values so that a local FDR estimate can be formed that does not involve edge effects of the [0,1] interval in which the p-values lie.')
@click.option('--lfdr_adj', default=1.5, show_default=True, type=float, help='Numeric value that is applied as a multiple of the smoothing bandwidth used in the density estimation.')
@click.option('--lfdr_eps', default=np.power(10.0,-8), show_default=True, type=float, help='Numeric value that is threshold for the tails of the empirical p-value distribution.')
# OpenSWATH options
@click.option('--level', default='ms2', show_default=True, type=click.Choice(['ms1', 'ms2', 'ms1ms2', 'transition']), help='Either "ms1", "ms2", "ms1ms2" or "transition"; the data level selected for scoring. "ms1ms2 integrates both MS1- and MS2-level scores and can be used instead of "ms2"-level results."')
# IPF options
@click.option('--ipf_max_peakgroup_rank', default=1, show_default=True, type=int, help='Assess transitions only for candidate peak groups until maximum peak group rank.')
@click.option('--ipf_max_peakgroup_pep', default=0.7, show_default=True, type=float, help='Assess transitions only for candidate peak groups until maximum posterior error probability.')
@click.option('--ipf_max_transition_isotope_overlap', default=0.5, show_default=True, type=float, help='Maximum isotope overlap to consider transitions in IPF.')
@click.option('--ipf_min_transition_sn', default=0, show_default=True, type=float, help='Minimum log signal-to-noise level to consider transitions in IPF. Set -1 to disable this filter.')
# TRIC
@click.option('--tric_chromprob/--no-tric_chromprob', default=False, show_default=True, help='Whether chromatogram probabilities for TRIC should be computed.')
# Processing
@click.option('--threads', default=1, show_default=True, type=int, help='Number of threads used for semi-supervised learning. -1 means all available CPUs.', callback=transform_threads)
@click.option('--test/--no-test', default=False, show_default=True, help='Run in test mode with fixed seed.')
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, ss_score_filter):
    """
    Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data.
    """
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile

    # Prepare XGBoost-specific parameters
    xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10, 'num_boost_round': 100, 'early_stopping_rounds': 10, 'test_size': 0.33}
    xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1, 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': 1, 'alpha': 0, 'scale_pos_weight': 1, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
    xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3), 'gamma': hp.uniform('gamma', 0.0, 0.5), 'max_depth': hp.quniform('max_depth', 2, 8, 1), 'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1), 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': hp.uniform('lambda', 0.0, 1.0), 'alpha': hp.uniform('alpha', 0.0, 1.0), 'scale_pos_weight': 1.0, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}

    # Learn a classifier, or apply pre-trained score weights when provided.
    if not apply_weights:
        PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, ss_score_filter).run()
    else:
        PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights, ss_score_filter).run()
# IPF
@cli.command()
# File handling
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--out', 'outfile', type=click.Path(exists=False), help='PyProphet output file.')
# IPF parameters
@click.option('--ipf_ms1_scoring/--no-ipf_ms1_scoring', default=True, show_default=True, help='Use MS1 precursor data for IPF.')
@click.option('--ipf_ms2_scoring/--no-ipf_ms2_scoring', default=True, show_default=True, help='Use MS2 precursor data for IPF.')
@click.option('--ipf_h0/--no-ipf_h0', default=True, show_default=True, help='Include possibility that peak groups are not covered by peptidoform space.')
@click.option('--ipf_grouped_fdr/--no-ipf_grouped_fdr', default=False, show_default=True, help='[Experimental] Compute grouped FDR instead of pooled FDR to better support data where peak groups are evaluated to originate from very heterogeneous numbers of peptidoforms.')
@click.option('--ipf_max_precursor_pep', default=0.7, show_default=True, type=float, help='Maximum PEP to consider scored precursors in IPF.')
@click.option('--ipf_max_peakgroup_pep', default=0.7, show_default=True, type=float, help='Maximum PEP to consider scored peak groups in IPF.')
@click.option('--ipf_max_precursor_peakgroup_pep', default=0.4, show_default=True, type=float, help='Maximum BHM layer 1 integrated precursor peakgroup PEP to consider in IPF.')
@click.option('--ipf_max_transition_pep', default=0.6, show_default=True, type=float, help='Maximum PEP to consider scored transitions in IPF.')
def ipf(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep):
    """
    Infer peptidoforms after scoring of MS1, MS2 and transition-level data.
    """
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile
    infer_peptidoforms(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep)
# Peptide-level inference
@cli.command()
# File handling
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--out', 'outfile', type=click.Path(exists=False), help='PyProphet output file.')
# Context
@click.option('--context', default='run-specific', show_default=True, type=click.Choice(['run-specific', 'experiment-wide', 'global']), help='Context to estimate protein-level FDR control.')
# Statistics
@click.option('--parametric/--no-parametric', default=False, show_default=True, help='Do parametric estimation of p-values.')
@click.option('--pfdr/--no-pfdr', default=False, show_default=True, help='Compute positive false discovery rate (pFDR) instead of FDR.')
@click.option('--pi0_lambda', default=[0.1,0.5,0.05], show_default=True, type=(float, float, float), help='Use non-parametric estimation of p-values. Either use <START END STEPS>, e.g. 0.1, 1.0, 0.1 or set to fixed value, e.g. 0.4, 0, 0.', callback=transform_pi0_lambda)
@click.option('--pi0_method', default='bootstrap', show_default=True, type=click.Choice(['smoother', 'bootstrap']), help='Either "smoother" or "bootstrap"; the method for automatically choosing tuning parameter in the estimation of pi_0, the proportion of true null hypotheses.')
@click.option('--pi0_smooth_df', default=3, show_default=True, type=int, help='Number of degrees-of-freedom to use when estimating pi_0 with a smoother.')
@click.option('--pi0_smooth_log_pi0/--no-pi0_smooth_log_pi0', default=False, show_default=True, help='If True and pi0_method = "smoother", pi0 will be estimated by applying a smoother to a scatterplot of log(pi0) estimates against the tuning parameter lambda.')
@click.option('--lfdr_truncate/--no-lfdr_truncate', show_default=True, default=True, help='If True, local FDR values >1 are set to 1.')
@click.option('--lfdr_monotone/--no-lfdr_monotone', show_default=True, default=True, help='If True, local FDR values are non-decreasing with increasing p-values.')
@click.option('--lfdr_transformation', default='probit', show_default=True, type=click.Choice(['probit', 'logit']), help='Either a "probit" or "logit" transformation is applied to the p-values so that a local FDR estimate can be formed that does not involve edge effects of the [0,1] interval in which the p-values lie.')
@click.option('--lfdr_adj', default=1.5, show_default=True, type=float, help='Numeric value that is applied as a multiple of the smoothing bandwidth used in the density estimation.')
@click.option('--lfdr_eps', default=np.power(10.0,-8), show_default=True, type=float, help='Numeric value that is threshold for the tails of the empirical p-value distribution.')
def peptide(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps):
    """
    Infer peptides and conduct error-rate estimation in different contexts.
    """
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile
    infer_peptides(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps)
# Gene-level inference
@cli.command()
# File handling
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--out', 'outfile', type=click.Path(exists=False), help='PyProphet output file.')
# Context
@click.option('--context', default='run-specific', show_default=True, type=click.Choice(['run-specific', 'experiment-wide', 'global']), help='Context to estimate gene-level FDR control.')
# Statistics
@click.option('--parametric/--no-parametric', default=False, show_default=True, help='Do parametric estimation of p-values.')
@click.option('--pfdr/--no-pfdr', default=False, show_default=True, help='Compute positive false discovery rate (pFDR) instead of FDR.')
@click.option('--pi0_lambda', default=[0.1,0.5,0.05], show_default=True, type=(float, float, float), help='Use non-parametric estimation of p-values. Either use <START END STEPS>, e.g. 0.1, 1.0, 0.1 or set to fixed value, e.g. 0.4, 0, 0.', callback=transform_pi0_lambda)
@click.option('--pi0_method', default='bootstrap', show_default=True, type=click.Choice(['smoother', 'bootstrap']), help='Either "smoother" or "bootstrap"; the method for automatically choosing tuning parameter in the estimation of pi_0, the proportion of true null hypotheses.')
@click.option('--pi0_smooth_df', default=3, show_default=True, type=int, help='Number of degrees-of-freedom to use when estimating pi_0 with a smoother.')
@click.option('--pi0_smooth_log_pi0/--no-pi0_smooth_log_pi0', default=False, show_default=True, help='If True and pi0_method = "smoother", pi0 will be estimated by applying a smoother to a scatterplot of log(pi0) estimates against the tuning parameter lambda.')
@click.option('--lfdr_truncate/--no-lfdr_truncate', show_default=True, default=True, help='If True, local FDR values >1 are set to 1.')
@click.option('--lfdr_monotone/--no-lfdr_monotone', show_default=True, default=True, help='If True, local FDR values are non-decreasing with increasing p-values.')
@click.option('--lfdr_transformation', default='probit', show_default=True, type=click.Choice(['probit', 'logit']), help='Either a "probit" or "logit" transformation is applied to the p-values so that a local FDR estimate can be formed that does not involve edge effects of the [0,1] interval in which the p-values lie.')
@click.option('--lfdr_adj', default=1.5, show_default=True, type=float, help='Numeric value that is applied as a multiple of the smoothing bandwidth used in the density estimation.')
@click.option('--lfdr_eps', default=np.power(10.0,-8), show_default=True, type=float, help='Numeric value that is threshold for the tails of the empirical p-value distribution.')
def gene(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps):
    """
    Infer genes and conduct error-rate estimation in different contexts.
    """
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile
    infer_genes(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps)
# Protein-level inference
@cli.command()
# File handling
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--out', 'outfile', type=click.Path(exists=False), help='PyProphet output file.')
# Context
@click.option('--context', default='run-specific', show_default=True, type=click.Choice(['run-specific', 'experiment-wide', 'global']), help='Context to estimate protein-level FDR control.')
# Statistics
@click.option('--parametric/--no-parametric', default=False, show_default=True, help='Do parametric estimation of p-values.')
@click.option('--pfdr/--no-pfdr', default=False, show_default=True, help='Compute positive false discovery rate (pFDR) instead of FDR.')
@click.option('--pi0_lambda', default=[0.1,0.5,0.05], show_default=True, type=(float, float, float), help='Use non-parametric estimation of p-values. Either use <START END STEPS>, e.g. 0.1, 1.0, 0.1 or set to fixed value, e.g. 0.4, 0, 0.', callback=transform_pi0_lambda)
@click.option('--pi0_method', default='bootstrap', show_default=True, type=click.Choice(['smoother', 'bootstrap']), help='Either "smoother" or "bootstrap"; the method for automatically choosing tuning parameter in the estimation of pi_0, the proportion of true null hypotheses.')
@click.option('--pi0_smooth_df', default=3, show_default=True, type=int, help='Number of degrees-of-freedom to use when estimating pi_0 with a smoother.')
@click.option('--pi0_smooth_log_pi0/--no-pi0_smooth_log_pi0', default=False, show_default=True, help='If True and pi0_method = "smoother", pi0 will be estimated by applying a smoother to a scatterplot of log(pi0) estimates against the tuning parameter lambda.')
@click.option('--lfdr_truncate/--no-lfdr_truncate', show_default=True, default=True, help='If True, local FDR values >1 are set to 1.')
@click.option('--lfdr_monotone/--no-lfdr_monotone', show_default=True, default=True, help='If True, local FDR values are non-decreasing with increasing p-values.')
@click.option('--lfdr_transformation', default='probit', show_default=True, type=click.Choice(['probit', 'logit']), help='Either a "probit" or "logit" transformation is applied to the p-values so that a local FDR estimate can be formed that does not involve edge effects of the [0,1] interval in which the p-values lie.')
@click.option('--lfdr_adj', default=1.5, show_default=True, type=float, help='Numeric value that is applied as a multiple of the smoothing bandwidth used in the density estimation.')
@click.option('--lfdr_eps', default=np.power(10.0,-8), show_default=True, type=float, help='Numeric value that is threshold for the tails of the empirical p-value distribution.')
def protein(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps):
    """
    Infer proteins and conduct error-rate estimation in different contexts.
    """
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile
    infer_proteins(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps)
# Subsample OpenSWATH file to minimum for integrated scoring
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='OpenSWATH input file.')
@click.option('--out','outfile', type=click.Path(exists=False), help='Subsampled OSWS output file.')
@click.option('--subsample_ratio', default=1, show_default=True, type=float, help='Subsample ratio used per input file.', callback=transform_subsample_ratio)
@click.option('--test/--no-test', default=False, show_default=True, help='Run in test mode with fixed seed.')
def subsample(infile, outfile, subsample_ratio, test):
    """
    Subsample OpenSWATH file to minimum for integrated scoring
    """
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile
    subsample_osw(infile, outfile, subsample_ratio, test)
# Reduce scored PyProphet file to minimum for global scoring
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Scored PyProphet input file.')
@click.option('--out','outfile', type=click.Path(exists=False), help='Reduced OSWR output file.')
def reduce(infile, outfile):
    """
    Reduce scored PyProphet file to minimum for global scoring
    """
    # NOTE: shadows builtins.reduce, but the name is the public CLI command
    # (`pyprophet reduce`), so it must stay.
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile
    reduce_osw(infile, outfile)
# Merging of multiple runs
@cli.command()
@click.argument('infiles', nargs=-1, type=click.Path(exists=True))
@click.option('--out','outfile', required=True, type=click.Path(exists=False), help='Merged OSW output file.')
@click.option('--same_run/--no-same_run', default=False, help='Assume input files are from same run (deletes run information).')
@click.option('--template','templatefile', required=True, type=click.Path(exists=False), help='Template OSW file.')
@click.option('--merged_post_scored_runs', is_flag=True, help='Merge OSW output files that have already been scored.')
def merge(infiles, outfile, same_run, templatefile, merged_post_scored_runs):
    """
    Merge multiple OSW files and (for large experiments, it is recommended to subsample first).
    """
    # A variadic click argument arrives as a tuple; empty means no inputs.
    if not infiles:
        raise click.ClickException("At least one PyProphet input file needs to be provided.")
    merge_osw(infiles, outfile, templatefile, same_run, merged_post_scored_runs)
# Backpropagate multi-run peptide and protein scores to single files
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='Single run PyProphet input file.')
@click.option('--out','outfile', type=click.Path(exists=False), help='Single run (with multi-run scores) PyProphet output file.')
@click.option('--apply_scores', required=True, type=click.Path(exists=True), help='PyProphet multi-run scores file to apply.')
def backpropagate(infile, outfile, apply_scores):
    """
    Backpropagate multi-run peptide and protein scores to single files
    """
    # Operate in place when no explicit output file is given.
    # (The original `else: outfile = outfile` branch was a no-op.)
    if outfile is None:
        outfile = infile
    backpropagate_oswr(infile, outfile, apply_scores)
# Export TSV
@cli.command()
# File handling
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--out', 'outfile', type=click.Path(exists=False), help='Output TSV/CSV (matrix, legacy_split, legacy_merged) file.')
@click.option('--format', default='legacy_split', show_default=True, type=click.Choice(['matrix', 'legacy_split', 'legacy_merged','score_plots']), help='Export format, either matrix, legacy_split/legacy_merged (mProphet/PyProphet) or score_plots format.')
@click.option('--csv/--no-csv', 'outcsv', default=False, show_default=True, help='Export CSV instead of TSV file.')
# Context
@click.option('--transition_quantification/--no-transition_quantification', default=True, show_default=True, help='[format: legacy] Report aggregated transition-level quantification.')
@click.option('--max_transition_pep', default=0.7, show_default=True, type=float, help='[format: legacy] Maximum PEP to retain scored transitions for quantification (requires transition-level scoring).')
@click.option('--ipf', default='peptidoform', show_default=True, type=click.Choice(['peptidoform','augmented','disable']), help='[format: matrix/legacy] Should IPF results be reported if present? "peptidoform": Report results on peptidoform-level, "augmented": Augment OpenSWATH results with IPF scores, "disable": Ignore IPF results')
@click.option('--ipf_max_peptidoform_pep', default=0.4, show_default=True, type=float, help='[format: matrix/legacy] IPF: Filter results to maximum run-specific peptidoform-level PEP.')
@click.option('--max_rs_peakgroup_qvalue', default=0.05, show_default=True, type=float, help='[format: matrix/legacy] Filter results to maximum run-specific peak group-level q-value.')
@click.option('--peptide/--no-peptide', default=True, show_default=True, help='Append peptide-level error-rate estimates if available.')
@click.option('--max_global_peptide_qvalue', default=0.01, show_default=True, type=float, help='[format: matrix/legacy] Filter results to maximum global peptide-level q-value.')
@click.option('--protein/--no-protein', default=True, show_default=True, help='Append protein-level error-rate estimates if available.')
@click.option('--max_global_protein_qvalue', default=0.01, show_default=True, type=float, help='[format: matrix/legacy] Filter results to maximum global protein-level q-value.')
def export(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue):
    """
    Export TSV/CSV tables
    """
    # Score plots are a separate export path with no TSV/CSV output file.
    if format == "score_plots":
        export_score_plots(infile)
        return

    # Derive the output name from the input when none was given; the
    # original `else: outfile = outfile` branch was a no-op.
    if outfile is None:
        extension = ".csv" if outcsv else ".tsv"
        outfile = infile.split(".osw")[0] + extension
    export_tsv(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue)
# Export Compound TSV
@cli.command()
#File handling
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--out', 'outfile', type=click.Path(exists=False), help='Output TSV/CSV (matrix, legacy_merged) file.')
@click.option('--format', default='legacy_merged', show_default=True, type=click.Choice(['matrix', 'legacy_merged','score_plots']), help='Export format, either matrix, legacy_merged (PyProphet) or score_plots format.')
@click.option('--csv/--no-csv', 'outcsv', default=False, show_default=True, help='Export CSV instead of TSV file.')
# Context
@click.option('--max_rs_peakgroup_qvalue', default=0.05, show_default=True, type=float, help='[format: matrix/legacy] Filter results to maximum run-specific peak group-level q-value.')
def export_compound(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue):
    """
    Export Compound TSV/CSV tables
    """
    # Score plots are a separate export path with no TSV/CSV output file.
    if format == "score_plots":
        export_score_plots(infile)
        return

    # Derive the output name from the input when none was given; the
    # original `else: outfile = outfile` branch was a no-op.
    if outfile is None:
        extension = ".csv" if outcsv else ".tsv"
        outfile = infile.split(".osw")[0] + extension
    export_compound_tsv(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue)
# Filter sqMass files
@cli.command()
# File handling
@click.argument('sqmassfiles', nargs=-1, type=click.Path(exists=True))
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
@click.option('--max_precursor_pep', default=0.7, show_default=True, type=float, help='Maximum PEP to retain scored precursors in sqMass.')
@click.option('--max_peakgroup_pep', default=0.7, show_default=True, type=float, help='Maximum PEP to retain scored peak groups in sqMass.')
@click.option('--max_transition_pep', default=0.7, show_default=True, type=float, help='Maximum PEP to retain scored transitions in sqMass.')
def filter(sqmassfiles, infile, max_precursor_pep, max_peakgroup_pep, max_transition_pep):
    """
    Filter sqMass files
    """
    # NOTE: shadows builtins.filter, but the name is the public CLI command
    # (`pyprophet filter`), so it must stay. Thin wrapper: all work is done
    # by filter_sqmass using the PEP cutoffs parsed above.
    filter_sqmass(sqmassfiles, infile, max_precursor_pep, max_peakgroup_pep, max_transition_pep)
# Print statistics
@cli.command()
@click.option('--in', 'infile', required=True, type=click.Path(exists=True), help='PyProphet input file.')
def statistics(infile):
    """
    Print PyProphet statistics
    """
    # Load each score table once; the original re-ran the identical SQL
    # queries for every q-value threshold. The connection is also closed
    # now (it previously leaked).
    con = sqlite3.connect(infile)
    try:
        peakgroups = peptides_global = peptides = proteins_global = proteins = None
        if check_sqlite_table(con, 'SCORE_MS2'):
            peakgroups = pd.read_sql('SELECT * FROM SCORE_MS2 INNER JOIN FEATURE ON SCORE_MS2.feature_id = FEATURE.id INNER JOIN RUN ON FEATURE.RUN_ID = RUN.ID WHERE RANK==1;', con)
        if check_sqlite_table(con, 'SCORE_PEPTIDE'):
            peptides_global = pd.read_sql('SELECT * FROM SCORE_PEPTIDE WHERE CONTEXT=="global";', con)
            peptides = pd.read_sql('SELECT * FROM SCORE_PEPTIDE INNER JOIN RUN ON SCORE_PEPTIDE.RUN_ID = RUN.ID;', con)
        if check_sqlite_table(con, 'SCORE_PROTEIN'):
            proteins_global = pd.read_sql('SELECT * FROM SCORE_PROTEIN WHERE CONTEXT=="global";', con)
            proteins = pd.read_sql('SELECT * FROM SCORE_PROTEIN INNER JOIN RUN ON SCORE_PROTEIN.RUN_ID = RUN.ID;', con)
    finally:
        con.close()

    # Report peak group / peptide / protein counts at each q-value cutoff.
    qts = [0.01, 0.05, 0.10]
    for qt in qts:
        if peakgroups is not None:
            click.echo("Total peakgroups (q-value<%s): %s" % (qt, len(peakgroups[peakgroups['QVALUE']<qt][['FEATURE_ID']].drop_duplicates())))
            click.echo("Total peakgroups per run (q-value<%s):" % qt)
            click.echo(tabulate(peakgroups[peakgroups['QVALUE']<qt].groupby(['FILENAME'])['FEATURE_ID'].nunique().reset_index(), showindex=False))
            click.echo(10*"=")
        if peptides is not None:
            click.echo("Total peptides (global context) (q-value<%s): %s" % (qt, len(peptides_global[peptides_global['QVALUE']<qt][['PEPTIDE_ID']].drop_duplicates())))
            click.echo(tabulate(peptides[peptides['QVALUE']<qt].groupby(['FILENAME'])['PEPTIDE_ID'].nunique().reset_index(), showindex=False))
            click.echo(10*"=")
        if proteins is not None:
            click.echo("Total proteins (global context) (q-value<%s): %s" % (qt, len(proteins_global[proteins_global['QVALUE']<qt][['PROTEIN_ID']].drop_duplicates())))
            click.echo(tabulate(proteins[proteins['QVALUE']<qt].groupby(['FILENAME'])['PROTEIN_ID'].nunique().reset_index(), showindex=False))
            click.echo(10*"=")
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def diff(x, dim=-1):
    """
    Inverse of x.cumsum(dim=dim): differences between subsequent elements.
    Only operates on dims -1 and -2 (positive dims 1 and 2 are translated
    relative to the tensor rank).
    Args:
        x (tensor): Input of arbitrary shape
    Returns:
        diff (tensor): Result with the same shape as x
    """
    # Map positive dims 1/2 onto -1/-2 according to the tensor's rank.
    if dim in (1, 2):
        offset = x.dim() - dim
        if offset == 1:
            dim = -1
        elif offset == 2:
            dim = -2
        elif dim == 1:
            raise ValueError('If dim=1, tensor must have 2 or 3 dimensions')
        else:
            raise ValueError('If dim=2, tensor should have 3 or 4 dimensions')
    # Shift one step along `dim` (zero in front) and subtract from x.
    if dim == -1:
        shifted = F.pad(x, (1, 0))[..., :-1]
    elif dim == -2:
        shifted = F.pad(x, (0, 0, 1, 0))[..., :-1, :]
    else:
        raise ValueError("dim must be equal to -1 or -2")
    return x - shifted
class Cumsum(nn.Module):
    """
    Compute cumulative sum along the specified dimension of the tensor.
    Example:
        >>> f = stribor.Cumsum(-1)
        >>> f(torch.ones(1, 4))
        (tensor([[1., 2., 3., 4.]]), tensor([[0., 0., 0., 0.]]))
    Args:
        dim (int): Tensor dimension over which to perform the summation. Options: -1 or -2.
    """
    def __init__(self, dim):
        super().__init__()
        assert dim in [-1, -2], '`dim` must be either `-1` or `-2`'
        self.dim = dim

    def forward(self, x, **kwargs):
        # Second output is an all-zeros tensor shaped like the result
        # (the flow's log-det term for this transformation).
        summed = x.cumsum(self.dim)
        return summed, torch.zeros_like(summed)

    def inverse(self, y, **kwargs):
        # Undo the cumulative sum via the module-level `diff` helper.
        restored = diff(y, self.dim)
        return restored, torch.zeros_like(restored)
class Diff(nn.Module):
    """
    Inverse of Cumsum transformation: forward differencing.
    Args:
        dim (int): Tensor dimension over which to perform the diff. Options: -1 or -2.
    """
    def __init__(self, dim):
        super().__init__()
        # Delegate to Cumsum with forward/inverse swapped.
        self.base_flow = Cumsum(dim)

    def forward(self, x, **kwargs):
        out, log_det = self.base_flow.inverse(x, **kwargs)
        return out, log_det

    def inverse(self, x, **kwargs):
        out, log_det = self.base_flow.forward(x, **kwargs)
        return out, log_det
class CumsumColumn(nn.Module):
    """
    Cumulative sum along one specific column of a (..., M, N) matrix.

    Example:
        >>> f = stribor.CumsumColumn(1)
        >>> f(torch.ones(3, 3))[0]
        tensor([[1., 1., 1.],
                [1., 2., 1.],
                [1., 3., 1.]])

    Args:
        column (int): Column in the (batched) matrix (..., M, N) over which to
            perform the summation
    """
    def __init__(self, column):
        super().__init__()
        self.column = column

    def forward(self, x, **kwargs):
        # Only the selected column is accumulated; everything else is copied.
        out = x.clone()
        out[..., self.column] = out[..., self.column].cumsum(-1)
        return out, torch.zeros_like(out)

    def inverse(self, y, **kwargs):
        restored = y.clone()
        restored[..., self.column] = diff(restored[..., self.column], -1)
        return restored, torch.zeros_like(restored)
class DiffColumn(nn.Module):
    """Inverse of CumsumColumn: differences a single column of a matrix.

    Args:
        column (int): Column index in the (batched) matrix (..., M, N).
    """
    def __init__(self, column):
        super().__init__()
        self.base_flow = CumsumColumn(column=column)

    def forward(self, x, **kwargs):
        # Running CumsumColumn backwards performs the column-wise diff.
        return self.base_flow.inverse(x, **kwargs)

    def inverse(self, x, **kwargs):
        return self.base_flow.forward(x, **kwargs)
|
# -*- coding:utf-8 -*-
import requests
import random
from wencai.core.cookies import WencaiCookie
class Session(requests.Session):
    """``requests.Session`` preconfigured with browser-like headers and the
    ``hexin-v`` anti-crawler token required by the wencai API.

    Args:
        proxies (dict | list | None): A requests-style proxies mapping, or a
            list of such mappings from which one is chosen at random.
        verify (bool): TLS certificate verification flag passed on proxied
            requests. NOTE(review): defaults to False (verification off) —
            confirm this is intentional.
    """

    headers = {
        "Accept": "application/json,text/javascript,*/*;q=0.01",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.8",
        'Connection': 'keep-alive',
        'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
        'X-Requested-With': "XMLHttpRequest"
    }

    def __init__(self, proxies=None, verify=False):
        """Initialize the session and optionally select a proxy.

        Raises:
            TypeError: If ``proxies`` is neither a list nor a dict.
        """
        requests.Session.__init__(self)
        self.headers.update(Session.headers)
        if proxies is not None:
            if not isinstance(proxies, (list, dict)):
                raise TypeError('proxies should be list or dict')
            if isinstance(proxies, list):
                # Pick one proxy configuration at random from the pool.
                proxies = random.choice(proxies)
            self.proxies = proxies
        self.verify = verify

    def update_headers(self, source, add_headers, force_cookies=False):
        """Refresh the ``hexin-v`` token header and merge extra headers.

        Args:
            source: Passed through to ``WencaiCookie.getHexinVByJson``.
            add_headers (dict | None): Extra headers merged into the session.
            force_cookies (bool): When True, fetch the token over HTTP instead
                of from the JSON source.

        Raises:
            TypeError: If ``add_headers`` is not a dict.
        """
        if force_cookies:
            self.headers['hexin-v'] = WencaiCookie().getHeXinVByHttp()
        else:
            self.headers['hexin-v'] = WencaiCookie().getHexinVByJson(source=source)
        if add_headers is not None:
            if not isinstance(add_headers, dict):
                raise TypeError('update_headers should be `dict` type.')
            self.headers.update(add_headers)

    def get_result(self, url, source=None, force_cookies=False, add_headers=None, **kwargs):
        """GET ``url`` with refreshed headers, routing through proxies when set."""
        self.update_headers(add_headers=add_headers, source=source, force_cookies=force_cookies)
        if self.proxies is None:
            return super(Session, self).get(url=url, **kwargs)
        return super(Session, self).get(url=url, proxies=self.proxies, verify=self.verify, **kwargs)

    def post_result(self, url, source=None, data=None, json=None, add_headers=None, force_cookies=False, **kwargs):
        """POST to ``url`` with refreshed headers, routing through proxies when set."""
        self.update_headers(add_headers=add_headers, source=source, force_cookies=force_cookies)
        if self.proxies is None:
            return super(Session, self).post(url=url, data=data, json=json, **kwargs)
        return super(Session, self).post(url=url, data=data, json=json, proxies=self.proxies,
                                         verify=self.verify, **kwargs)
|
# ============================================================================
# FILE: buffer.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from deoplete.source.base import Base
from deoplete.util import parse_buffer_pattern, getlines
class Source(Base):
    """Deoplete completion source offering words collected from open buffers."""

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'buffer'
        self.mark = '[B]'
        self.events = ['Init', 'BufReadPost', 'BufWritePost']
        self.vars = {
            'require_same_filetype': True,
        }
        self._limit = 1000000   # max buffer size in bytes that gets indexed
        self._buffers = {}      # bufnr -> {'bufnr', 'filetype', 'candidates'}
        self._max_lines = 5000  # NOTE(review): unused in this class — confirm intent

    def on_event(self, context):
        """Re-index the current buffer and evict caches for dead buffers."""
        self._make_cache(context)

        # Keep only buffers visible in the current tab page or still listed.
        tab_bufnrs = self.vim.call('tabpagebuflist')
        self._buffers = {
            x['bufnr']: x for x in self._buffers.values()
            if x['bufnr'] in tab_bufnrs or
            self.vim.call('buflisted', x['bufnr'])
        }

    def gather_candidates(self, context):
        """Return cached candidate lists from buffers passing the filetype filter."""
        tab_bufnrs = self.vim.call('tabpagebuflist')
        same_filetype = self.get_var('require_same_filetype')
        return {'sorted_candidates': [
            x['candidates'] for x in self._buffers.values()
            if not same_filetype or
            x['filetype'] in context['filetypes'] or
            x['filetype'] in context['same_filetypes'] or
            x['bufnr'] in tab_bufnrs
        ]}

    def _make_cache(self, context):
        """Parse the current buffer into sorted word candidates.

        Buffers larger than ``self._limit`` bytes are skipped entirely.
        """
        # Bufsize check
        size = self.vim.call('line2byte',
                             self.vim.call('line', '$') + 1) - 1
        if size > self._limit:
            return
        try:
            self._buffers[context['bufnr']] = {
                'bufnr': context['bufnr'],
                'filetype': self.get_buf_option('filetype'),
                'candidates': [
                    {'word': x} for x in
                    sorted(parse_buffer_pattern(getlines(self.vim),
                                                context['keyword_pattern']),
                           key=str.lower)
                ]
            }
        except UnicodeDecodeError:
            # Best effort: an undecodable buffer simply is not cached.
            # Fix: return None like the size early-exit above (the original
            # returned [] here, an inconsistent return type for this method).
            return
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ManagementAgentImage(object):
    """
    Supported Agent downloads
    """

    #: platform_type constant: value "LINUX"
    PLATFORM_TYPE_LINUX = "LINUX"
    #: platform_type constant: value "WINDOWS"
    PLATFORM_TYPE_WINDOWS = "WINDOWS"
    #: platform_type constant: value "SOLARIS"
    PLATFORM_TYPE_SOLARIS = "SOLARIS"
    #: package_type constant: value "RPM"
    PACKAGE_TYPE_RPM = "RPM"
    #: package_type constant: value "ZIP"
    PACKAGE_TYPE_ZIP = "ZIP"
    #: package_architecture_type constant: value "X86_64"
    PACKAGE_ARCHITECTURE_TYPE_X86_64 = "X86_64"
    #: package_architecture_type constant: value "SPARC"
    PACKAGE_ARCHITECTURE_TYPE_SPARC = "SPARC"
    #: lifecycle_state constant: value "CREATING"
    LIFECYCLE_STATE_CREATING = "CREATING"
    #: lifecycle_state constant: value "UPDATING"
    LIFECYCLE_STATE_UPDATING = "UPDATING"
    #: lifecycle_state constant: value "ACTIVE"
    LIFECYCLE_STATE_ACTIVE = "ACTIVE"
    #: lifecycle_state constant: value "INACTIVE"
    LIFECYCLE_STATE_INACTIVE = "INACTIVE"
    #: lifecycle_state constant: value "TERMINATED"
    LIFECYCLE_STATE_TERMINATED = "TERMINATED"
    #: lifecycle_state constant: value "DELETING"
    LIFECYCLE_STATE_DELETING = "DELETING"
    #: lifecycle_state constant: value "DELETED"
    LIFECYCLE_STATE_DELETED = "DELETED"
    #: lifecycle_state constant: value "FAILED"
    LIFECYCLE_STATE_FAILED = "FAILED"

    # Enum domains shared by the validating setters below.
    _PLATFORM_TYPES = ["LINUX", "WINDOWS", "SOLARIS"]
    _PACKAGE_TYPES = ["RPM", "ZIP"]
    _PACKAGE_ARCHITECTURE_TYPES = ["X86_64", "SPARC"]
    _LIFECYCLE_STATES = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "TERMINATED", "DELETING", "DELETED", "FAILED"]

    def __init__(self, **kwargs):
        """
        Initializes a new ManagementAgentImage object with values from keyword
        arguments. Each supported keyword maps to the same-named property:

        :param id: (str) Agent image resource id.
        :param platform_type: (str) One of "LINUX", "WINDOWS", "SOLARIS".
        :param platform_name: (str) Agent image platform display name.
        :param package_type: (str) One of "RPM", "ZIP".
        :param package_architecture_type: (str) One of "X86_64", "SPARC".
        :param version: (str) Agent image version.
        :param size: (float) Agent image size in bytes.
        :param checksum: (str) Agent image content SHA256 hash.
        :param object_url: (str) Object storage URL for download.
        :param lifecycle_state: (str) One of "CREATING", "UPDATING", "ACTIVE",
            "INACTIVE", "TERMINATED", "DELETING", "DELETED", "FAILED".
        """
        # Type and wire-name metadata consumed by the SDK (de)serializers.
        # Key order is preserved from the generated model.
        self.swagger_types = {
            'id': 'str',
            'platform_type': 'str',
            'platform_name': 'str',
            'package_type': 'str',
            'package_architecture_type': 'str',
            'version': 'str',
            'size': 'float',
            'checksum': 'str',
            'object_url': 'str',
            'lifecycle_state': 'str'
        }
        self.attribute_map = {
            'id': 'id',
            'platform_type': 'platformType',
            'platform_name': 'platformName',
            'package_type': 'packageType',
            'package_architecture_type': 'packageArchitectureType',
            'version': 'version',
            'size': 'size',
            'checksum': 'checksum',
            'object_url': 'objectUrl',
            'lifecycle_state': 'lifecycleState'
        }
        self._id = None
        self._platform_type = None
        self._platform_name = None
        self._package_type = None
        self._package_architecture_type = None
        self._version = None
        self._size = None
        self._checksum = None
        self._object_url = None
        self._lifecycle_state = None

    @staticmethod
    def _validate_enum(name, value, allowed_values):
        # Shared guard replacing the copy-pasted per-setter checks; raises
        # the exact same ValueError message the generated code produced.
        if not value_allowed_none_or_none_sentinel(value, allowed_values):
            raise ValueError(
                "Invalid value for `{0}`, must be None or one of {1}".format(name, allowed_values)
            )

    @property
    def id(self):
        """
        **[Required]** Agent image resource id.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the agent image resource id."""
        self._id = id

    @property
    def platform_type(self):
        """
        **[Required]** Agent image platform type.
        Allowed values: "LINUX", "WINDOWS", "SOLARIS"

        :rtype: str
        """
        return self._platform_type

    @platform_type.setter
    def platform_type(self, platform_type):
        """Sets the platform type; must be None or an allowed value."""
        self._validate_enum('platform_type', platform_type, self._PLATFORM_TYPES)
        self._platform_type = platform_type

    @property
    def platform_name(self):
        """
        Agent image platform display name.

        :rtype: str
        """
        return self._platform_name

    @platform_name.setter
    def platform_name(self, platform_name):
        """Sets the platform display name."""
        self._platform_name = platform_name

    @property
    def package_type(self):
        """
        The installation package type.
        Allowed values: "RPM", "ZIP"

        :rtype: str
        """
        return self._package_type

    @package_type.setter
    def package_type(self, package_type):
        """Sets the installation package type; must be None or an allowed value."""
        self._validate_enum('package_type', package_type, self._PACKAGE_TYPES)
        self._package_type = package_type

    @property
    def package_architecture_type(self):
        """
        The installation package target architecture type.
        Allowed values: "X86_64", "SPARC"

        :rtype: str
        """
        return self._package_architecture_type

    @package_architecture_type.setter
    def package_architecture_type(self, package_architecture_type):
        """Sets the package architecture type; must be None or an allowed value."""
        self._validate_enum('package_architecture_type', package_architecture_type,
                            self._PACKAGE_ARCHITECTURE_TYPES)
        self._package_architecture_type = package_architecture_type

    @property
    def version(self):
        """
        **[Required]** Agent image version.

        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the agent image version."""
        self._version = version

    @property
    def size(self):
        """
        Agent image size in bytes.

        :rtype: float
        """
        return self._size

    @size.setter
    def size(self, size):
        """Sets the agent image size in bytes."""
        self._size = size

    @property
    def checksum(self):
        """
        Agent image content SHA256 hash.

        :rtype: str
        """
        return self._checksum

    @checksum.setter
    def checksum(self, checksum):
        """Sets the agent image content SHA256 hash."""
        self._checksum = checksum

    @property
    def object_url(self):
        """
        Object storage URL for download.

        :rtype: str
        """
        return self._object_url

    @object_url.setter
    def object_url(self, object_url):
        """Sets the object storage download URL."""
        self._object_url = object_url

    @property
    def lifecycle_state(self):
        """
        The current state of Management Agent Image.
        Allowed values: "CREATING", "UPDATING", "ACTIVE", "INACTIVE",
        "TERMINATED", "DELETING", "DELETED", "FAILED"

        :rtype: str
        """
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """Sets the lifecycle state; must be None or an allowed value."""
        self._validate_enum('lifecycle_state', lifecycle_state, self._LIFECYCLE_STATES)
        self._lifecycle_state = lifecycle_state

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Generated models compare by full attribute dict; None is never equal.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
|
# coding: utf-8
from __future__ import absolute_import
# import KafkaClient
from huaweicloudsdkkafka.v2.kafka_client import KafkaClient
from huaweicloudsdkkafka.v2.kafka_async_client import KafkaAsyncClient
# import models into sdk package
from huaweicloudsdkkafka.v2.model.access_policy_entity import AccessPolicyEntity
from huaweicloudsdkkafka.v2.model.access_policy_topic_entity import AccessPolicyTopicEntity
from huaweicloudsdkkafka.v2.model.batch_create_or_delete_kafka_tag_request import BatchCreateOrDeleteKafkaTagRequest
from huaweicloudsdkkafka.v2.model.batch_create_or_delete_kafka_tag_response import BatchCreateOrDeleteKafkaTagResponse
from huaweicloudsdkkafka.v2.model.batch_create_or_delete_tag_req import BatchCreateOrDeleteTagReq
from huaweicloudsdkkafka.v2.model.batch_delete_instance_topic_req import BatchDeleteInstanceTopicReq
from huaweicloudsdkkafka.v2.model.batch_delete_instance_topic_request import BatchDeleteInstanceTopicRequest
from huaweicloudsdkkafka.v2.model.batch_delete_instance_topic_resp_topics import BatchDeleteInstanceTopicRespTopics
from huaweicloudsdkkafka.v2.model.batch_delete_instance_topic_response import BatchDeleteInstanceTopicResponse
from huaweicloudsdkkafka.v2.model.batch_delete_instance_users_req import BatchDeleteInstanceUsersReq
from huaweicloudsdkkafka.v2.model.batch_delete_instance_users_request import BatchDeleteInstanceUsersRequest
from huaweicloudsdkkafka.v2.model.batch_delete_instance_users_response import BatchDeleteInstanceUsersResponse
from huaweicloudsdkkafka.v2.model.batch_restart_or_delete_instance_req import BatchRestartOrDeleteInstanceReq
from huaweicloudsdkkafka.v2.model.batch_restart_or_delete_instance_resp_results import BatchRestartOrDeleteInstanceRespResults
from huaweicloudsdkkafka.v2.model.batch_restart_or_delete_instances_request import BatchRestartOrDeleteInstancesRequest
from huaweicloudsdkkafka.v2.model.batch_restart_or_delete_instances_response import BatchRestartOrDeleteInstancesResponse
from huaweicloudsdkkafka.v2.model.create_connector_req import CreateConnectorReq
from huaweicloudsdkkafka.v2.model.create_connector_request import CreateConnectorRequest
from huaweicloudsdkkafka.v2.model.create_connector_response import CreateConnectorResponse
from huaweicloudsdkkafka.v2.model.create_instance_topic_req import CreateInstanceTopicReq
from huaweicloudsdkkafka.v2.model.create_instance_topic_request import CreateInstanceTopicRequest
from huaweicloudsdkkafka.v2.model.create_instance_topic_response import CreateInstanceTopicResponse
from huaweicloudsdkkafka.v2.model.create_instance_user_req import CreateInstanceUserReq
from huaweicloudsdkkafka.v2.model.create_instance_user_request import CreateInstanceUserRequest
from huaweicloudsdkkafka.v2.model.create_instance_user_response import CreateInstanceUserResponse
from huaweicloudsdkkafka.v2.model.create_partition_req import CreatePartitionReq
from huaweicloudsdkkafka.v2.model.create_partition_request import CreatePartitionRequest
from huaweicloudsdkkafka.v2.model.create_partition_response import CreatePartitionResponse
from huaweicloudsdkkafka.v2.model.create_post_paid_instance_req import CreatePostPaidInstanceReq
from huaweicloudsdkkafka.v2.model.create_post_paid_instance_request import CreatePostPaidInstanceRequest
from huaweicloudsdkkafka.v2.model.create_post_paid_instance_response import CreatePostPaidInstanceResponse
from huaweicloudsdkkafka.v2.model.create_sink_task_req import CreateSinkTaskReq
from huaweicloudsdkkafka.v2.model.create_sink_task_request import CreateSinkTaskRequest
from huaweicloudsdkkafka.v2.model.create_sink_task_response import CreateSinkTaskResponse
from huaweicloudsdkkafka.v2.model.delete_background_task_request import DeleteBackgroundTaskRequest
from huaweicloudsdkkafka.v2.model.delete_background_task_response import DeleteBackgroundTaskResponse
from huaweicloudsdkkafka.v2.model.delete_instance_request import DeleteInstanceRequest
from huaweicloudsdkkafka.v2.model.delete_instance_response import DeleteInstanceResponse
from huaweicloudsdkkafka.v2.model.delete_sink_task_request import DeleteSinkTaskRequest
from huaweicloudsdkkafka.v2.model.delete_sink_task_response import DeleteSinkTaskResponse
from huaweicloudsdkkafka.v2.model.diskusage_entity import DiskusageEntity
from huaweicloudsdkkafka.v2.model.diskusage_topic_entity import DiskusageTopicEntity
from huaweicloudsdkkafka.v2.model.list_available_zones_request import ListAvailableZonesRequest
from huaweicloudsdkkafka.v2.model.list_available_zones_resp_available_zones import ListAvailableZonesRespAvailableZones
from huaweicloudsdkkafka.v2.model.list_available_zones_response import ListAvailableZonesResponse
from huaweicloudsdkkafka.v2.model.list_background_tasks_request import ListBackgroundTasksRequest
from huaweicloudsdkkafka.v2.model.list_background_tasks_resp_tasks import ListBackgroundTasksRespTasks
from huaweicloudsdkkafka.v2.model.list_background_tasks_response import ListBackgroundTasksResponse
from huaweicloudsdkkafka.v2.model.list_instance_topics_request import ListInstanceTopicsRequest
from huaweicloudsdkkafka.v2.model.list_instance_topics_response import ListInstanceTopicsResponse
from huaweicloudsdkkafka.v2.model.list_instances_request import ListInstancesRequest
from huaweicloudsdkkafka.v2.model.list_instances_response import ListInstancesResponse
from huaweicloudsdkkafka.v2.model.list_products_request import ListProductsRequest
from huaweicloudsdkkafka.v2.model.list_products_resp_detail import ListProductsRespDetail
from huaweicloudsdkkafka.v2.model.list_products_resp_hourly import ListProductsRespHourly
from huaweicloudsdkkafka.v2.model.list_products_resp_io import ListProductsRespIo
from huaweicloudsdkkafka.v2.model.list_products_resp_values import ListProductsRespValues
from huaweicloudsdkkafka.v2.model.list_products_response import ListProductsResponse
from huaweicloudsdkkafka.v2.model.list_sink_tasks_request import ListSinkTasksRequest
from huaweicloudsdkkafka.v2.model.list_sink_tasks_resp_tasks import ListSinkTasksRespTasks
from huaweicloudsdkkafka.v2.model.list_sink_tasks_response import ListSinkTasksResponse
from huaweicloudsdkkafka.v2.model.maintain_windows_entity import MaintainWindowsEntity
from huaweicloudsdkkafka.v2.model.messages_entity import MessagesEntity
from huaweicloudsdkkafka.v2.model.obs_destination_descriptor import ObsDestinationDescriptor
from huaweicloudsdkkafka.v2.model.policy_entity import PolicyEntity
from huaweicloudsdkkafka.v2.model.reset_manager_password_req import ResetManagerPasswordReq
from huaweicloudsdkkafka.v2.model.reset_manager_password_request import ResetManagerPasswordRequest
from huaweicloudsdkkafka.v2.model.reset_manager_password_response import ResetManagerPasswordResponse
from huaweicloudsdkkafka.v2.model.reset_message_offset_req import ResetMessageOffsetReq
from huaweicloudsdkkafka.v2.model.reset_message_offset_request import ResetMessageOffsetRequest
from huaweicloudsdkkafka.v2.model.reset_message_offset_response import ResetMessageOffsetResponse
from huaweicloudsdkkafka.v2.model.reset_password_req import ResetPasswordReq
from huaweicloudsdkkafka.v2.model.reset_password_request import ResetPasswordRequest
from huaweicloudsdkkafka.v2.model.reset_password_response import ResetPasswordResponse
from huaweicloudsdkkafka.v2.model.reset_replica_req import ResetReplicaReq
from huaweicloudsdkkafka.v2.model.reset_replica_req_partitions import ResetReplicaReqPartitions
from huaweicloudsdkkafka.v2.model.reset_user_passwrod_req import ResetUserPasswrodReq
from huaweicloudsdkkafka.v2.model.reset_user_passwrod_request import ResetUserPasswrodRequest
from huaweicloudsdkkafka.v2.model.reset_user_passwrod_response import ResetUserPasswrodResponse
from huaweicloudsdkkafka.v2.model.resize_instance_req import ResizeInstanceReq
from huaweicloudsdkkafka.v2.model.resize_instance_request import ResizeInstanceRequest
from huaweicloudsdkkafka.v2.model.resize_instance_response import ResizeInstanceResponse
from huaweicloudsdkkafka.v2.model.restart_manager_request import RestartManagerRequest
from huaweicloudsdkkafka.v2.model.restart_manager_response import RestartManagerResponse
from huaweicloudsdkkafka.v2.model.show_background_task_request import ShowBackgroundTaskRequest
from huaweicloudsdkkafka.v2.model.show_background_task_response import ShowBackgroundTaskResponse
from huaweicloudsdkkafka.v2.model.show_ces_hierarchy_request import ShowCesHierarchyRequest
from huaweicloudsdkkafka.v2.model.show_ces_hierarchy_response import ShowCesHierarchyResponse
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_children import ShowCeshierarchyRespChildren
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_dimensions import ShowCeshierarchyRespDimensions
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_groups import ShowCeshierarchyRespGroups
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_instance_ids import ShowCeshierarchyRespInstanceIds
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_nodes import ShowCeshierarchyRespNodes
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_partitions import ShowCeshierarchyRespPartitions
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_queues import ShowCeshierarchyRespQueues
from huaweicloudsdkkafka.v2.model.show_ceshierarchy_resp_queues1 import ShowCeshierarchyRespQueues1
from huaweicloudsdkkafka.v2.model.show_cluster_request import ShowClusterRequest
from huaweicloudsdkkafka.v2.model.show_cluster_resp_cluster import ShowClusterRespCluster
from huaweicloudsdkkafka.v2.model.show_cluster_resp_cluster_brokers import ShowClusterRespClusterBrokers
from huaweicloudsdkkafka.v2.model.show_cluster_response import ShowClusterResponse
from huaweicloudsdkkafka.v2.model.show_coordinators_request import ShowCoordinatorsRequest
from huaweicloudsdkkafka.v2.model.show_coordinators_resp_coordinators import ShowCoordinatorsRespCoordinators
from huaweicloudsdkkafka.v2.model.show_coordinators_response import ShowCoordinatorsResponse
from huaweicloudsdkkafka.v2.model.show_groups_request import ShowGroupsRequest
from huaweicloudsdkkafka.v2.model.show_groups_resp_group import ShowGroupsRespGroup
from huaweicloudsdkkafka.v2.model.show_groups_resp_group_assignment import ShowGroupsRespGroupAssignment
from huaweicloudsdkkafka.v2.model.show_groups_resp_group_group_message_offsets import ShowGroupsRespGroupGroupMessageOffsets
from huaweicloudsdkkafka.v2.model.show_groups_resp_group_members import ShowGroupsRespGroupMembers
from huaweicloudsdkkafka.v2.model.show_groups_response import ShowGroupsResponse
from huaweicloudsdkkafka.v2.model.show_instance_extend_product_info_request import ShowInstanceExtendProductInfoRequest
from huaweicloudsdkkafka.v2.model.show_instance_extend_product_info_response import ShowInstanceExtendProductInfoResponse
from huaweicloudsdkkafka.v2.model.show_instance_messages_request import ShowInstanceMessagesRequest
from huaweicloudsdkkafka.v2.model.show_instance_messages_response import ShowInstanceMessagesResponse
from huaweicloudsdkkafka.v2.model.show_instance_request import ShowInstanceRequest
from huaweicloudsdkkafka.v2.model.show_instance_resp import ShowInstanceResp
from huaweicloudsdkkafka.v2.model.show_instance_response import ShowInstanceResponse
from huaweicloudsdkkafka.v2.model.show_instance_topic_detail_request import ShowInstanceTopicDetailRequest
from huaweicloudsdkkafka.v2.model.show_instance_topic_detail_resp_partitions import ShowInstanceTopicDetailRespPartitions
from huaweicloudsdkkafka.v2.model.show_instance_topic_detail_resp_replicas import ShowInstanceTopicDetailRespReplicas
from huaweicloudsdkkafka.v2.model.show_instance_topic_detail_response import ShowInstanceTopicDetailResponse
from huaweicloudsdkkafka.v2.model.show_instance_users_entity import ShowInstanceUsersEntity
from huaweicloudsdkkafka.v2.model.show_instance_users_request import ShowInstanceUsersRequest
from huaweicloudsdkkafka.v2.model.show_instance_users_response import ShowInstanceUsersResponse
from huaweicloudsdkkafka.v2.model.show_kafka_project_tags_request import ShowKafkaProjectTagsRequest
from huaweicloudsdkkafka.v2.model.show_kafka_project_tags_response import ShowKafkaProjectTagsResponse
from huaweicloudsdkkafka.v2.model.show_kafka_tags_request import ShowKafkaTagsRequest
from huaweicloudsdkkafka.v2.model.show_kafka_tags_response import ShowKafkaTagsResponse
from huaweicloudsdkkafka.v2.model.show_kafka_topic_partition_diskusage_request import ShowKafkaTopicPartitionDiskusageRequest
from huaweicloudsdkkafka.v2.model.show_kafka_topic_partition_diskusage_response import ShowKafkaTopicPartitionDiskusageResponse
from huaweicloudsdkkafka.v2.model.show_maintain_windows_request import ShowMaintainWindowsRequest
from huaweicloudsdkkafka.v2.model.show_maintain_windows_response import ShowMaintainWindowsResponse
from huaweicloudsdkkafka.v2.model.show_messages_request import ShowMessagesRequest
from huaweicloudsdkkafka.v2.model.show_messages_resp_messages import ShowMessagesRespMessages
from huaweicloudsdkkafka.v2.model.show_messages_response import ShowMessagesResponse
from huaweicloudsdkkafka.v2.model.show_partition_beginning_message_request import ShowPartitionBeginningMessageRequest
from huaweicloudsdkkafka.v2.model.show_partition_beginning_message_response import ShowPartitionBeginningMessageResponse
from huaweicloudsdkkafka.v2.model.show_partition_end_message_request import ShowPartitionEndMessageRequest
from huaweicloudsdkkafka.v2.model.show_partition_end_message_response import ShowPartitionEndMessageResponse
from huaweicloudsdkkafka.v2.model.show_partition_message_entity import ShowPartitionMessageEntity
from huaweicloudsdkkafka.v2.model.show_partition_message_request import ShowPartitionMessageRequest
from huaweicloudsdkkafka.v2.model.show_partition_message_response import ShowPartitionMessageResponse
from huaweicloudsdkkafka.v2.model.show_sink_task_detail_request import ShowSinkTaskDetailRequest
from huaweicloudsdkkafka.v2.model.show_sink_task_detail_resp_obs_destination_descriptor import ShowSinkTaskDetailRespObsDestinationDescriptor
from huaweicloudsdkkafka.v2.model.show_sink_task_detail_resp_partitions import ShowSinkTaskDetailRespPartitions
from huaweicloudsdkkafka.v2.model.show_sink_task_detail_resp_topics_info import ShowSinkTaskDetailRespTopicsInfo
from huaweicloudsdkkafka.v2.model.show_sink_task_detail_response import ShowSinkTaskDetailResponse
from huaweicloudsdkkafka.v2.model.show_topic_access_policy_request import ShowTopicAccessPolicyRequest
from huaweicloudsdkkafka.v2.model.show_topic_access_policy_response import ShowTopicAccessPolicyResponse
from huaweicloudsdkkafka.v2.model.tag_entity import TagEntity
from huaweicloudsdkkafka.v2.model.tag_multy_value_entity import TagMultyValueEntity
from huaweicloudsdkkafka.v2.model.topic_entity import TopicEntity
from huaweicloudsdkkafka.v2.model.update_instance_auto_create_topic_req import UpdateInstanceAutoCreateTopicReq
from huaweicloudsdkkafka.v2.model.update_instance_auto_create_topic_request import UpdateInstanceAutoCreateTopicRequest
from huaweicloudsdkkafka.v2.model.update_instance_auto_create_topic_response import UpdateInstanceAutoCreateTopicResponse
from huaweicloudsdkkafka.v2.model.update_instance_cross_vpc_ip_req import UpdateInstanceCrossVpcIpReq
from huaweicloudsdkkafka.v2.model.update_instance_cross_vpc_ip_request import UpdateInstanceCrossVpcIpRequest
from huaweicloudsdkkafka.v2.model.update_instance_cross_vpc_ip_resp_results import UpdateInstanceCrossVpcIpRespResults
from huaweicloudsdkkafka.v2.model.update_instance_cross_vpc_ip_response import UpdateInstanceCrossVpcIpResponse
from huaweicloudsdkkafka.v2.model.update_instance_req import UpdateInstanceReq
from huaweicloudsdkkafka.v2.model.update_instance_request import UpdateInstanceRequest
from huaweicloudsdkkafka.v2.model.update_instance_response import UpdateInstanceResponse
from huaweicloudsdkkafka.v2.model.update_instance_topic_req import UpdateInstanceTopicReq
from huaweicloudsdkkafka.v2.model.update_instance_topic_req_topics import UpdateInstanceTopicReqTopics
from huaweicloudsdkkafka.v2.model.update_instance_topic_request import UpdateInstanceTopicRequest
from huaweicloudsdkkafka.v2.model.update_instance_topic_response import UpdateInstanceTopicResponse
from huaweicloudsdkkafka.v2.model.update_sink_task_quota_req import UpdateSinkTaskQuotaReq
from huaweicloudsdkkafka.v2.model.update_sink_task_quota_request import UpdateSinkTaskQuotaRequest
from huaweicloudsdkkafka.v2.model.update_sink_task_quota_response import UpdateSinkTaskQuotaResponse
from huaweicloudsdkkafka.v2.model.update_topic_access_policy_req import UpdateTopicAccessPolicyReq
from huaweicloudsdkkafka.v2.model.update_topic_access_policy_request import UpdateTopicAccessPolicyRequest
from huaweicloudsdkkafka.v2.model.update_topic_access_policy_response import UpdateTopicAccessPolicyResponse
from huaweicloudsdkkafka.v2.model.update_topic_replica_request import UpdateTopicReplicaRequest
from huaweicloudsdkkafka.v2.model.update_topic_replica_response import UpdateTopicReplicaResponse
|
#%%
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from keras.datasets import mnist
from tensorflow.examples.tutorials.mnist import input_data
import keras
#%%
tf.reset_default_graph()
def dataset():
    # Thin wrapper over the Keras MNIST loader; returns
    # ((X_train, y_train), (X_test, y_test)) as numpy arrays.
    # NOTE(review): per Keras docs the images are (N, 28, 28) uint8 — they are
    # fed to a (None, 784) placeholder below, so flattening is needed; confirm.
    return mnist.load_data()
(X_train,y_train), (X_test, y_test) = dataset()
# NOTE(review): `mnists` is never used by the rest of this script — presumably
# left over from an earlier input pipeline; verify before removing.
mnists = input_data.read_data_sets("/tmp/data/")
#%%
from functools import partial
tf.reset_default_graph()

# Stacked autoencoder dimensions: 784 -> 300 -> 150 -> 300 -> 784.
n_inputs = 784
n_hidden1 = 300
n_hidden2 = 150
n_hidden3 = n_hidden1
# Bug fix: the reconstruction must have the same width as the input (784).
# The original set n_outputs = n_hidden1 (300), which makes
# tf.losses.mean_squared_error(X, outputs) fail on mismatched shapes.
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = .0001

X = tf.placeholder(tf.float32, shape=[None, n_inputs])
he_init = tf.keras.initializers.he_normal(seed=None)
l2_regularizer = tf.keras.regularizers.l2(l2_reg)
# Bug fix: the keyword was misspelled 'kernel_regularizerr', which would raise
# a TypeError the first time tf.layers.dense is invoked through the partial.
my_dense_layer = partial(tf.layers.dense, activation=tf.nn.elu,
                         kernel_initializer=he_init,
                         kernel_regularizer=l2_regularizer)

hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3 = my_dense_layer(hidden2, n_hidden3)
# Use a distinct name for the output tensor instead of clobbering n_outputs.
outputs = my_dense_layer(hidden3, n_outputs, activation=None)

# Reconstruction loss plus summaries / checkpointing plumbing.
loss = tf.losses.mean_squared_error(X, outputs)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
loss_func = tf.summary.scalar("loss_func", loss)
merge = tf.summary.merge_all()
writer = tf.summary.FileWriter("./mylog")
saver = tf.train.Saver()
init = tf.global_variables_initializer()
#%%
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
    init.run()
    # Bug fix: the number of batches must come from the number of examples
    # (shape[0]); X_train.size counts every pixel (60000*28*28).
    n_batches = X_train.shape[0] // batch_size
    for epoch in range(n_epochs):
        for iteration in range(n_batches):
            # Bug fix: slices must advance by batch_size — the original
            # stepped by the batch *count*, producing wrong/empty slices.
            start = iteration * batch_size
            X_batch = X_train[start:start + batch_size]
            # Flatten 28x28 uint8 images into float rows matching the
            # (None, 784) placeholder, scaled into [0, 1].
            X_batch = X_batch.reshape(-1, n_inputs).astype("float32") / 255.0
            _, summary = sess.run([train_op, merge], feed_dict={X: X_batch})
        # Report reconstruction MSE on the last batch of the epoch.
        loss_train = loss.eval(feed_dict={X: X_batch})
        print("\r{}".format(epoch), "Train MSE:", loss_train)
        writer.add_summary(summary)
    saver.save(sess, "./my_model_all_layers.ckpt")
|
"""Tries to access all OpenEO job endpoints - A initial version of 'integration tests' for the jobs service..
To run them quiete some services need to be running - at least the gateway, RabbitMQ, the jobs, files and processes
service and the complete Airflow setup including webserver, scheduler, postgres, RabbitMQ and celery workers. Do not
forget to mount all required volumes / folders - for more details check the api docker-compose file - and provide
required environment variables.
Once the 'backend' is running some environment variable for this script need to be specified. In detail USERNAME,
PASSWORD, BACKEND_URL. To provide them you can copy the `sample_auth` file provided in this directory and add a USERNAME
PASSWORD combination existing on the backend. BACKEND_URL needs points to the public gateway url. Execute the copied
script to export the variables.
Then this script can be directly executed with
>>>python ./rest_calls.py
It will perform calls to all job service endpoints and print the status code. It does not do any checks automatically,
you rather have to examine the return status and responses yourself.
"""
import json
import os
from typing import Dict
import requests
# Public gateway URL of the backend under test; must be provided via the env.
backend_url = os.environ.get("BACKEND_URL")
if backend_url is None:
    raise OSError("Environment variable BACKEND_URL needs to be specified!")
# Endpoints exercised by this script.
basic_auth_url = backend_url + '/credentials/basic'
job_url = backend_url + '/jobs'
sync_job_url = backend_url + '/result'
def get_auth() -> Dict[str, str]:
    """Authenticate against the basic-credentials endpoint.

    USERNAME and PASSWORD must be set as environment variables.

    Returns:
        Authorization header dict for subsequent calls.
    """
    credentials = (os.environ.get('USERNAME'), os.environ.get('PASSWORD'))
    response = requests.get(basic_auth_url, auth=credentials)
    token = response.json()['access_token']
    return {'Authorization': 'Bearer basic//' + token}
def load_json(filename: str) -> dict:
    """Load `<filename>.json` from the `data` folder next to this script."""
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
    with open(os.path.join(data_dir, filename + '.json')) as json_file:
        return json.load(json_file)
def check_jobs() -> None:
    """Try to perform simple REST calls to all job service endpoints and print the return status code."""
    # Create a job from the 'pg' process-graph fixture.
    job = load_json('pg')
    response_create = requests.post(job_url, json=job, headers=get_auth())
    print(f"Create response: {response_create.status_code}")  # noqa T001
    # The new job's id is only exposed via the Location header.
    job_id = response_create.headers['Location'].split('jobs/')[-1]
    print(f'Job Id: {job_id}')  # noqa T001
    response_get = requests.get(f'{job_url}/{job_id}', headers=get_auth())
    print(f"Get Full Response: {response_get.status_code}")  # noqa T001
    response_get_all = requests.get(job_url, headers=get_auth())
    print(f"Get all response: {response_get_all.status_code}")  # noqa T001
    # Patch the job with an update fixture.
    job_update = load_json('job_update')
    response_patch = requests.patch(f'{job_url}/{job_id}', json=job_update, headers=get_auth())
    print(f"Patch Response: {response_patch.status_code}")  # noqa T001
    # Trigger asynchronous processing, then delete the job again.
    response_process = requests.post(f'{job_url}/{job_id}/results', headers=get_auth())
    print(f"Process Response: {response_process.status_code}")  # noqa T001
    response_delete = requests.delete(f'{job_url}/{job_id}', headers=get_auth())
    print(f"Delete Response: {response_delete.status_code}")  # noqa T001
    # Synchronous processing uses a separate endpoint and the original job json.
    response_process_sync = requests.post(sync_job_url, json=job, headers=get_auth())
    print(f"Process_sync response: {response_process_sync.status_code}")  # noqa T001
if __name__ == '__main__':
    check_jobs()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoSignFlowCancelModel(object):
    """Request model for cancelling a sign flow (flow id + optional reason)."""

    # Field names serialized by to_alipay_dict / read by from_alipay_dict.
    _FIELDS = ('flow_id', 'revoke_reason')

    def __init__(self):
        # Backing attributes; populated through the property setters below.
        self._flow_id = None
        self._revoke_reason = None

    @property
    def flow_id(self):
        return self._flow_id

    @flow_id.setter
    def flow_id(self, value):
        self._flow_id = value

    @property
    def revoke_reason(self):
        return self._revoke_reason

    @revoke_reason.setter
    def revoke_reason(self, value):
        self._revoke_reason = value

    def to_alipay_dict(self):
        """Serialize truthy fields, delegating to nested to_alipay_dict when available."""
        params = dict()
        for key in self._FIELDS:
            field = getattr(self, key)
            if not field:
                continue
            if hasattr(field, 'to_alipay_dict'):
                params[key] = field.to_alipay_dict()
            else:
                params[key] = field
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayEcoSignFlowCancelModel()
        for key in AlipayEcoSignFlowCancelModel._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cros tryjob: Schedule a tryjob."""
from __future__ import print_function
import json
import os
import time
from chromite.lib import constants
from chromite.cli import command
from chromite.lib import config_lib
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import request_build
from chromite.cbuildbot import trybot_patch_pool
# Where a tryjob can execute (selected by --remote / --local / --cbuildbot).
REMOTE = 'remote'
LOCAL = 'local'
CBUILDBOT = 'cbuildbot'
def ConfigsToPrint(site_config, production, build_config_fragments):
  """Select a list of buildbot configs to print out.

  Args:
    site_config: config_lib.SiteConfig containing all config info.
    production: Display tryjob or production configs?.
    build_config_fragments: List of strings to filter config names with.

  Returns:
    List of config_lib.BuildConfig objects.
  """
  def _matches(config):
    # Every requested fragment must appear in the config name.
    if any(fragment not in config.name
           for fragment in build_config_fragments):
      return False
    # Keep tryjob configs unless production configs were requested.
    return config_lib.isTryjobConfig(config) != production

  selected = [cfg for cfg in site_config.values() if _matches(cfg)]
  # Sort by build type, then board:
  # 'daisy-paladin-tryjob' -> ['tryjob', 'paladin', 'daisy']
  selected.sort(key=lambda cfg: list(reversed(cfg.name.split('-'))))
  return selected
def PrintKnownConfigs(site_config, production, build_config_fragments):
  """Print a list of known buildbot configs.

  Args:
    site_config: config_lib.SiteConfig containing all config info.
    production: Display tryjob or production configs?.
    build_config_fragments: List of strings to filter config names with.
  """
  configs = ConfigsToPrint(site_config, production, build_config_fragments)
  # Pad the name column to the longest config name.
  width = max(len(c.name) for c in configs) + 1
  header = 'Production configs:' if production else 'Tryjob configs:'
  print(header)
  print('config'.ljust(width), 'description')
  print('------'.ljust(width), '-----------')
  for cfg in configs:
    print(cfg.name.ljust(width), cfg.description or '')
def CbuildbotArgs(options):
  """Generate cbuildbot command line args.

  These are pre-api version filtering.

  Args:
    options: Parsed cros tryjob tryjob arguments.

  Returns:
    List of strings in ['arg1', 'arg2'] format.
  """
  args = []
  if options.where == REMOTE:
    if options.production:
      args.append('--buildbot')
    else:
      args.append('--remote-trybot')
  elif options.where == LOCAL:
    args.append('--no-buildbot-tags')
    if options.production:
      # This is expected to fail on workstations without an explicit --debug,
      # or running 'branch-util'.
      args.append('--buildbot')
    else:
      args.append('--debug')
  elif options.where == CBUILDBOT:
    args.extend(('--buildbot', '--nobootstrap', '--noreexec',
                 '--no-buildbot-tags'))
    if not options.production:
      args.append('--debug')
  else:
    # Bug fix: the value was previously passed as a second Exception argument,
    # so the '%s' placeholder was never interpolated into the message.
    raise Exception('Unknown options.where: %s' % options.where)
  if options.buildroot:
    args.extend(('--buildroot', options.buildroot))
  if options.branch:
    args.extend(('-b', options.branch))
  for g in options.gerrit_patches:
    args.extend(('-g', g))
  # Pass-through flags are forwarded to cbuildbot verbatim.
  if options.passthrough:
    args.extend(options.passthrough)
  if options.passthrough_raw:
    args.extend(options.passthrough_raw)
  return args
def CreateBuildrootIfNeeded(buildroot):
  """Create the buildroot if it doesn't exist, with a confirmation prompt.

  Args:
    buildroot: The buildroot path to create as a string.

  Returns:
    boolean: Does the buildroot now exist?
  """
  if not os.path.exists(buildroot):
    question = 'Create %s as buildroot' % buildroot
    # Refuse to create a directory without the user's explicit consent.
    if not cros_build_lib.BooleanPrompt(prompt=question, default=False):
      print('Please specify a different buildroot via the --buildroot option.')
      return False
    os.makedirs(buildroot)
  return True
def RunLocal(options):
  """Run a local tryjob.

  Args:
    options: Parsed cros tryjob tryjob arguments.

  Returns:
    Exit code of build as an int.
  """
  if cros_build_lib.IsInsideChroot():
    cros_build_lib.Die('Local tryjobs cannot be started inside the chroot.')

  args = CbuildbotArgs(options)
  if not CreateBuildrootIfNeeded(options.buildroot):
    return 1

  # Invoke the cbuildbot launcher script with the computed arguments.
  launcher = os.path.join(constants.CHROMITE_DIR, 'scripts', 'cbuildbot_launch')
  invocation = [launcher] + args + options.build_configs
  result = cros_build_lib.RunCommand(
      invocation, debug_level=logging.CRITICAL, error_code_ok=True,
      cwd=options.buildroot)
  return result.returncode
def RunCbuildbot(options):
  """Run a cbuildbot build.

  Args:
    options: Parsed cros tryjob tryjob arguments.

  Returns:
    Exit code of build as an int.
  """
  if cros_build_lib.IsInsideChroot():
    cros_build_lib.Die('cbuildbot tryjobs cannot be started inside the chroot.')

  args = CbuildbotArgs(options)
  if not CreateBuildrootIfNeeded(options.buildroot):
    return 1

  # Invoke cbuildbot directly from the chromite bin directory.
  binary = os.path.join(constants.CHROMITE_BIN_DIR, 'cbuildbot')
  invocation = [binary] + args + options.build_configs
  result = cros_build_lib.RunCommand(
      invocation, debug_level=logging.CRITICAL, error_code_ok=True,
      cwd=options.buildroot)
  return result.returncode
def DisplayLabel(site_config, options, build_config_name):
  """Decide which display_label to use.

  Args:
    site_config: config_lib.SiteConfig containing all config info.
    options: Parsed command line options for cros tryjob.
    build_config_name: Name of the build config we are scheduling.

  Returns:
    String to use as the cbb_build_label value.
  """
  # Production tryjobs always display as production tryjobs.
  if options.production:
    return config_lib.DISPLAY_LABEL_PRODUCTION_TRYJOB

  # Our site_config is only valid for the current branch ('master'). If the
  # build config is known and has an explicit display_label, use it.
  if options.branch == 'master' and build_config_name in site_config:
    label = site_config[build_config_name].display_label
    if label:
      return label

  # Fall back to default.
  return config_lib.DISPLAY_LABEL_TRYJOB
def FindUserEmail(options):
  """Decide which email address is submitting the job.

  Args:
    options: Parsed command line options for cros tryjob.

  Returns:
    Email address for the tryjob as a string.
  """
  explicit = options.committer_email
  if explicit:
    return explicit

  # Fall back to the git committer email of this checkout.
  here = os.path.dirname(os.path.realpath(__file__))
  return git.GetProjectUserEmail(here)
def PushLocalPatches(site_config, local_patches, user_email, dryrun=False):
  """Push local changes to a remote ref, and generate args to send.

  Args:
    site_config: config_lib.SiteConfig containing all config info.
    local_patches: patch_pool.local_patches from verified patch_pool.
    user_email: Unique id for user submitting this tryjob.
    dryrun: Is this a dryrun? If so, don't really push.

  Returns:
    List of strings to pass to builder to include these patches.
  """
  manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)
  # Namespace pushed refs by user and timestamp so concurrent tryjobs from
  # the same user cannot collide.
  current_time = str(int(time.time()))
  ref_base = os.path.join('refs/tryjobs', user_email, current_time)
  extra_args = []
  for patch in local_patches:
    # Isolate the name; if it's a tag or a remote, let through.
    # Else if it's a branch, get the full branch name minus refs/heads.
    local_branch = git.StripRefsHeads(patch.ref, False)
    ref_final = os.path.join(ref_base, local_branch, patch.sha1)
    checkout = patch.GetCheckout(manifest)
    checkout.AssertPushable()  # Fail fast before attempting the push.
    print('Uploading patch %s' % patch)
    patch.Upload(checkout['push_url'], ref_final, dryrun=dryrun)
    # TODO(rcui): Pass in the remote instead of tag. http://crosbug.com/33937.
    tag = constants.EXTERNAL_PATCH_TAG
    if checkout['remote'] == site_config.params.INTERNAL_REMOTE:
      tag = constants.INTERNAL_PATCH_TAG
    # Tell the builder where to fetch this patch from.
    extra_args.append('--remote-patches=%s:%s:%s:%s:%s'
                      % (patch.project, local_branch, ref_final,
                         patch.tracking_branch, tag))
  return extra_args
def RunRemote(site_config, options, patch_pool):
  """Schedule remote tryjobs."""
  logging.info('Scheduling remote tryjob(s): %s',
               ', '.join(options.build_configs))
  user_email = FindUserEmail(options)
  # Figure out the cbuildbot command line to pass in.
  args = CbuildbotArgs(options)
  # Local patches must be pushed to remote refs before builders can fetch
  # them; the returned flags tell the builder where to find each one.
  args += PushLocalPatches(
      site_config, patch_pool.local_patches, user_email)
  logging.info('Submitting tryjob...')
  results = []
  # One build request per requested config; all share the same extra args.
  for build_config in options.build_configs:
    tryjob = request_build.RequestBuild(
        build_config=build_config,
        display_label=DisplayLabel(site_config, options, build_config),
        branch=options.branch,
        extra_args=args,
        user_email=user_email,
    )
    results.append(tryjob.Submit(dryrun=False))
  if options.json:
    # Emit a list of dicts (one per submitted request), not a list of lists.
    print(json.dumps([r._asdict() for r in results]))
  else:
    print('Tryjob submitted!')
    print('To view your tryjobs, visit:')
    for r in results:
      print(' %s' % r.url)
def AdjustOptions(options):
  """Set defaults that require some logic.

  Args:
    options: Parsed cros tryjob tryjob arguments.
  """
  if options.buildroot:
    return

  # Only local and cbuildbot runs get a default buildroot next to the
  # source root; remote jobs leave it unset.
  default_subdirs = {CBUILDBOT: 'cbuild', LOCAL: 'tryjob'}
  subdir = default_subdirs.get(options.where)
  if subdir:
    options.buildroot = os.path.join(
        os.path.dirname(constants.SOURCE_ROOT), subdir)
def VerifyOptions(options, site_config):
  """Verify that our command line options make sense.

  Args:
    options: Parsed cros tryjob tryjob arguments.
    site_config: config_lib.SiteConfig containing all config info.
  """
  # Handle --list before checking that everything else is valid.
  if options.list:
    PrintKnownConfigs(site_config,
                      options.production,
                      options.build_configs)
    raise cros_build_lib.DieSystemExit(0)  # Exit with success code.
  # Validate specified build_configs.
  if not options.build_configs:
    cros_build_lib.Die('At least one build_config is required.')
  unknown_build_configs = [b for b in options.build_configs
                           if b not in site_config]
  # Unknown configs may still exist on other branches; ask before scheduling.
  if unknown_build_configs and not options.yes:
    prompt = ('Unknown build configs; are you sure you want to schedule '
              'for %s?' % ', '.join(unknown_build_configs))
    if not cros_build_lib.BooleanPrompt(prompt=prompt, default=False):
      cros_build_lib.Die('No confirmation.')
  # Ensure that production configs are only run with --production.
  if not (options.production or options.where == CBUILDBOT):
    # We can't know if branched configs are tryjob safe.
    # It should always be safe to run a tryjob config with --production.
    prod_configs = []
    for b in options.build_configs:
      if b in site_config and not config_lib.isTryjobConfig(site_config[b]):
        prod_configs.append(b)
    if prod_configs:
      # Die, and explain why.
      alternative_configs = ['%s-tryjob' % b for b in prod_configs]
      msg = ('These configs are not tryjob safe:\n'
             ' %s\n'
             'Consider these configs instead:\n'
             ' %s\n'
             'See go/cros-explicit-tryjob-build-configs-psa.' %
             (', '.join(prod_configs), ', '.join(alternative_configs)))
      if options.branch == 'master':
        # On master branch, we know the status of configs for sure.
        cros_build_lib.Die(msg)
      elif not options.yes:
        # On branches, we are just guessing. Let people override.
        prompt = '%s\nAre you sure you want to continue?' % msg
        if not cros_build_lib.BooleanPrompt(prompt=prompt, default=False):
          cros_build_lib.Die('No confirmation.')
  patches_given = options.gerrit_patches or options.local_patches
  if options.production:
    # Make sure production builds don't have patches.
    if patches_given and not options.debug:
      cros_build_lib.Die('Patches cannot be included in production builds.')
  elif options.where != CBUILDBOT:
    # Ask for confirmation if there are no patches to test.
    if not patches_given and not options.yes:
      prompt = ('No patches were provided; are you sure you want to just '
                'run a build of %s?' % (
                    options.branch if options.branch else 'ToT'))
      if not cros_build_lib.BooleanPrompt(prompt=prompt, default=False):
        cros_build_lib.Die('No confirmation.')
  # Flags that only make sense for one kind of run.
  if options.where == REMOTE and options.buildroot:
    cros_build_lib.Die('--buildroot is not used for remote tryjobs.')
  if options.where != REMOTE and options.json:
    cros_build_lib.Die('--json can only be used for remote tryjobs.')
@command.CommandDecorator('tryjob')
class TryjobCommand(command.CliCommand):
  """Schedule a tryjob."""

  EPILOG = """
Remote Examples:
  cros tryjob -g 123 lumpy-compile-only-pre-cq
  cros tryjob -g 123 -g 456 lumpy-compile-only-pre-cq daisy-pre-cq
  cros tryjob -g *123 --hwtest daisy-paladin-tryjob
  cros tryjob -p chromiumos/chromite lumpy-compile-only-pre-cq
  cros tryjob -p chromiumos/chromite:foo_branch lumpy-paladin-tryjob

Local Examples:
  cros tryjob --local -g 123 daisy-paladin-tryjob
  cros tryjob --local --buildroot /my/cool/path -g 123 daisy-paladin-tryjob

Production Examples (danger, can break production if misused):
  cros tryjob --production --branch release-R61-9765.B asuka-release
  cros tryjob --production --version 9795.0.0 --channel canary lumpy-payloads

List Examples:
  cros tryjob --list
  cros tryjob --production --list
  cros tryjob --list lumpy
  cros tryjob --list lumpy vmtest
"""

  @classmethod
  def AddParser(cls, parser):
    """Adds a parser."""
    # Bug fix: super() arguments were reversed (`super(cls, TryjobCommand)`),
    # which raises TypeError as soon as a subclass calls AddParser.
    super(TryjobCommand, cls).AddParser(parser)
    parser.add_argument(
        'build_configs', nargs='*',
        help='One or more configs to build.')
    parser.add_argument(
        '-b', '--branch', default='master',
        help='The manifest branch to test. The branch to '
             'check the buildroot out to.')
    parser.add_argument(
        '--profile', dest='passthrough', action='append_option_value',
        help='Name of profile to sub-specify board variant.')
    parser.add_argument(
        '--yes', action='store_true', default=False,
        help='Never prompt to confirm.')
    parser.add_argument(
        '--production', action='store_true', default=False,
        help='This is a production build, NOT a test build. '
             'Confirm with Chrome OS deputy before use.')
    parser.add_argument(
        '--pass-through', dest='passthrough_raw', action='append',
        help='Arguments to pass to cbuildbot. To be avoided.'
             'Confirm with Chrome OS deputy before use.')
    parser.add_argument(
        '--json', action='store_true', default=False,
        help='Return details of remote tryjob in script friendly output.')

    # Do we build locally, or on a trybot builder?
    where_group = parser.add_argument_group(
        'Where',
        description='Where do we run the tryjob?')
    where_ex = where_group.add_mutually_exclusive_group()
    where_ex.add_argument(
        '--remote', dest='where', action='store_const', const=REMOTE,
        default=REMOTE,
        help='Run the tryjob on a remote builder. (default)')
    where_ex.add_argument(
        '--swarming', dest='where', action='store_const', const=REMOTE,
        help='Run the tryjob on a swarming builder. (deprecated)')
    where_ex.add_argument(
        '--local', dest='where', action='store_const', const=LOCAL,
        help='Run the tryjob on your local machine.')
    where_ex.add_argument(
        '--cbuildbot', dest='where', action='store_const', const=CBUILDBOT,
        help='Run special local build from current checkout in buildroot.')
    where_group.add_argument(
        '-r', '--buildroot', type='path', dest='buildroot',
        help='Root directory to use for the local tryjob. '
             'NOT the current checkout.')

    # What patches do we include in the build?
    what_group = parser.add_argument_group(
        'Patch',
        description='Which patches should be included with the tryjob?')
    what_group.add_argument(
        '-g', '--gerrit-patches', action='split_extend', default=[],
        metavar='Id1 *int_Id2...IdN',
        help='Space-separated list of short-form Gerrit '
             "Change-Id's or change numbers to patch. "
             "Please prepend '*' to internal Change-Id's")
    # We have to format metavar poorly to workaround an argparse bug.
    # https://bugs.python.org/issue11874
    what_group.add_argument(
        '-p', '--local-patches', action='split_extend', default=[],
        metavar="'<project1>[:<branch1>] ... <projectN>[:<branchN>] '",
        help='Space-separated list of project branches with '
             'patches to apply. Projects are specified by name. '
             'If no branch is specified the current branch of the '
             'project will be used. NOTE: -p is known to be buggy; '
             'prefer using -g instead (see https://crbug.com/806963 '
             'and https://crbug.com/807834).')

    # Identifying the request.
    who_group = parser.add_argument_group(
        'Requestor',
        description='Who is submitting the jobs?')
    who_group.add_argument(
        '--committer-email',
        help='Override default git committer email.')

    # Modify the build.
    how_group = parser.add_argument_group(
        'Modifiers',
        description='How do we modify build behavior?')
    how_group.add_argument(
        '--latest-toolchain', dest='passthrough', action='append_option',
        help='Use the latest toolchain.')
    how_group.add_argument(
        '--nochromesdk', dest='passthrough', action='append_option',
        help="Don't run the ChromeSDK stage which builds "
             'Chrome outside of the chroot.')
    how_group.add_argument(
        '--timeout', dest='passthrough', action='append_option_value',
        help='Specify the maximum amount of time this job '
             'can run for, at which point the build will be '
             'aborted. If set to zero, then there is no '
             'timeout.')
    how_group.add_argument(
        '--sanity-check-build', dest='passthrough', action='append_option',
        help='Run the build as a sanity check build.')
    how_group.add_argument(
        '--chrome_version', dest='passthrough', action='append_option_value',
        help='Used with SPEC logic to force a particular '
             'git revision of chrome rather than the latest. '
             'HEAD is a valid value.')
    how_group.add_argument(
        '--debug-cidb', dest='passthrough', action='append_option',
        help='Force Debug CIDB to be used.')

    # Overrides for the build configs testing behaviors.
    test_group = parser.add_argument_group(
        'Testing Flags',
        description='How do we change testing behavior?')
    test_group.add_argument(
        '--hwtest', dest='passthrough', action='append_option',
        help='Enable hwlab testing. Default false.')
    test_group.add_argument(
        '--notests', dest='passthrough', action='append_option',
        help='Override values from buildconfig, run no '
             'tests, and build no autotest artifacts.')
    test_group.add_argument(
        '--novmtests', dest='passthrough', action='append_option',
        help='Override values from buildconfig, run no vmtests.')
    test_group.add_argument(
        '--noimagetests', dest='passthrough', action='append_option',
        help='Override values from buildconfig and run no image tests.')

    # <board>-payloads tryjob specific options.
    payloads_group = parser.add_argument_group(
        'Payloads',
        description='Options only used by payloads tryjobs.')
    payloads_group.add_argument(
        '--version', dest='passthrough', action='append_option_value',
        help='Specify the release version for payload regeneration. '
             'Ex: 9799.0.0')
    payloads_group.add_argument(
        '--channel', dest='passthrough', action='append_option_value',
        help='Specify a channel for a payloads trybot. Can '
             'be specified multiple times. No valid for '
             'non-payloads configs.')

    # branch_util tryjob specific options.
    branch_util_group = parser.add_argument_group(
        'branch_util',
        description='Options only used by branch-util tryjobs.')
    branch_util_group.add_argument(
        '--branch-name', dest='passthrough', action='append_option_value',
        help='The branch to create or delete.')
    branch_util_group.add_argument(
        '--delete-branch', dest='passthrough', action='append_option',
        help='Delete the branch specified in --branch-name.')
    branch_util_group.add_argument(
        '--rename-to', dest='passthrough', action='append_option_value',
        help='Rename a branch to the specified name.')
    branch_util_group.add_argument(
        '--force-create', dest='passthrough', action='append_option',
        help='Overwrites an existing branch.')
    branch_util_group.add_argument(
        '--skip-remote-push', dest='passthrough', action='append_option',
        help='Do not actually push to remote git repos. '
             'Used for end-to-end testing branching.')

    configs_group = parser.add_argument_group(
        'Configs',
        description='Options for displaying available build configs.')
    configs_group.add_argument(
        '-l', '--list', action='store_true', dest='list', default=False,
        help='List the trybot configs (adjusted by --production).')

  def Run(self):
    """Runs `cros tryjob`."""
    site_config = config_lib.GetConfig()
    # Fill in computed defaults, then freeze and validate before any work.
    AdjustOptions(self.options)
    self.options.Freeze()
    VerifyOptions(self.options, site_config)
    logging.info('Verifying patches...')
    patch_pool = trybot_patch_pool.TrybotPatchPool.FromOptions(
        gerrit_patches=self.options.gerrit_patches,
        local_patches=self.options.local_patches,
        sourceroot=constants.SOURCE_ROOT,
        remote_patches=[])
    if self.options.where == REMOTE:
      return RunRemote(site_config, self.options, patch_pool)
    elif self.options.where == LOCAL:
      return RunLocal(self.options)
    elif self.options.where == CBUILDBOT:
      return RunCbuildbot(self.options)
    else:
      # Bug fix: interpolate the value into the message instead of passing it
      # as a spare Exception argument (the '%s' was never formatted).
      raise Exception('Unknown options.where: %s' % self.options.where)
|
# coding: spec
from photons_control.colour import ColourParser, make_hsbks
import pytest
describe "make_hsbks":
@pytest.fixture()
def colors(self):
return [
["red", 10],
["blue", 3],
["hue:78 brightness:0.5", 5],
["#234455", 2],
[[100], 1],
[[100, 0.5], 1],
[[100, 0.5, 0.5], 1],
[[100, 0.5, 0.5, 9000], 1],
[[0, 0, 0, 0], 1],
[(120, 1, 1, 9000), 1],
[{"hue": 100}, 1],
[{"hue": 100, "saturation": 0.5}, 1],
[{"hue": 100, "saturation": 0.5, "brightness": 0.5}, 1],
[{"hue": 100, "saturation": 0.5, "brightness": 0.5, "kelvin": 9000}, 1],
[{"hue": 0, "saturation": 0, "brightness": 0, "kelvin": 0}, 1],
[(120, 1, 1, 9000), 1],
]
it "can make colors", colors:
def hsbk(*args, **kwargs):
h, s, b, k = ColourParser.hsbk(*args, **kwargs)
return {"hue": h, "saturation": s, "brightness": b, "kelvin": k}
colorRed = hsbk("red", overrides={"brightness": 1.0, "kelvin": 3500})
colorBlue = hsbk("blue", overrides={"brightness": 1.0, "kelvin": 3500})
colorHSBK = hsbk("hue:78 brightness:0.5", overrides={"saturation": 0, "kelvin": 3500})
colorHEX = hsbk("#234455", overrides={"kelvin": 3500})
expected = [colorRed] * 10 + [colorBlue] * 3 + [colorHSBK] * 5 + [colorHEX] * 2
for _ in range(2):
expected.append({"hue": 100, "saturation": 0, "brightness": 1, "kelvin": 3500})
expected.append({"hue": 100, "saturation": 0.5, "brightness": 1, "kelvin": 3500})
expected.append({"hue": 100, "saturation": 0.5, "brightness": 0.5, "kelvin": 3500})
expected.append({"hue": 100, "saturation": 0.5, "brightness": 0.5, "kelvin": 9000})
expected.append({"hue": 0, "saturation": 0, "brightness": 0, "kelvin": 0})
expected.append({"hue": 120, "saturation": 1, "brightness": 1, "kelvin": 9000})
got = list(make_hsbks(colors))
for i, (g, e) in enumerate(zip(got, expected)):
if g != e:
print(i)
print(f"\tGOT : {g}")
print(f"\tWANT: {e}")
print()
assert got == expected
it "can overrides hue", colors:
colors = list(make_hsbks(colors, overrides={"hue": 1}))
for c in colors:
assert c["hue"] == 1
it "can overrides saturation", colors:
colors = list(make_hsbks(colors, overrides={"saturation": 0.3}))
for c in colors:
assert c["saturation"] == 0.3
it "can overrides brightness", colors:
colors = list(make_hsbks(colors, overrides={"brightness": 0.6}))
for c in colors:
assert c["brightness"] == 0.6
it "can overrides kelvin", colors:
colors = list(make_hsbks(colors, overrides={"kelvin": 8000}))
for c in colors:
assert c["kelvin"] == 8000
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the `contourcarpet.colorbar.tickformatstop.dtickrange` attribute."""

    def __init__(
        self,
        plotly_name='dtickrange',
        parent_name='contourcarpet.colorbar.tickformatstop',
        **kwargs
    ):
        # dtickrange is a two-item array; both entries accept any value.
        item_spec = {'valType': 'any', 'editType': 'colorbars'}
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'colorbars'),
            items=kwargs.pop('items', [dict(item_spec), dict(item_spec)]),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
|
from setuptools import find_packages, setup
# Minimal packaging metadata for the `src` package (cookiecutter-style layout).
setup(
    name='src',
    packages=find_packages(),  # pick up `src` and any sub-packages
    version='0.1.0',
    description='test how can use cookie',
    author='Ahmed Fouad',
    license='',
)
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Benchmarking the library on inference and training in PyTorch.
"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..file_utils import is_py3nvml_available, is_tf_available
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Return a decorator that wraps a benchmark callable for either eager or graph execution.

    In graph mode the callable is compiled with ``tf.function`` (optionally XLA-jitted);
    in eager mode it is returned as a plain wrapper.

    Args:
        do_eager_mode: run the wrapped function eagerly instead of as a ``tf.function``.
        use_xla: compile the graph with XLA; incompatible with ``do_eager_mode=True``.
    """
    # Fail fast: the original checked this only when the decorator was applied,
    # after an unused graph-mode wrapper had already been built.
    if do_eager_mode is True:
        assert (
            use_xla is False
        ), "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."

    def run_func(func):
        if do_eager_mode is True:
            # Eager path: no tf.function graph is constructed at all.
            @wraps(func)
            def run_in_eager_mode(*args, **kwargs):
                return func(*args, **kwargs)

            return run_in_eager_mode

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    """Create an int32 tensor of shape (batch_size, sequence_length) with uniformly random token ids in [0, vocab_size).

    Note: the original return annotation was the list literal ``["tf.Tensor"]``, which is
    not a valid type; it is corrected to the forward reference ``"tf.Tensor"``.
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for _ in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    """Benchmark speed and memory of TensorFlow models for inference and training."""
    # Populated by the Benchmark base class from CLI / user arguments.
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        """Version string of the installed TensorFlow package."""
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Return the time of a single forward pass (seconds), via `_measure_speed`."""
        # initialize GPU on separate process
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Return the time of a single train step (forward + gradients), via `_measure_speed`."""
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure memory of one forward pass; returns (memory, line-by-line summary or None).

        NOTE(review): the annotation is a list literal; it presumably should be
        Tuple[Memory, Optional[MemorySummary]] — confirm before changing.
        """
        # initialize GPU on separate process
        if self.args.is_gpu:
            # Avoid TF pre-allocating the whole GPU so nvidia-smi numbers are meaningful.
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        """Measure memory of one train step; returns (memory, line-by-line summary or None)."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build the model named in the config and return a no-arg callable running one forward pass."""
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            # Fall back to the architecture-agnostic base model for this config class.
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build the LM-head model and return a no-arg callable running one forward+gradient step."""
        config = self.config_dict[model_name]
        # Gradients require graph mode; eager training is explicitly rejected.
        assert (
            self.args.eager_mode is False
        ), "Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`."
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        """Time `func` with timeit under the device strategy; returns best time of one call.

        NOTE(review): on ResourceExhaustedError this prints and implicitly returns None.
        """
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                # Each repeat runs func 10 times, so divide to get per-call time.
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn("Doesn't fit on GPU. {}".format(e))
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        """Measure memory of one call to `func`.

        GPU: total usage from nvml after the call (requires py3nvml).
        CPU: peak memory via measure_peak_memory_cpu, unless line-by-line tracing is on.
        Returns (memory, summary); ("N/A", None) if the model does not fit on the GPU.
        """
        logger.info(
            "Note that TensorFlow allocates more memory than"
            "it might need to speed up computation."
            "The memory reported here corresponds to the memory"
            "reported by `nvidia-smi`, which can vary depending"
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    assert (
                        self.args.eager_mode
                    ), "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory consumption line by line."
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn("Doesn't fit on GPU. {}".format(e))
                return "N/A", None
|
"""A scene suitable for usage with :class:`.SampleSpace`."""
__all__ = ["SampleSpaceScene"]
from ..animation.animation import Animation
from ..animation.transform import MoveToTarget
from ..animation.transform import Transform
from ..animation.update import UpdateFromFunc
from ..constants import *
from ..scene.scene import Scene
from ..mobject.probability import SampleSpace
from ..mobject.types.vectorized_mobject import VGroup
class SampleSpaceScene(Scene):
    """Scene with helpers for animating a :class:`.SampleSpace` and its subdivisions."""
    def get_sample_space(self, **config):
        """Create, store, and return the scene's SampleSpace."""
        self.sample_space = SampleSpace(**config)
        return self.sample_space
    def add_sample_space(self, **config):
        """Create a SampleSpace and add it to the scene."""
        self.add(self.get_sample_space(**config))
    def get_division_change_animations(
        self, sample_space, parts, p_list, dimension=1, new_label_kwargs=None, **kwargs
    ):
        """Return animations re-proportioning `parts` of `sample_space` to the probabilities in `p_list`.

        dimension=1 stacks parts vertically (DOWN), dimension=0 horizontally (RIGHT).
        Existing braces/labels are transformed to match the new subdivision.
        """
        if new_label_kwargs is None:
            new_label_kwargs = {}
        anims = []
        p_list = sample_space.complete_p_list(p_list)
        space_copy = sample_space.copy()
        vect = DOWN if dimension == 1 else RIGHT
        parts.generate_target()
        for part, p in zip(parts.target, p_list):
            # Stretch each part to cover the full space, then scale by its probability.
            part.replace(space_copy, stretch=True)
            part.stretch(p, dimension)
        parts.target.arrange(vect, buff=0)
        parts.target.move_to(space_copy)
        anims.append(MoveToTarget(parts))
        if hasattr(parts, "labels") and parts.labels is not None:
            label_kwargs = parts.label_kwargs
            label_kwargs.update(new_label_kwargs)
            new_braces, new_labels = sample_space.get_subdivision_braces_and_labels(
                parts.target, **label_kwargs
            )
            anims += [
                Transform(parts.braces, new_braces),
                Transform(parts.labels, new_labels),
            ]
        return anims
    def get_horizontal_division_change_animations(self, p_list, **kwargs):
        """Animations resizing the horizontal subdivision to `p_list`."""
        assert hasattr(self.sample_space, "horizontal_parts")
        return self.get_division_change_animations(
            self.sample_space,
            self.sample_space.horizontal_parts,
            p_list,
            dimension=1,
            **kwargs,
        )
    def get_vertical_division_change_animations(self, p_list, **kwargs):
        """Animations resizing the vertical subdivision to `p_list`."""
        assert hasattr(self.sample_space, "vertical_parts")
        return self.get_division_change_animations(
            self.sample_space,
            self.sample_space.vertical_parts,
            p_list,
            dimension=0,
            **kwargs,
        )
    def get_conditional_change_anims(
        self, sub_sample_space_index, value, post_rects=None, **kwargs
    ):
        """Animations changing the conditional split of one horizontal part; optionally updates posterior rects."""
        parts = self.sample_space.horizontal_parts
        sub_sample_space = parts[sub_sample_space_index]
        anims = self.get_division_change_animations(
            sub_sample_space,
            sub_sample_space.vertical_parts,
            value,
            dimension=0,
            **kwargs,
        )
        if post_rects is not None:
            anims += self.get_posterior_rectangle_change_anims(post_rects)
        return anims
    def get_top_conditional_change_anims(self, *args, **kwargs):
        """Conditional-change animations for the top (index 0) horizontal part."""
        return self.get_conditional_change_anims(0, *args, **kwargs)
    def get_bottom_conditional_change_anims(self, *args, **kwargs):
        """Conditional-change animations for the bottom (index 1) horizontal part."""
        return self.get_conditional_change_anims(1, *args, **kwargs)
    def get_prior_rectangles(self):
        """VGroup of the first vertical part of each of the two horizontal parts."""
        return VGroup(
            *[self.sample_space.horizontal_parts[i].vertical_parts[0] for i in range(2)]
        )
    def get_posterior_rectangles(self, buff=MED_LARGE_BUFF):
        """Build posterior rectangles: prior rects rescaled by area share, stacked right of the sample space."""
        prior_rects = self.get_prior_rectangles()
        areas = [rect.get_width() * rect.get_height() for rect in prior_rects]
        total_area = sum(areas)
        total_height = prior_rects.get_height()
        post_rects = prior_rects.copy()
        for rect, area in zip(post_rects, areas):
            # Height proportional to area share; width chosen so the area is preserved.
            rect.stretch_to_fit_height(total_height * area / total_area)
            rect.stretch_to_fit_width(area / rect.get_height())
        post_rects.arrange(DOWN, buff=0)
        post_rects.next_to(self.sample_space, RIGHT, buff)
        return post_rects
    def get_posterior_rectangle_braces_and_labels(
        self, post_rects, labels, direction=RIGHT, **kwargs
    ):
        """Braces and labels for the posterior rectangles (delegates to the sample space)."""
        return self.sample_space.get_subdivision_braces_and_labels(
            post_rects, labels, direction, **kwargs
        )
    def update_posterior_braces(self, post_rects):
        """Resize and reposition each brace/label pair to match its rectangle."""
        braces = post_rects.braces
        labels = post_rects.labels
        for rect, brace, label in zip(post_rects, braces, labels):
            brace.stretch_to_fit_height(rect.get_height())
            brace.next_to(rect, RIGHT, SMALL_BUFF)
            label.next_to(brace, RIGHT, SMALL_BUFF)
    def get_posterior_rectangle_change_anims(self, post_rects):
        """Animation keeping `post_rects` (and any braces/labels) in sync with the sample space."""
        def update_rects(rects):
            new_rects = self.get_posterior_rectangles()
            # Apply the transform immediately (update(1)) rather than animating it.
            Transform(rects, new_rects).update(1)
            if hasattr(rects, "braces"):
                self.update_posterior_braces(rects)
            return rects
        anims = [UpdateFromFunc(post_rects, update_rects)]
        if hasattr(post_rects, "braces"):
            anims += list(map(Animation, [post_rects.labels, post_rects.braces]))
        return anims
|
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
# CONSTANTS
# categorization of a run result
# 'correct' and 'wrong' refer to whether the tool's result matches the expected result.
# 'confirmed' and 'unconfirmed' refer to whether the tool's result was confirmed (e.g., by witness validation)
CATEGORY_CORRECT = 'correct'
"""run result given by tool is correct (we use 'correct' instead of 'correct-confirmed')"""
CATEGORY_CORRECT_UNCONFIRMED = 'correct-unconfirmed'
"""run result given by tool is correct but not confirmed"""
CATEGORY_WRONG = 'wrong'
"""run result given by tool is wrong (we use 'wrong' instead of 'wrong-unconfirmed')"""
#CATEGORY_WRONG_CONFIRMED = 'wrong-confirmed'
"""run result given by tool is wrong but confirmed by result validation"""
CATEGORY_UNKNOWN = 'unknown'
"""run result given by tool is "unknown" (i.e., no answer)"""
CATEGORY_ERROR = 'error'
"""tool failed, crashed, or hit a resource limit"""
CATEGORY_MISSING = 'missing'
"""BenchExec could not determine whether run result was correct or wrong
because no property was defined, and no other categories apply."""
# internal property names used in this module (should not contain spaces)
# previously used by SV-COMP (http://sv-comp.sosy-lab.org/2014/rules.php):
_PROP_LABEL = 'unreach-label'
# currently used by SV-COMP (http://sv-comp.sosy-lab.org/2016/rules.php):
_PROP_CALL = 'unreach-call'
_PROP_TERMINATION = 'termination'
_PROP_OVERFLOW = 'no-overflow'
_PROP_DEADLOCK = 'no-deadlock'
_PROP_DEREF = 'valid-deref'
_PROP_FREE = 'valid-free'
_PROP_MEMTRACK = 'valid-memtrack'
_PROP_MEMCLEANUP = 'valid-memcleanup'
# for Java verification:
_PROP_ASSERT = 'assert'
# specification given as an automaton:
_PROP_AUTOMATON = 'observer-automaton'
# for solvers:
_PROP_SAT = 'sat'
STR_FALSE = 'false' # only for special cases. STR_FALSE is no official result, because property is missing
# possible run results (output of a tool)
RESULT_UNKNOWN = 'unknown'
"""tool could not find out an answer due to incompleteness"""
RESULT_ERROR = 'ERROR' # or any other value not listed here
"""tool could not complete due to an error
(it is recommended to instead use a string with more details about the error)"""
RESULT_TRUE_PROP = 'true'
"""property holds"""
# Both the current and the legacy spelling of the reachability violation are
# recognized (cf. RESULT_LIST below).
RESULT_FALSE_REACH = STR_FALSE + '(' + _PROP_CALL + ')'
_RESULT_FALSE_REACH_OLD = STR_FALSE + '(reach)'
"""SV-COMP reachability property violated"""
RESULT_FALSE_TERMINATION = STR_FALSE + '(' + _PROP_TERMINATION + ')'
"""SV-COMP termination property violated"""
RESULT_FALSE_OVERFLOW = STR_FALSE + '(' + _PROP_OVERFLOW + ')'
"""SV-COMP overflow property violated"""
RESULT_FALSE_DEADLOCK = STR_FALSE + '(' + _PROP_DEADLOCK + ')'
"""deadlock property violated""" # not yet part of SV-COMP
RESULT_FALSE_DEREF = STR_FALSE + '(' + _PROP_DEREF + ')'
"""SV-COMP valid-deref property violated"""
RESULT_FALSE_FREE = STR_FALSE + '(' + _PROP_FREE + ')'
"""SV-COMP valid-free property violated"""
RESULT_FALSE_MEMTRACK = STR_FALSE + '(' + _PROP_MEMTRACK + ')'
"""SV-COMP valid-memtrack property violated"""
RESULT_FALSE_MEMCLEANUP = STR_FALSE + '(' + _PROP_MEMCLEANUP + ')'
"""SV-COMP valid-memcleanup property violated"""
RESULT_WITNESS_CONFIRMED = 'witness confirmed'
"""SV-COMP property violated and witness confirmed"""
RESULT_SAT = 'sat'
"""task is satisfiable"""
RESULT_UNSAT = 'unsat'
"""task is unsatisfiable"""
# List of all possible results.
# If a result is not in this list, it is handled as RESULT_CLASS_ERROR.
RESULT_LIST = [RESULT_TRUE_PROP, RESULT_UNKNOWN,
               RESULT_FALSE_REACH,
               _RESULT_FALSE_REACH_OLD,
               RESULT_FALSE_TERMINATION,
               RESULT_FALSE_DEREF, RESULT_FALSE_FREE, RESULT_FALSE_MEMTRACK,
               RESULT_FALSE_MEMCLEANUP,
               RESULT_WITNESS_CONFIRMED,
               RESULT_SAT, RESULT_UNSAT,
               RESULT_FALSE_OVERFLOW, RESULT_FALSE_DEADLOCK
               ]
# Classification of results
RESULT_CLASS_TRUE = 'true'
RESULT_CLASS_FALSE = 'false'
RESULT_CLASS_UNKNOWN = 'unknown'
RESULT_CLASS_ERROR = 'error'
# This maps content of property files to property name.
# Matching is done by substring search in properties_of_file().
_PROPERTY_NAMES = {'LTL(G ! label(': _PROP_LABEL,
                   'LTL(G assert)': _PROP_ASSERT,
                   'LTL(G ! call(__VERIFIER_error()))': _PROP_CALL,
                   'LTL(F end)': _PROP_TERMINATION,
                   'LTL(G valid-free)': _PROP_FREE,
                   'LTL(G valid-deref)': _PROP_DEREF,
                   'LTL(G valid-memtrack)': _PROP_MEMTRACK,
                   'LTL(G valid-memcleanup)': _PROP_MEMCLEANUP,
                   'OBSERVER AUTOMATON': _PROP_AUTOMATON,
                   'SATISFIABLE': _PROP_SAT,
                   'LTL(G ! overflow)': _PROP_OVERFLOW,
                   'LTL(G ! deadlock)': _PROP_DEADLOCK,
                   }
# This maps a possible result substring of a file name
# to the expected result string of the tool and the set of properties
# for which this result is relevant.
_FILE_RESULTS = {
    '_true-unreach-label': (RESULT_TRUE_PROP, {_PROP_LABEL}),
    '_true-unreach-call': (RESULT_TRUE_PROP, {_PROP_CALL}),
    '_true_assert': (RESULT_TRUE_PROP, {_PROP_ASSERT}),
    '_true-assert': (RESULT_TRUE_PROP, {_PROP_ASSERT}),
    '_true-termination': (RESULT_TRUE_PROP, {_PROP_TERMINATION}),
    '_true-valid-deref': (RESULT_TRUE_PROP, {_PROP_DEREF}),
    '_true-valid-free': (RESULT_TRUE_PROP, {_PROP_FREE}),
    '_true-valid-memtrack': (RESULT_TRUE_PROP, {_PROP_MEMTRACK}),
    '_true-valid-memcleanup':(RESULT_TRUE_PROP, {_PROP_MEMCLEANUP}),
    '_true-valid-memsafety': (RESULT_TRUE_PROP, {_PROP_DEREF, _PROP_FREE, _PROP_MEMTRACK}),
    '_true-no-overflow': (RESULT_TRUE_PROP, {_PROP_OVERFLOW}),
    '_true-no-deadlock': (RESULT_TRUE_PROP, {_PROP_DEADLOCK}),
    '_false-unreach-label': (RESULT_FALSE_REACH, {_PROP_LABEL}),
    '_false-unreach-call': (RESULT_FALSE_REACH, {_PROP_CALL}),
    '_false_assert': (RESULT_FALSE_REACH, {_PROP_ASSERT}),
    '_false-assert': (RESULT_FALSE_REACH, {_PROP_ASSERT}),
    '_false-termination': (RESULT_FALSE_TERMINATION, {_PROP_TERMINATION}),
    '_false-valid-deref': (RESULT_FALSE_DEREF, {_PROP_DEREF}),
    '_false-valid-free': (RESULT_FALSE_FREE, {_PROP_FREE}),
    '_false-valid-memtrack': (RESULT_FALSE_MEMTRACK, {_PROP_MEMTRACK}),
    '_false-valid-memcleanup':(RESULT_FALSE_MEMCLEANUP, {_PROP_MEMCLEANUP}),
    '_false-no-overflow': (RESULT_FALSE_OVERFLOW, {_PROP_OVERFLOW}),
    '_false-no-deadlock': (RESULT_FALSE_DEADLOCK, {_PROP_DEADLOCK}),
    '_sat': (RESULT_SAT, {_PROP_SAT}),
    '_unsat': (RESULT_UNSAT, {_PROP_SAT}),
    }
# Map a property to all possible results for it.
_VALID_RESULTS_PER_PROPERTY = {
    _PROP_ASSERT: {RESULT_TRUE_PROP, RESULT_FALSE_REACH},
    _PROP_LABEL: {RESULT_TRUE_PROP, RESULT_FALSE_REACH},
    _PROP_CALL: {RESULT_TRUE_PROP, RESULT_FALSE_REACH},
    _PROP_AUTOMATON: {RESULT_TRUE_PROP, RESULT_FALSE_REACH},
    _PROP_DEREF: {RESULT_TRUE_PROP, RESULT_FALSE_DEREF},
    _PROP_FREE: {RESULT_TRUE_PROP, RESULT_FALSE_FREE},
    _PROP_MEMTRACK: {RESULT_TRUE_PROP, RESULT_FALSE_MEMTRACK},
    _PROP_MEMCLEANUP: {RESULT_TRUE_PROP, RESULT_FALSE_MEMCLEANUP},
    _PROP_OVERFLOW: {RESULT_TRUE_PROP, RESULT_FALSE_OVERFLOW},
    _PROP_DEADLOCK: {RESULT_TRUE_PROP, RESULT_FALSE_DEADLOCK},
    _PROP_TERMINATION: {RESULT_TRUE_PROP, RESULT_FALSE_TERMINATION},
    _PROP_SAT: {RESULT_SAT, RESULT_UNSAT},
    }
# Score values taken from http://sv-comp.sosy-lab.org/
# If different scores should be used depending on the checked property,
# change score_for_task() appropriately
# (use values 0 to disable scores completely for a given property).
_SCORE_CORRECT_TRUE = 2
_SCORE_CORRECT_UNCONFIRMED_TRUE = 1
_SCORE_CORRECT_FALSE = 1
_SCORE_CORRECT_UNCONFIRMED_FALSE = 0
_SCORE_UNKNOWN = 0
_SCORE_WRONG_FALSE = -16
_SCORE_WRONG_TRUE = -32
def _expected_result(filename, checked_properties):
    """Derive the expected verdict encoded in a task's file name.

    Scans _FILE_RESULTS for name markers that occur in `filename` and are
    relevant for at least one of `checked_properties`. Returns the expected
    result string only if exactly one marker applies, otherwise None
    (no marker, or multiple properties per file are not supported).
    """
    matches = [
        expected
        for marker, (expected, relevant_props) in _FILE_RESULTS.items()
        if marker in filename and relevant_props.intersection(checked_properties)
    ]
    return matches[0] if len(matches) == 1 else None
def properties_of_file(propertyfile):
    """
    Return a list of property names that should be checked according to the given property file.
    @param propertyfile: None or a file name of a property file.
    @return: A possibly empty list of property names.
    """
    assert os.path.isfile(propertyfile)
    with open(propertyfile) as f:
        content = f.read().strip()
    # Only LTL 'CHECK' specifications, observer automata, and SAT tasks are recognized.
    looks_valid = (
        'CHECK' in content
        or content == 'OBSERVER AUTOMATON'
        or content == 'SATISFIABLE'
    )
    if not looks_valid:
        sys.exit('File "{0}" is not a valid property file.'.format(propertyfile))
    # TODO: should we switch to regex or line-based reading?
    properties = [name for marker, name in _PROPERTY_NAMES.items() if marker in content]
    if not properties:
        sys.exit('File "{0}" does not contain a known property.'.format(propertyfile))
    return properties
def satisfies_file_property(filename, properties):
    """
    Tell whether the given properties are violated or satisfied in a given file.
    Assumption: Currently, only one expected result per set of properties is supported.
    @param filename: The file name of the input file.
    @param properties: The list of properties to check (as returned by properties_of_file()).
    @return True if the properties are satisfied; False if it is violated; None if it is unknown
    """
    expected = _expected_result(filename, properties)
    if not expected:
        return None
    result_class = get_result_classification(expected)
    if result_class == RESULT_CLASS_TRUE:
        return True
    return False if result_class == RESULT_CLASS_FALSE else None
def score_for_task(filename, properties, category, result):
    """
    Return the possible score of task, depending on whether the result is correct or not.
    Pass category=result.CATEGORY_CORRECT and result=None to calculate the maximum possible score.
    """
    if category == CATEGORY_CORRECT_UNCONFIRMED:
        if satisfies_file_property(filename, properties):
            return _SCORE_CORRECT_UNCONFIRMED_TRUE
        else:
            return _SCORE_CORRECT_UNCONFIRMED_FALSE
    if category not in (CATEGORY_CORRECT, CATEGORY_WRONG):
        return 0
    if _PROP_SAT in properties:
        # SAT tasks are not scored.
        return 0
    correct = (category == CATEGORY_CORRECT)
    expected = satisfies_file_property(filename, properties)
    if expected is None:
        return 0
    elif expected is True:
        # expected result is "true", result was "true" or "false"
        return _SCORE_CORRECT_TRUE if correct else _SCORE_WRONG_FALSE
    elif expected is False:
        if correct:
            # expected result is "false", result was "false" with correct property
            return _SCORE_CORRECT_FALSE
        else:
            assert result, "Cannot compute score without actual tool result"
            result_class = get_result_classification(result)
            if result_class == RESULT_CLASS_TRUE:
                # expected result is "false", result was "true"
                return _SCORE_WRONG_TRUE
            elif result_class == RESULT_CLASS_FALSE:
                # expected result is "false", result was "false" but with wrong property
                return _SCORE_WRONG_FALSE
            else:
                assert False, "unexpected result classification " + result_class + " for result " + result
    else:
        # Fix: the original concatenated the non-string `expected` into the message,
        # which raised TypeError instead of AssertionError.
        assert False, "unexpected return value from satisfies_file_property: " + str(expected)
def _file_is_java(filename):
# Java benchmarks have as filename their main class, so we cannot check for '.java'
return '_assert' in filename
def get_result_classification(result):
    '''
    Classify the given result into "true" (property holds),
    "false" (property does not hold), "unknown", and "error".
    @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized).
    @return One of RESULT_CLASS_* strings
    '''
    if result not in RESULT_LIST:
        return RESULT_CLASS_ERROR
    if result == RESULT_UNKNOWN:
        return RESULT_CLASS_UNKNOWN
    # Everything else in RESULT_LIST is either a "true"-like or a "false"-like verdict.
    is_true_like = result in (RESULT_TRUE_PROP, RESULT_SAT)
    return RESULT_CLASS_TRUE if is_true_like else RESULT_CLASS_FALSE
def get_result_category(filename, result, properties):
    '''
    This function determines the relation between actual result and expected result
    for the given file and properties.
    @param filename: The file name of the input file.
    @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized).
    @param properties: The list of properties to check (as returned by properties_of_file()).
    @return One of the CATEGORY_* strings.
    '''
    assert set(properties).issubset(_VALID_RESULTS_PER_PROPERTY.keys())
    if result not in RESULT_LIST:
        return CATEGORY_ERROR
    if result == RESULT_UNKNOWN:
        return CATEGORY_UNKNOWN
    checked_properties = properties
    if not checked_properties and _file_is_java(filename):
        # Currently, no property files for checking Java programs exist,
        # so we hard-code a check for _PROP_ASSERT for these
        checked_properties = [_PROP_ASSERT]
    if not checked_properties:
        # Without property we cannot return correct or wrong results.
        return CATEGORY_MISSING
    expected_result = _expected_result(filename, checked_properties)
    if not expected_result:
        # filename gives no hint on the expected output
        return CATEGORY_MISSING
    for prop in checked_properties:
        if result in _VALID_RESULTS_PER_PROPERTY[prop]:
            # tool returned an answer for this property
            return CATEGORY_CORRECT if expected_result == result else CATEGORY_WRONG
    # tool returned an answer that does not belong to any of the checked properties
    return CATEGORY_UNKNOWN
|
from yangvoodoo import Cache, Common, Errors, Types
class Context:
    """Shared state for Node-based access to one yang module: the module name,
    schema objects, the data-access layer, a schema cache, and a logger."""
    def __init__(self, module, data_access_layer, yang_schema, yang_ctx, log=None):
        self.module = module
        self.schema = yang_schema          # libyang top-level module object
        self.schemactx = yang_ctx          # libyang context
        self.dal = data_access_layer       # DataAccess instance used for all reads/writes
        self.schemacache = Cache.Cache()   # cache for schema lookups
        self.log = log
        self.yang_module = module          # kept alongside self.module (same value)
    def _trace(self, vnt, yn, context, p):
        """Emit a trace log line describing a node creation/navigation event."""
        self.log.trace(
            "%s: %s %s\nschema: %s\ndata: %s\nparent of: %s",
            vnt,
            context,
            yn.libyang_node,
            yn.real_schema_path,
            yn.real_data_path,
            p,
        )
class Node:
"""
Constraints:
Node based access is provided for a particular yang module, whenever we run 'get_node'
we bind to a particular yang module.
At 10,000ft level this module acts as a facade around the DataAccess methods get(), gets()
set() and delete(). We depend heavily on libyang to inspect the schema on each method.
On calling __getatttr_
a) non-Primitives (i.e. Containers and Lists) return another Object
b) Primtiives return the value itself.
Each time we instantiate an object we store
- module (the name of the yang module - used when forming the xpath)
- path (the fully qualified path, maintaining reference to specific elements of lists)
i.e. /integrationtest:s
Internal Notes:
Things held of a Context
------------------------
- module = the name of the yang module (e.g integrationtest)
- dal = An instantiated object of DataAccess() - one object used for all access.
Note: it is possible to access sysrepo diretly via root._context.dal.session
- schema = A libyang object of the top-level yang module.
- schemactx = A libyang context object
- cache = A cache object to store the schema (assumption here is libyang lookups are expesnive - but
that may not be true. For sysrepo data lookup even if it's expensive we would never choose
to cache that data.
- log = A Log instance which behaves like the python standard logging library.
Things held on node
--------------------
- real_data_path = an XPATH expression for the path - with prefixes and values pointing to exact instances
of data. This is used for fetching data.... e.g.
integrationtest:outsidelist[leafo='its cold outside']/integrationtest:otherinsidelist
[otherlist1='uno'][otherlist2='due'][otherlist3='tre']/integrationtest:language
- real_schema_path = an XPATH expression for the path - with prefixes but no specific instances of data
included. This is used for looking up schema definitions.... e.g.
/integrationtest:outsidelist/integrationtest:otherinsidelist/integrationtest:language
- libyang_node = The libyang node for this schema path of the yang model.
"""
_NODE_TYPE = "Node"
def __init__(self, context, node, parent_self=None):
self.__dict__["_context"] = context
self.__dict__["_node"] = node
self.__dict__["_parent"] = parent_self
self.__dict__["_path"] = node.real_data_path
self._specific_init()
def __name__(self):
return "VoodooNode"
def __repr__(self):
return self._base_repr()
def _base_repr(self):
return "Voodoo%s{%s}" % (self._NODE_TYPE, self._node.real_data_path)
def __del__(self):
pass
# path = self.__dict__['_path']
def _specific_init(self):
pass
def __setitem__(self, arg, val):
if not isinstance(arg, str):
raise ValueError("node['child'] only supports a single argument.")
return self.__setattr__(arg, val)
def __getitem__(self, arg):
if not isinstance(arg, str):
raise ValueError("node['child'] only supports a single argument.")
return self.__getattr__(arg)
def __getattr__(self, attr):
    """Resolve *attr* as a child of this YANG node.

    Dispatches on the libyang nodetype of the child schema node:
    containers, lists, leaf-lists, choices and cases come back wrapped in
    the matching Voodoo object, while plain leaves are read from the
    datastore and returned as a python literal.

    Raises:
        AttributeError: for IPython display probes.
        NotImplementedError: for YANG node types with no wrapper here.
    """
    # IPython probes these while rendering; refuse so it falls back cleanly.
    if attr in ("_ipython_canary_method_should_not_exist_", "_repr_mimebundle_"):
        raise AttributeError("Go Away!")
    context = self.__dict__["_context"]
    node = self.__dict__["_node"]
    context.log.trace("__getattr__ %s %s", attr, node.real_schema_path)
    # path = self.__dict__['_path']
    # spath = self.__dict__['_spath']
    node_schema = Common.Utils.get_yangnode(node, context, attr)
    node_type = node_schema.nodetype()
    # NOTE(review): 1 is presumably the libyang CONTAINER nodetype constant
    # (the other branches use Types.LIBYANG_NODETYPE) -- confirm.
    if node_type == 1:
        # assume this is a container (or a presence container)
        if node_schema.presence() is None:
            # Return Object
            context._trace("Container", node_schema, context, self)
            return Container(context, node_schema, self)
        else:
            # Return Object
            context._trace("PresenceContainer", node_schema, context, self)
            return PresenceContainer(context, node_schema, self)
    elif node_type == Types.LIBYANG_NODETYPE["LEAF"]:
        # TODO: need to consider unions of enum's in future - there is already a plan for a formalValidator class
        leaf_type = node_schema.type().base()
        # NOTE(review): 5 looks like the 'empty' leaf base-type constant --
        # confirm against the libyang type constants.
        if leaf_type == 5:
            # Return Object
            context._trace("Empty", node_schema, context, None)
            return Empty(context, node_schema)
        # if leaf_type == 6:
        #     return Enum(context, new_xpath, new_spath, node_schema)
        context.log.trace(
            "Returning Literal value from datastore for %s",
            node_schema.real_data_path,
        )
        # Fall back to the schema default when the datastore holds no value.
        dal_value = context.dal.get(
            node_schema.real_data_path, default_value=node_schema.default()
        )
        # Numeric leaves read back as 0 rather than None when unset.
        if dal_value is None and leaf_type in Types.NUMBERS:
            return 0
        # Booleans are stored as XPATH-style 'true'/'false' strings.
        if leaf_type == Types.DATA_ABSTRACTION_MAPPING["BOOLEAN"]:
            if dal_value == "true":
                return True
            elif dal_value == "false":
                return False
        return dal_value
    elif node_type == Types.LIBYANG_NODETYPE["LIST"]:
        # Return Object
        context._trace("List", node_schema, context, self)
        return List(context, node_schema, self)
    elif node_type == Types.LIBYANG_NODETYPE["LEAFLIST"]:
        # Return Object
        context._trace("LeafList", node_schema, context, self)
        return LeafList(context, node_schema, self)
    elif node_type == Types.LIBYANG_NODETYPE["CHOICE"]:
        """
        Note: for choices in terms of data we don't render the 'case'
        beertype = "/integrationtest:morecomplex/integrationtest:inner/integrationtest:beer-type/"
        list(yangctx.find_path(beertype + "integrationtest:craft/integrationtest:brewdog"))
        However: when rendering the lookups for libyang schema we must include the case, which
        means the following is invalid.
        list(yangctx.find_path(beertype + "integrationtest:brewdog"))
        """
        context._trace("Choice", node_schema, context, self)
        return Choice(context, node_schema, self)
    elif node_type == Types.LIBYANG_NODETYPE["CASE"]:
        context._trace("Case", node_schema, context, self)
        return Case(context, node_schema, self)
    # Anything else (anyxml, rpc, notification, ...) is unsupported.
    context.log.error(
        "The nodetype %s for schema %s is not supported.",
        node_type,
        node_schema.real_schema_path,
    )
    raise NotImplementedError(
        "The YANG structure at %s of type %s is not supported."
        % (node_schema.real_schema_path, node_type)
    )
def __setattr__(self, attr, val):
    """Write *val* to the leaf *attr* in the datastore.

    Assigning None deletes the leaf. Only LEAF nodes may be assigned;
    list keys are immutable once the element exists.

    Raises:
        Errors.ReadonlyError: when the session is read-only.
        Errors.CannotAssignValueToContainingNode: target is not a leaf.
        Errors.ListKeyCannotBeChanged: attempt to modify a list key.
    """
    context = self.__dict__["_context"]
    node = self.__dict__["_node"]
    node_schema = Common.Utils.get_yangnode(node, context, attr)
    context.log.trace("__setattr__ %s=%s %s", attr, val, node.real_data_path)
    if context.readonly:
        raise Errors.ReadonlyError()
    if node_schema.nodetype() != Types.LIBYANG_NODETYPE["LEAF"]:
        raise Errors.CannotAssignValueToContainingNode(attr)
    if node_schema.is_key():
        raise Errors.ListKeyCannotBeChanged(node_schema.real_data_path, attr)
    leaf_type = node_schema.type().base()
    # Enumeration:
    # NOTE(review): 6 is presumably the enumeration base-type constant -- confirm.
    if leaf_type == 6:
        # The value must exactly match one of the enum's legal values
        # (compared as strings, the way XPATH would see them).
        match = any(
            str(enum_valid_val) == str(val)
            for (enum_valid_val, _) in node_schema.type().enums()
        )
        if not match:
            self._raise_ValueDoesMatchEnumeration(node_schema, val)
    if val is None:
        # Assigning None removes the leaf from the datastore.
        context.dal.delete(node_schema.real_data_path)
        return
    backend_type = Common.Utils.get_yang_type(
        node_schema.type(), val, node_schema.real_data_path
    )
    context.dal.set(node_schema.real_data_path, val, backend_type)
@staticmethod
def _raise_ValueDoesMatchEnumeration(node_schema, val):
    """Raise the project's 'value not in enumeration' error for *val*."""
    raise Errors.ValueDoesMatchEnumeration(node_schema.real_data_path, val)
def __dir__(self, no_translations=False):
    """List the child node names available beneath this node.

    Names clashing with reserved python keywords gain a trailing
    underscore; unless no_translations is set, hyphens are rewritten as
    underscores so the names are usable as python attributes.
    """
    node = self._node
    context = self._context
    context.log.trace("__dir__ %s", node.real_schema_path)
    if node.real_schema_path == "":
        search_path = "/" + context.module + ":*"
    else:
        search_path = node.real_schema_path + "/*"
    names = []
    for child in context.schemactx.find_path(search_path):
        name = child.name()
        if name in Types.RESERVED_PYTHON_KEYWORDS:
            name = name + "_"
        if not no_translations and "-" in name:
            name = name.replace("-", "_")
        names.append(name)
    return sorted(names)
class Empty:
    """A YANG 'empty' leaf.

    Such a leaf carries no value: it can be create()d and remove()d, and
    its presence tested with exists(). In an XML representation it appears
    as <emptyleaf/>.
    """

    _NODE_TYPE = "Empty"

    def __init__(self, context, node_schema):
        self.__dict__["_context"] = context
        self.__dict__["_node"] = node_schema

    def __dir__(self):
        # An empty leaf offers no children.
        return []

    def create(self):
        """Mark the empty leaf as present in the datastore."""
        context = self._context
        if context.readonly:
            raise Errors.ReadonlyError()
        context.dal.set(self._node.real_data_path, None, 5)

    def exists(self):
        """Return True when the empty leaf is present."""
        stored = self._context.dal.get(self._node.real_data_path)
        return stored is not None

    def remove(self):
        """Remove the empty leaf from the datastore."""
        self._context.dal.uncreate(self._node.real_data_path)

    def __repr__(self):
        path = self._node.real_data_path
        if not self.exists():
            return "VoodooEmpty{%s} - Does Not Exist" % (path)
        return "VoodooEmpty{%s} - Exists" % (path)
class ContainingNode(Node):
    """Base for nodes that hold children (containers and list elements)."""

    pass
class Choice(Node):
    """A YANG choice; repr shows the data path plus the choice's own name."""

    _NODE_TYPE = "Choice"

    def __repr__(self):
        node = self._node
        schema_tail = node.real_schema_path.split(":")[-1]
        return "Voodoo%s{%s/...%s}" % (
            self._NODE_TYPE,
            node.real_data_path,
            schema_tail,
        )
class Case(Node):
    """A YANG case (one arm of a choice); repr mirrors Choice."""

    _NODE_TYPE = "Case"

    def __repr__(self):
        node = self._node
        schema_tail = node.real_schema_path.split(":")[-1]
        return "Voodoo%s{%s/...%s}" % (
            self._NODE_TYPE,
            node.real_data_path,
            schema_tail,
        )
class LeafList(Node):
    """A YANG leaf-list: an ordered collection of scalar values."""

    _NODE_TYPE = "LeafList"

    def __dir__(self):
        # Leaf-lists expose values, not named children.
        return []

    def create(self, value):
        """Add *value* to the leaf-list and return it.

        Adding a value already present in the list is silently a no-op.
        libyang supports creating leaf-list elements with both
        single/double quotes, however when using XPATH to select leaf-list
        elements this isn't possible.
        """
        context = self._context
        node = self._node
        if context.readonly:
            raise Errors.ReadonlyError()
        if value == "":
            raise Errors.ListItemCannotBeBlank(node.real_data_path)
        backend_type = Common.Utils.get_yang_type_from_path(
            context, node.real_schema_path, value
        )
        context.dal.add(node.real_data_path, value, backend_type)
        return value

    def __iter__(self):
        # Return Object
        return LeafListIterator(self._context, self._node, self)

    def __len__(self):
        values = self._context.dal.gets(self._node.real_data_path)
        return len(list(values))

    def __delitem__(self, value):
        context = self._context
        if context.readonly:
            raise Errors.ReadonlyError()
        context.dal.remove(self._node.real_data_path, value)
        return None

    def get_index(self, index):
        """Return the leaf-list value stored at *index*.

        Example:
            node.get_index(0) - value of the leaf-list at that index.

        Raises:
            Errors.LeafListDoesNotContainIndexError: index out of range.
        """
        node = self._node
        values = list(self._context.dal.gets(node.real_data_path))
        try:
            return values[index]
        except IndexError:
            raise Errors.LeafListDoesNotContainIndexError(
                len(values), index, node.real_data_path
            )
class List(ContainingNode):
    """Represents a list from a yang module.

    New entries can be created on this object with the create object, each
    key defined in the yang module should be passed in paying attention to
    the order of the keys.
    (e.g.
    key1 = True
    key2 = False
    root.twokeylist.create(key1, key2)

    To obtain a specific instance from the list call the get method, passing
    each key from the yang module. It is not possible to provide partial keys
    in a hope to get multiple records.

    The datastore will maintain the order list elements are added, if you
    prefer to see items sorted (based on XPATH) then you may iterate around
    <this object>._xpath_sorted() instead.

    Note: values for the list keys should be provided as they would in an
    XPATH express. i.e. python True > 'true', False > 'false'
    """

    _NODE_TYPE = "List"
    _SORTED_LIST = False

    # The helpers below factor out the key->predicate->ListElement
    # construction that was previously copy-pasted across get(),
    # __getitem__(), __contains__() and __delitem__().

    def _predicates(self, args):
        """Build the XPATH predicate string for the key values in *args*."""
        (keys, values) = Common.Utils.get_key_val_tuples(
            self._context, self._node, list(args)
        )
        return Common.Utils.encode_xpath_predicates("", keys, values)

    def _wrap_element(self, data_path):
        """Wrap a concrete list-element xpath in a ListElement node."""
        node = self._node
        # Return Object
        new_node = Common.YangNode(
            node.libyang_node, node.real_schema_path, data_path
        )
        return ListElement(self._context, new_node, self)

    def _existing_element(self, args):
        """Resolve *args* (key values) to an existing element or raise."""
        node = self._node
        predicates = self._predicates(args)
        if not self._context.dal.has_item(node.real_data_path + predicates):
            raise Errors.ListDoesNotContainElement(node.real_data_path + predicates)
        return self._wrap_element(node.real_data_path + predicates)

    def create(self, *args):
        """Create a list element.

        For composite-key lists then each key within the yang module must be
        provided in the same order it is defined within the yang module.

        Example:
            node.create(value) - create item where there is a single key.
            node.create(value1, value2) - create item where there is a composite key.

        Returns a ListElement Node of the newly created list item.

        Calling the create method a second time will not overwrite/remove data.
        """
        context = self._context
        node = self._node
        (keys, values) = Common.Utils.get_key_val_tuples(context, node, list(args))
        node = Common.Utils.get_yangnode(node, context, keys=keys, values=values)
        if context.readonly:
            raise Errors.ReadonlyError()
        context.dal.create(node.real_data_path, keys=keys, values=values)
        # Return Object
        new_node = Common.YangNode(
            node.libyang_node, node.real_schema_path, node.real_data_path
        )
        return ListElement(context, new_node, self)

    def __getattr__(self, attr):
        # Only the magic '_xpath_sorted' view is reachable as an attribute;
        # everything else must go through an element.
        node = self._node
        context = self._context
        if attr == "_xpath_sorted":
            # Return Object
            context._trace("SortedList", node, context, self)
            return SortedList(context, node, self)
        raise Errors.ListItemsMustBeAccesssedByAnElementError(node.real_data_path, attr)

    def __setattr__(self, attr, value):
        # Lists reject direct attribute assignment entirely.
        raise Errors.ListItemsMustBeAccesssedByAnElementError(
            self._node.real_data_path, attr
        )

    def __len__(self):
        context = self.__dict__["_context"]
        return context.dal.gets_len(self._node.real_data_path)

    def elements(self, sorted_by_xpath=False):
        """Return a generator of xpaths for each value in the list.

        The datastore will maintain the order entries were originally added
        into the list. The sorted_by_xpath argument can be used to sort the
        list by xpath.
        """
        context = self._context
        node = self._node
        if sorted_by_xpath:
            return context.dal.gets_sorted(
                node.real_data_path, node.real_schema_path, ignore_empty_lists=True
            )
        return context.dal.gets_unsorted(
            node.real_data_path, node.real_schema_path, ignore_empty_lists=True
        )

    def keys(self, sorted_by_xpath=False):
        """Return the (python-safe) names of this list's key leaves.

        Note: sorted_by_xpath is currently not supported.
        """
        keys = Common.Utils.get_keys_from_a_node(self._node)
        translated_keys = []
        for k in keys:
            if k in Types.RESERVED_PYTHON_KEYWORDS:
                translated_keys.append(k.replace("-", "_") + "_")
            else:
                translated_keys.append(k.replace("-", "_"))
        translated_keys.sort()
        return translated_keys

    def __dir__(self):
        return self.keys()

    def get(self, *args):
        """Get an item from the list

        Example:
            node.get(value) - fetch item where there is a single key.
            node.get(value1, value2) - fetch item where there is a composite key.

        Returns a ListElement Node.

        Alternatively access data by node[value] or node[value1, value2]
        """
        return self._existing_element(args)

    def get_index(self, index):
        """Get an item from the list by index.

        Example:
            node.get_index(0) - returns the first list element assuming the
            list is big enough.

        Raises:
            Errors.ListDoesNotContainIndexError: when index is out of range.
        """
        node = self._node
        results = list(
            self._context.dal.gets_unsorted(
                node.real_data_path, node.real_schema_path, ignore_empty_lists=True
            )
        )
        try:
            result = results[index]
        except IndexError:
            raise Errors.ListDoesNotContainIndexError(
                len(results), index, node.real_data_path
            )
        return self._wrap_element(result)

    def __iter__(self):
        # Return Object
        return ListIterator(
            self._context, self._node, self, xpath_sorted=self._SORTED_LIST
        )

    def __contains__(self, *args):
        predicates = self._predicates(args)
        return self._context.dal.has_item(self._node.real_data_path + predicates)

    def __getitem__(self, *args):
        return self._existing_element(args)

    def __delitem__(self, *args):
        context = self._context
        if context.readonly:
            raise Errors.ReadonlyError()
        predicates = self._predicates(args)
        context.dal.uncreate(self._node.real_data_path + predicates)
        return None
class SortedList(List):
    """Represents a list from a yang module, iterated in XPATH order.

    New entries can be created on this object with the create object, each
    key defined in the yang module should be passed in paying attention to
    the order of the keys.
    (e.g.
    key1 = True
    key2 = False
    root.twokeylist.create(key1, key2)

    To obtain a specific instance from the list call the get method, passing
    each key from the yang module. It is not possible to provide partial keys
    in a hope to get multiple records.

    Note: values for the list keys should be provided as they would in an
    XPATH express. i.e. python True > 'true', False > 'false'

    The only difference between this object and the base List type is that
    in this case we go to lengths to sort the list (based on XPATH) rather
    than the order things are defined in the datastore.
    """

    _NODE_TYPE = "SortedList"
    # Inherited iteration honours this flag (see List.__iter__).
    _SORTED_LIST = True
class ListIterator(Node):
    """Iterator over the elements of a (possibly XPATH-sorted) list."""

    _NODE_TYPE = "ListIterator"

    def __init__(self, context, node, parent_self, xpath_sorted=False):
        state = self.__dict__
        state["_context"] = context
        state["_node"] = node
        state["_parent"] = parent_self
        state["_xpath_sorted"] = xpath_sorted
        if xpath_sorted:
            iterator = context.dal.gets_sorted(
                node.real_data_path, node.real_schema_path, ignore_empty_lists=True
            )
        else:
            iterator = context.dal.gets_unsorted(
                node.real_data_path, node.real_schema_path, ignore_empty_lists=True
            )
        state["_iterator"] = iterator

    def __next__(self):
        node = self._node
        this_xpath = next(self._iterator)
        # Return Object
        new_node = Common.YangNode(node.libyang_node, node.real_schema_path, this_xpath)
        return ListElement(self._context, new_node, self._parent)

    def __repr__(self):
        base_repr = self._base_repr()
        if not self.__dict__["_xpath_sorted"]:
            return base_repr + " Sorted By User (datastore)"
        return base_repr + " Sorted By XPATH"
class LeafListIterator(Node):
    """Iterator over the raw values of a leaf-list.

    Unlike ListIterator this yields plain datastore values rather than
    wrapper objects.
    """

    # Fixed copy-paste from ListIterator: label this iterator with its own
    # node type so reprs/logs identify it correctly.
    _NODE_TYPE = "LeafListIterator"

    def __init__(self, context, node, parent_self, xpath_sorted=False):
        # xpath_sorted is accepted for signature parity with ListIterator;
        # values here always come back in datastore order.
        self.__dict__["_context"] = context
        self.__dict__["_node"] = node
        self.__dict__["_parent"] = parent_self
        self.__dict__["_xpath_sorted"] = xpath_sorted
        self.__dict__["_iterator"] = context.dal.gets(node.real_data_path)

    def __next__(self):
        return next(self.__dict__["_iterator"])
class ListElement(Node):
    """
    Represents a specific instance of a list element from a yang module.
    The child nodes are accessible from this node.
    """

    _NODE_TYPE = "ListElement"
class Container(ContainingNode):
    """
    Represents a Container from a yang module, with access to the child
    elements.
    """

    _NODE_TYPE = "Container"
class PresenceContainer(Container):
    """A YANG presence container.

    exists() returns True if this container exists, whether it was created
    explicitly or implicitly because of children.
    """

    _NODE_TYPE = "PresenceContainer"

    def exists(self):
        """Return True when this presence container is present."""
        return self._context.dal.container(self._node.real_data_path)

    def create(self):
        """Create the container explicitly; returns a fresh wrapper."""
        context = self._context
        node = self._node
        if context.readonly:
            raise Errors.ReadonlyError()
        context.dal.create_container(node.real_data_path)
        return PresenceContainer(context, node, self)

    def destroy(self):
        """Remove the container from the datastore."""
        context = self._context
        if context.readonly:
            raise Errors.ReadonlyError()
        context.dal.uncreate(self._node.real_data_path)
        return None

    def __repr__(self):
        base_repr = self._base_repr()
        if not self.exists():
            return base_repr + " Does Not Exist"
        return base_repr + " Exists"
class Root(ContainingNode):
    """The top node of a single YANG module's data tree."""

    _NODE_TYPE = "Root"

    def __repr__(self):
        module = self.__dict__["_context"].module
        return "VoodooTopNode{} YANG Module: " + module
class SuperRoot:
    """Aggregates the root nodes of several sessions under one object.

    Each attached session contributes one attachment point (a top-level
    node name) which becomes an attribute of this object.
    """

    _NODE_TYPE = "SuperRoot"

    def __init__(self):
        # Map of attachment-point name -> session that provided it.
        self._nodes = {}
        # Root node of the most recently attached session.
        self._node = None

    def attach_node_from_session(self, session, attachment_point):
        """Attach *attachment_point* from *session*'s root node.

        Args:
            session: An object whose get_node() returns a root node.
            attachment_point: Name of the top-level node to expose here.

        Returns:
            The attached node.

        Raises:
            ValueError: the root node has no such attachment point.
        """
        node = session.get_node()
        if not hasattr(node, attachment_point):
            # Fixed: replaced the placeholder message "thing isnthere" with
            # something actionable.
            raise ValueError(
                "Session root node has no attachment point %r" % attachment_point
            )
        setattr(self, attachment_point, getattr(node, attachment_point))
        self._nodes[attachment_point] = session
        self._node = node
        return getattr(node, attachment_point)

    def __dir__(self):
        return sorted(self._nodes)

    def __repr__(self):
        return "VoodooSuperRoot{}"
|
import os
import numpy as np
import torch
import torchvision
from torch import nn, optim
from torch.utils.data.sampler import SequentialSampler, SubsetRandomSampler
from common import train, test, save_state, save_data, draw_line_graph, draw_multi_lines_graph
# model
class MnistClassifierMSELoss(nn.Module):
    """Single-layer linear classifier for MNIST: 784 inputs -> 10 scores."""

    def __init__(self):
        super().__init__()
        # One fully-connected layer maps the flattened image to class scores.
        self.fc = nn.Linear(in_features=(28 * 28), out_features=10)  # 784 -> 10

    def forward(self, x):
        """Flatten the image batch and apply the linear layer."""
        flat = x.view(-1, (28 * 28))
        return self.fc(flat)
# experiment
def _make_paths():
    """Create (if needed) and return the experiment's working directories."""
    path = dict()
    path['project'] = os.path.dirname(os.path.abspath(__file__))
    path['state'] = os.path.join(path['project'], 'epoch')
    path['dataset'] = os.path.join(path['project'], 'dataset')
    path['graph'] = os.path.join(path['project'], 'graph')
    path['array'] = os.path.join(path['project'], 'array')
    for directory in path.values():
        if not os.path.exists(directory):
            os.mkdir(directory)
    return path


def _save_metric(epochs, values, path, metric_key, y_label, title_suffix):
    """Persist one metric series as .npy and plot it as a line graph."""
    save_data(array=values, directory=path['array'],
              file_name='mnist_classifier_mseloss_{key}.npy'.format(key=metric_key))
    draw_line_graph(x=epochs, y=values,
                    x_label='Epoch', y_label=y_label,
                    title='Mnist Classifier MSELoss ' + title_suffix,
                    directory=path['graph'],
                    file_name='mnist_classifier_mseloss_{key}.png'.format(key=metric_key))


def mnist_classifier_mseloss():
    """Train, validate and test a linear MNIST classifier with MSE loss.

    Side effects: downloads MNIST into ./dataset, saves per-epoch model
    state into ./epoch, metric arrays into ./array and graphs into ./graph
    (all relative to this file's directory).
    """
    # paths
    path = _make_paths()
    # parameters
    batch_size = 1000
    number_of_epochs = 20
    learning_rate = 1e-3
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    mean = 0.1307  # MNIST training-set normalization statistics
    std = 0.3081
    loss = nn.MSELoss()
    info_per_batch = 6
    validation_ratio = 0.1
    # transform
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=(mean,), std=(std,))
    ])
    # dataset
    train_dataset = torchvision.datasets.MNIST(root=path['dataset'], train=True, transform=transform, download=True)
    test_dataset = torchvision.datasets.MNIST(root=path['dataset'], train=False, transform=transform, download=True)
    # validation dataset: hold out the last validation_ratio of the training set
    validation_limit = int((1 - validation_ratio) * len(train_dataset))
    index_list = list(range(len(train_dataset)))
    train_indexes, validation_indexes = index_list[:validation_limit], index_list[validation_limit:]
    train_sampler = SubsetRandomSampler(train_indexes)
    # Fixed: SequentialSampler(validation_indexes) yields positions
    # 0..len(validation_indexes)-1, i.e. the *start* of the training set,
    # not the held-out tail. SubsetRandomSampler draws exactly the
    # held-out indexes.
    validation_sampler = SubsetRandomSampler(validation_indexes)
    # dataset loaders
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, sampler=train_sampler)
    validation_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,
                                                    sampler=validation_sampler)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size)
    # model
    model = MnistClassifierMSELoss().to(device)
    # optimizer
    optimizer = optim.SGD(params=model.parameters(), lr=learning_rate)
    epochs = np.arange(start=1, stop=(number_of_epochs + 1), step=1, dtype=int)
    print('Mnist Classifier MSELoss')
    train_losses = []
    train_accuracies = []
    validation_losses = []
    validation_accuracies = []
    test_losses = []
    test_accuracies = []
    for epoch in epochs:
        info = 'Epoch {epoch_index}/{number_of_epochs}'
        print(info.format(epoch_index=epoch, number_of_epochs=number_of_epochs))
        # train
        train_loss, train_accuracy = train(model=model,
                                           device=device,
                                           loader=train_loader,
                                           optimizer=optimizer,
                                           loss=loss,
                                           info_per_batch=info_per_batch,
                                           one_hot_encoded=True)
        info = 'Train: Average Loss: {train_loss:.5f}, Accuracy: % {train_accuracy:.2f}'
        print(info.format(train_loss=train_loss, train_accuracy=(100 * train_accuracy)))
        train_losses.append(train_loss)
        train_accuracies.append(train_accuracy)
        # validation
        validation_loss, validation_accuracy = test(model=model,
                                                    loader=validation_loader,
                                                    device=device,
                                                    loss=loss,
                                                    info_per_batch=info_per_batch,
                                                    info_name='Validation',
                                                    one_hot_encoded=True)
        info = 'Validation: Average Loss: {validation_loss:.5f}, Accuracy: % {validation_accuracy:.2f}'
        print(info.format(validation_loss=validation_loss, validation_accuracy=(100 * validation_accuracy)))
        validation_losses.append(validation_loss)
        validation_accuracies.append(validation_accuracy)
        # test
        test_loss, test_accuracy = test(model=model,
                                        loader=test_loader,
                                        device=device,
                                        loss=loss,
                                        info_per_batch=info_per_batch,
                                        info_name='Test',
                                        one_hot_encoded=True)
        info = 'Test: Average Loss: {test_loss:.5f}, Accuracy: % {test_accuracy:.2f}'
        print(info.format(test_loss=test_loss, test_accuracy=(100 * test_accuracy)))
        test_losses.append(test_loss)
        test_accuracies.append(test_accuracy)
        # epoch state
        state_file_name = 'mnist_classifier_mseloss_epoch_{epoch_index}.pkl'.format(epoch_index=epoch)
        save_state(model=model, directory=path['state'], file_name=state_file_name)
    # per-metric persistence (previously six copy-pasted save/plot blocks)
    _save_metric(epochs, train_losses, path, 'train_loss', 'Loss', 'Train Loss')
    _save_metric(epochs, train_accuracies, path, 'train_accuracy', 'Accuracy', 'Train Accuracy')
    _save_metric(epochs, validation_losses, path, 'validation_loss', 'Loss', 'Validation Loss')
    _save_metric(epochs, validation_accuracies, path, 'validation_accuracy', 'Accuracy', 'Validation Accuracy')
    _save_metric(epochs, test_losses, path, 'test_loss', 'Loss', 'Test Loss')
    _save_metric(epochs, test_accuracies, path, 'test_accuracy', 'Accuracy', 'Test Accuracy')
    # loss
    draw_multi_lines_graph(
        lines=[
            dict(label='Train', data=dict(x=epochs, y=train_losses)),
            dict(label='Validation', data=dict(x=epochs, y=validation_losses)),
            dict(label='Test', data=dict(x=epochs, y=test_losses))
        ],
        x_label='Epoch', y_label='Loss',
        title='Mnist Classifier MSELoss Loss',
        directory=path['graph'],
        file_name='mnist_classifier_mseloss_loss.png')
    # accuracy
    draw_multi_lines_graph(
        lines=[
            dict(label='Train', data=dict(x=epochs, y=train_accuracies)),
            dict(label='Validation', data=dict(x=epochs, y=validation_accuracies)),
            dict(label='Test', data=dict(x=epochs, y=test_accuracies))
        ],
        x_label='Epoch', y_label='Accuracy',
        title='Mnist Classifier MSELoss Accuracy',
        directory=path['graph'],
        file_name='mnist_classifier_mseloss_accuracy.png')
# main
if __name__ == '__main__':
    # Run the full train/validate/test experiment when executed as a script.
    mnist_classifier_mseloss()
|
import logging
import time
import os
import psycopg2
log = logging.getLogger(__name__)
class LoadTest(object):
    """bfg-style load-test gun that fires INSERTs at a postgres database."""

    def __init__(self, gun):
        # 'gun' is supplied by the load-testing framework and provides the
        # measure() context manager used to report timings.
        self.gun = gun
        self.host = 'web'

    def case1(self, missile):
        """Insert one row into list_items and report the elapsed time."""
        dt = str(time.time())
        conn = psycopg2.connect(dbname='goby_test', user='postgres',
                                host='postgres')
        try:
            cursor = conn.cursor()
            ts = time.time()
            try:
                cursor.execute('INSERT INTO list_items (title, checked) VALUES (%s, %s)', (dt, 'f'))
            finally:
                # Always release the cursor, even when the INSERT fails.
                cursor.close()
            conn.commit()
        finally:
            # Fixed: the connection used to leak if execute()/commit() raised.
            conn.close()
        te = time.time()
        with self.gun.measure("case1_step1") as sample:
            log.info("Shoot case 1: %s", missile)
            # interval_real is reported in microseconds.
            sample['interval_real'] = int((te - ts) * 1000000)

    def setup(self, param):
        '''this will be executed in each worker before the test starts'''
        log.info("Setting up LoadTest: %s", param)

    def teardown(self):
        '''this will be executed in each worker after the end of the test'''
        log.info("Tearing down LoadTest")
        # os._exit never returns; the 'return 0' that used to follow it was
        # unreachable and has been removed.
        os._exit(0)
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deploy Slim models across multiple clones and replicas.
# TODO(sguada) docstring paragraph by (a) motivating the need for the file and
# (b) defining clones.
# TODO(sguada) describe the high-level components of model deployment.
# E.g. "each model deployment is composed of several parts: a DeploymentConfig,
# which captures A, B and C, an input_fn which loads data.. etc
To easily train a model on multiple GPUs or across multiple machines this
module provides a set of helper functions: `create_clones`,
`optimize_clones` and `deploy`.
Usage:
g = tf.Graph()
# Set up DeploymentConfig
config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.create_global_step()
# Define the inputs
with tf.device(config.inputs_device()):
images, labels = LoadData(...)
inputs_queue = slim.data.prefetch_queue((images, labels))
# Define the optimizer.
with tf.device(config.optimizer_device()):
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Define the model including the loss.
def model_fn(inputs_queue):
images, labels = inputs_queue.dequeue()
predictions = CreateNetwork(images)
slim.losses.log_loss(predictions, labels)
model_dp = model_deploy.deploy(config, model_fn, [inputs_queue],
optimizer=optimizer)
# Run training.
slim.learning.train(model_dp.train_op, my_log_dir,
summary_op=model_dp.summary_op)
The Clone namedtuple holds together the values associated with each call to
model_fn:
* outputs: The return values of the calls to `model_fn()`.
* scope: The scope used to create the clone.
* device: The device used to create the clone.
DeployedModel namedtuple, holds together the values needed to train multiple
clones:
* train_op: An operation that run the optimizer training op and include
all the update ops created by `model_fn`. Present only if an optimizer
was specified.
* summary_op: An operation that run the summaries created by `model_fn`
and process_gradients.
* total_loss: A `Tensor` that contains the sum of all losses created by
`model_fn` plus the regularization losses.
* clones: List of `Clone` tuples returned by `create_clones()`.
DeploymentConfig parameters:
* num_clones: Number of model clones to deploy in each replica.
* clone_on_cpu: True if clones should be placed on CPU.
* replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
* num_replicas: Number of replicas to use.
* num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
* worker_job_name: A name for the worker job.
* ps_job_name: A name for the parameter server job.
TODO(sguada):
- describe side effect to the graph.
- what happens to summaries and update_ops.
- which graph collections are altered.
- write a tutorial on how to use this.
- analyze the possibility of calling deploy more than once.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
slim = tf.contrib.slim
__all__ = [
    'create_clones',
    'deploy',
    'optimize_clones',
    'DeployedModel',
    'DeploymentConfig',
    'Clone',
]

# Namedtuple used to represent a clone during deployment.
Clone = collections.namedtuple(
    'Clone',
    [
        'outputs',  # Whatever model_fn() returned.
        'scope',    # The scope used to create it.
        'device',   # The device used to create.
    ])

# Namedtuple used to represent a DeployedModel, returned by deploy().
DeployedModel = collections.namedtuple(
    'DeployedModel',
    [
        'train_op',    # The `train_op`
        'summary_op',  # The `summary_op`
        'total_loss',  # The loss `Tensor`
        'clones',      # A list of `Clones` tuples.
    ])

# Default parameters for DeploymentConfig
_deployment_params = {
    'num_clones': 1,
    'clone_on_cpu': False,
    'fake_multiple_gpus': False,
    'replica_id': 0,
    'num_replicas': 1,
    'num_ps_tasks': 0,
    'worker_job_name': 'worker',
    'ps_job_name': 'ps',
}
def create_clones(config, model_fn, args=None, kwargs=None):
    """Creates multiple clones according to config using a `model_fn`.

    The returned values of `model_fn(*args, **kwargs)` are collected along with
    the scope and device used to create it in a namedtuple
    `Clone(outputs, scope, device)`

    Note: it is assumed that any loss created by `model_fn` is collected at
    the tf.GraphKeys.LOSSES collection.

    To recover the losses, summaries or update_ops created by the clone use:
    ```python
    losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
    ```

    The deployment options are specified by the config object and support
    deploying one or several clones on different GPUs and one or several
    replicas of such clones.

    The argument `model_fn` is called `config.num_clones` times to create the
    model clones as `model_fn(*args, **kwargs)`.

    If `config` specifies deployment on multiple replicas then the default
    tensorflow device is set appropriately for each call to `model_fn` and for
    the slim variable creation functions: model and global variables will be
    created on the `ps` device, the clone operations will be on the `worker`
    device.

    Args:
        config: A DeploymentConfig object.
        model_fn: A callable. Called as `model_fn(*args, **kwargs)`
        args: Optional list of arguments to pass to `model_fn`.
        kwargs: Optional list of keyword arguments to pass to `model_fn`.

    Returns:
        A list of namedtuples `Clone`.
    """
    clones = []
    args = args or []
    kwargs = kwargs or {}
    # Model/global variables are pinned to the configured variables device
    # regardless of which clone device the ops land on.
    with slim.arg_scope([slim.model_variable, slim.variable],
                        device=config.variables_device()):
        # Create clones.
        for i in range(0, config.num_clones):
            with tf.name_scope(config.clone_scope(i)) as clone_scope:
                clone_device = config.clone_device(i)
                with tf.device(clone_device):
                    # reuse=True for every clone after the first so all
                    # clones share a single set of variables.
                    with tf.variable_scope(tf.get_variable_scope(),
                                           reuse=True if i > 0 else None):
                        outputs = model_fn(*args, **kwargs)
                    clones.append(Clone(outputs, clone_scope, clone_device))
    return clones
def _gather_clone_loss(clone, num_clones, regularization_losses):
    """Gather the loss for a single clone.
    Args:
      clone: A Clone namedtuple.
      num_clones: The number of clones being deployed.
      regularization_losses: Possibly empty list of regularization_losses
        to add to the clone losses.
    Returns:
      A tensor for the total loss for the clone. Can be None.
    """
    # The return value.
    sum_loss = None
    # Individual components of the loss that will need summaries.
    clone_loss = None
    regularization_loss = None
    # Compute and aggregate losses on the clone device.
    with tf.device(clone.device):
        all_losses = []
        clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
        if clone_losses:
            clone_loss = tf.add_n(clone_losses, name='clone_loss')
            if num_clones > 1:
                # Scale by 1/num_clones so the sum over clones is an average.
                clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                                    name='scaled_clone_loss')
            all_losses.append(clone_loss)
        if regularization_losses:
            regularization_loss = tf.add_n(regularization_losses,
                                           name='regularization_loss')
            all_losses.append(regularization_loss)
        if all_losses:
            sum_loss = tf.add_n(all_losses)
    # Add the summaries out of the clone device block.
    if clone_loss is not None:
        tf.summary.scalar('clone_loss', clone_loss)
        # tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
    if regularization_loss is not None:
        tf.summary.scalar('regularization_loss', regularization_loss)
    return sum_loss
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
    """Compute the total loss and its gradients for a single clone.
    Args:
      optimizer: A tf.Optimizer object.
      clone: A Clone namedtuple.
      num_clones: The number of clones being deployed.
      regularization_losses: Possibly empty list of regularization_losses
        to add to the clone losses.
      **kwargs: Dict of kwarg to pass to compute_gradients().
    Returns:
      A tuple (clone_loss, clone_grads_and_vars).
      - clone_loss: A tensor for the total loss for the clone. Can be None.
      - clone_grads_and_vars: List of (gradient, variable) for the clone.
        Can be empty.
    """
    total = _gather_clone_loss(clone, num_clones, regularization_losses)
    # No loss in this clone's collection: nothing to differentiate.
    if total is None:
        return None, None
    # Gradients are computed on the same device the clone ran on.
    with tf.device(clone.device):
        grads_and_vars = optimizer.compute_gradients(total, **kwargs)
    return total, grads_and_vars
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
    """Compute clone losses and gradients for the given list of `Clones`.
    Note: The regularization_losses are added to the first clone losses.
    Args:
      clones: List of `Clones` created by `create_clones()`.
      optimizer: An `Optimizer` object.
      regularization_losses: Optional list of regularization losses. If None it
        will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
        exclude them.
      **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
    Returns:
      A tuple (total_loss, grads_and_vars).
      - total_loss: A Tensor containing the average of the clone losses
        including the regularization loss.
      - grads_and_vars: A List of tuples (gradient, variable) containing the
        sum of the gradients for each variable.
    """
    grads_and_vars = []
    clones_losses = []
    num_clones = len(clones)
    if regularization_losses is None:
        regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
    for clone in clones:
        with tf.name_scope(clone.scope):
            clone_loss, clone_grad = _optimize_clone(
                optimizer, clone, num_clones, regularization_losses, **kwargs)
            if clone_loss is not None:
                clones_losses.append(clone_loss)
                grads_and_vars.append(clone_grad)
            # Only use regularization_losses for the first clone
            regularization_losses = None
    # Compute the total_loss summing all the clones_losses.
    total_loss = tf.add_n(clones_losses, name='total_loss')
    # Sum the gradients across clones (they were pre-scaled by 1/num_clones).
    grads_and_vars = _sum_clones_gradients(grads_and_vars)
    return total_loss, grads_and_vars
def deploy(config,
           model_fn,
           args=None,
           kwargs=None,
           optimizer=None,
           summarize_gradients=False):
    """Deploys a Slim-constructed model across multiple clones.
    The deployment options are specified by the config object and support
    deploying one or several clones on different GPUs and one or several replicas
    of such clones.
    The argument `model_fn` is called `config.num_clones` times to create the
    model clones as `model_fn(*args, **kwargs)`.
    The optional argument `optimizer` is an `Optimizer` object. If not `None`,
    the deployed model is configured for training with that optimizer.
    If `config` specifies deployment on multiple replicas then the default
    tensorflow device is set appropriatly for each call to `model_fn` and for the
    slim variable creation functions: model and global variables will be created
    on the `ps` device, the clone operations will be on the `worker` device.
    Args:
      config: A `DeploymentConfig` object.
      model_fn: A callable. Called as `model_fn(*args, **kwargs)`
      args: Optional list of arguments to pass to `model_fn`.
      kwargs: Optional list of keyword arguments to pass to `model_fn`.
      optimizer: Optional `Optimizer` object. If passed the model is deployed
        for training with that optimizer.
      summarize_gradients: Whether or not add summaries to the gradients.
    Returns:
      A `DeployedModel` namedtuple.
    """
    # Gather initial summaries.
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
    # Create Clones.
    clones = create_clones(config, model_fn, args, kwargs)
    first_clone = clones[0]
    # Gather update_ops from the first clone. These contain, for example,
    # the updates for the batch_norm variables created by model_fn.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)
    train_op = None
    total_loss = None
    with tf.device(config.optimizer_device()):
        if optimizer:
            # Place the global step on the device storing the variables.
            with tf.device(config.variables_device()):
                global_step = slim.get_or_create_global_step()
            # Compute the gradients for the clones.
            total_loss, clones_gradients = optimize_clones(clones, optimizer)
            if clones_gradients:
                if summarize_gradients:
                    # Add summaries to the gradients.
                    summaries |= set(_add_gradients_summaries(clones_gradients))
                # Create gradient updates.
                grad_updates = optimizer.apply_gradients(clones_gradients,
                                                         global_step=global_step)
                update_ops.append(grad_updates)
                update_op = tf.group(*update_ops)
                # train_op evaluates to total_loss, but only after every
                # update op (incl. the gradient application) has run.
                train_op = control_flow_ops.with_dependencies([update_op],
                                                              total_loss,
                                                              name='train_op')
        else:
            # No optimizer: only aggregate the losses (evaluation mode).
            clones_losses = []
            regularization_losses = tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES)
            for clone in clones:
                with tf.name_scope(clone.scope):
                    clone_loss = _gather_clone_loss(clone, len(clones),
                                                    regularization_losses)
                    if clone_loss is not None:
                        clones_losses.append(clone_loss)
                    # Only use regularization_losses for the first clone
                    regularization_losses = None
            if clones_losses:
                total_loss = tf.add_n(clones_losses, name='total_loss')
    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
                                       first_clone.scope))
    if total_loss is not None:
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar('total_loss', total_loss))
    if summaries:
        # Merge all summaries together.
        summary_op = tf.merge_summary(list(summaries), name='summary_op')
    else:
        summary_op = None
    return DeployedModel(train_op, summary_op, total_loss, clones)
def _sum_clones_gradients(clone_grads):
"""Calculate the sum gradient for each shared variable across all clones.
This function assumes that the clone_grads has been scaled appropriately by
1 / num_clones.
Args:
clone_grads: A List of List of tuples (gradient, variable), one list per
`Clone`.
Returns:
List of tuples of (gradient, variable) where the gradient has been summed
across all clones.
"""
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
def _add_gradients_summaries(grads_and_vars):
    """Add histogram summaries to gradients.
    Note: The summaries are also added to the SUMMARIES collection.
    Args:
      grads_and_vars: A list of gradient to variable pairs (tuples).
    Returns:
      The _list_ of the added summaries for grads_and_vars.
    """
    summaries = []
    for grad, var in grads_and_vars:
        if grad is not None:
            # IndexedSlices (sparse gradients) carry the values in `.values`.
            if isinstance(grad, tf.IndexedSlices):
                grad_values = grad.values
            else:
                grad_values = grad
            summaries.append(tf.histogram_summary(var.op.name + ':gradient',
                                                  grad_values))
            summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
                                                  tf.global_norm([grad_values])))
        else:
            tf.logging.info('Var %s has no gradient', var.op.name)
    return summaries
class DeploymentConfig(object):
    """Configuration for deploying a model with `deploy()`.
    You can pass an instance of this class to `deploy()` to specify exactly
    how to deploy the model to build. If you do not pass one, an instance built
    from the default deployment_hparams will be used.
    """

    def __init__(self,
                 num_clones=1,
                 clone_on_cpu=False,
                 fake_multiple_gpus=False,
                 replica_id=0,
                 num_replicas=1,
                 num_ps_tasks=0,
                 worker_job_name='worker',
                 ps_job_name='ps'):
        """Create a DeploymentConfig.
        The config describes how to deploy a model across multiple clones and
        replicas. The model will be replicated `num_clones` times in each replica.
        If `clone_on_cpu` is True, each clone will placed on CPU.
        If `fake_multiple_gpus` is True, the model will only be replicated once on
        a single GPU. This trick enables larger batch sizes, necessary for training
        deep networks such as InceptionV3/V4, on a single GPU.
        If `num_replicas` is 1, the model is deployed via a single process. In that
        case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored.
        If `num_replicas` is greater than 1, then `worker_device` and `ps_device`
        must specify TensorFlow devices for the `worker` and `ps` jobs and
        `num_ps_tasks` must be positive.
        Args:
          num_clones: Number of model clones to deploy in each replica.
          clone_on_cpu: If True clones would be placed on CPU.
          fake_multiple_gpus: If True, all clones share a single GPU device.
          replica_id: Integer. Index of the replica for which the model is
            deployed. Usually 0 for the chief replica.
          num_replicas: Number of replicas to use.
          num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
          worker_job_name: A name for the worker job.
          ps_job_name: A name for the parameter server job.
        Raises:
          ValueError: If the arguments are invalid.
        """
        if num_replicas > 1:
            if num_ps_tasks < 1:
                raise ValueError('When using replicas num_ps_tasks must be positive')
        if num_replicas > 1 or num_ps_tasks > 0:
            if not worker_job_name:
                raise ValueError('Must specify worker_job_name when using replicas')
            if not ps_job_name:
                raise ValueError('Must specify ps_job_name when using parameter server')
        if replica_id >= num_replicas:
            raise ValueError('replica_id must be less than num_replicas')
        self._num_clones = num_clones
        self._clone_on_cpu = clone_on_cpu
        self._fake_multiple_gpus = fake_multiple_gpus
        self._replica_id = replica_id
        self._num_replicas = num_replicas
        self._num_ps_tasks = num_ps_tasks
        # Device prefixes are empty strings in the single-process case.
        self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
        self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''

    @property
    def num_clones(self):
        return self._num_clones

    @property
    def clone_on_cpu(self):
        return self._clone_on_cpu

    @property
    def fake_multiple_gpus(self):
        return self._fake_multiple_gpus

    @property
    def replica_id(self):
        return self._replica_id

    @property
    def num_replicas(self):
        return self._num_replicas

    @property
    def num_ps_tasks(self):
        return self._num_ps_tasks

    @property
    def ps_device(self):
        return self._ps_device

    @property
    def worker_device(self):
        return self._worker_device

    def caching_device(self):
        """Returns the device to use for caching variables.
        Variables are cached on the worker CPU when using replicas.
        Returns:
          A device string or None if the variables do not need to be cached.
        """
        if self._num_ps_tasks > 0:
            # Cache each variable on the device of the op that uses it.
            return lambda op: op.device
        else:
            return None

    def clone_device(self, clone_index):
        """Device used to create the clone and all the ops inside the clone.
        Args:
          clone_index: Int, representing the clone_index.
        Returns:
          A value suitable for `tf.device()`.
        Raises:
          ValueError: if `clone_index` is greater or equal to the number of clones".
        """
        if clone_index >= self._num_clones:
            raise ValueError('clone_index must be less than num_clones')
        device = ''
        if self._num_ps_tasks > 0:
            device += self._worker_device
        if self._clone_on_cpu:
            device += '/device:CPU:0'
        else:
            # With fake_multiple_gpus, every clone stays on the default GPU.
            if self._num_clones > 1 and not self._fake_multiple_gpus:
                device += '/device:GPU:%d' % clone_index
        return device

    def clone_scope(self, clone_index):
        """Name scope to create the clone.
        Args:
          clone_index: Int, representing the clone_index.
        Returns:
          A name_scope suitable for `tf.name_scope()`.
        Raises:
          ValueError: if `clone_index` is greater or equal to the number of clones".
        """
        if clone_index >= self._num_clones:
            raise ValueError('clone_index must be less than num_clones')
        scope = ''
        if self._num_clones > 1:
            scope = 'clone_%d' % clone_index
        return scope

    def optimizer_device(self):
        """Device to use with the optimizer.
        Returns:
          A value suitable for `tf.device()`.
        """
        if self._num_ps_tasks > 0 or self._num_clones > 0:
            return self._worker_device + '/device:CPU:0'
        else:
            return ''

    def inputs_device(self):
        """Device to use to build the inputs.
        Returns:
          A value suitable for `tf.device()`.
        """
        device = ''
        if self._num_ps_tasks > 0:
            device += self._worker_device
        device += '/device:CPU:0'
        return device

    def variables_device(self):
        """Returns the device to use for variables created inside the clone.
        Returns:
          A value suitable for `tf.device()`.
        """
        device = ''
        if self._num_ps_tasks > 0:
            device += self._ps_device
        device += '/device:CPU:0'

        class _PSDeviceChooser(object):
            """Slim device chooser for variables when using PS."""

            def __init__(self, device, tasks):
                self._device = device
                self._tasks = tasks
                # Next ps task index, advanced round-robin per Variable op.
                self._task = 0

            def choose(self, op):
                if op.device:
                    return op.device
                node_def = op if isinstance(op, tf.NodeDef) else op.node_def
                if node_def.op == 'Variable':
                    # Spread variables round-robin across the ps tasks.
                    t = self._task
                    self._task = (self._task + 1) % self._tasks
                    d = '%s/task:%d' % (self._device, t)
                    return d
                else:
                    return op.device

        if not self._num_ps_tasks:
            return device
        else:
            chooser = _PSDeviceChooser(device, self._num_ps_tasks)
            return chooser.choose
|
from numpy import loadtxt,arctan,pi,arange,array, asarray, linspace, zeros
from matplotlib.pyplot import plot
from .utils import snip,convolve
import xml.etree.ElementTree as et
from scipy.interpolate import interp1d
from .calibration import Calibration
class Spectra():
    """Base container for a 1D spectrum: counts indexed by channel number."""

    def __init__(self):
        # Every spectrum owns a Calibration that is bound back to it.
        self.calibration = Calibration(self)

    def from_array(self, x):
        """Load counts from an in-memory sequence; returns self (fluent)."""
        self.counts = x
        self.channel = arange(len(self.counts))
        return self

    def from_file(self, filename):
        """Load counts from the second column of a text file; returns self."""
        self.counts = loadtxt(filename, unpack=True, usecols=1)
        self.channel = arange(len(self.counts))
        return self

    def from_Data(self, data, x=0, y=0):
        """Extract the spectrum at map position (x, y) of a Data container."""
        self.counts = data.data[x, y]
        self.channel = arange(len(self.counts), dtype='int')
        return self
class SpectraXRF(Spectra):
    """X-ray fluorescence spectrum; currently identical to the Spectra base."""

    def __init__(self):
        super().__init__()
class SyntheticSpectraXRF(Spectra):
    """Synthetic XRF spectrum read from an XMI-MSIM `.xmso` output file.

    Alongside the convolved counts, it extracts from the simulation input the
    reference-layer weight fractions for the elements in `rl_atnum_list`, the
    reference/sub layer thicknesses and the detector live time, plus the
    per-element fluorescence line totals.
    """

    def __init__(self, rl_atnum_list, skip_element=False):
        """
        Args:
            rl_atnum_list: list of int atomic numbers expected in the
                reference layer (stored sorted).
            skip_element: if True, elements present in the file but missing
                from `rl_atnum_list` are ignored instead of raising.
        Raises:
            TypeError: if `rl_atnum_list` is not a list of integers.
        """
        super().__init__()
        self.nbins = None
        if not isinstance(rl_atnum_list, list):
            raise TypeError('list instance expected for elements list')
        for i, item in enumerate(rl_atnum_list):
            if not isinstance(item, int):
                raise TypeError(f'{item} at index {i} is not integer.\nIntegers are expected for Atomic Numbers')
        self.rl_atnum_list = sorted(rl_atnum_list)
        self.skip_element = skip_element

    def set_nbins(self, nbins):
        # Truthy nbins makes from_file() halve the resolution via rebin().
        self.nbins = nbins

    @staticmethod
    def rebin(x, y):
        """Halve the resolution: keep every other x, sum adjacent y pairs."""
        xx = x[::2]
        yp = y[:-1] + y[1:]
        yy = yp[::2]
        return xx, yy

    @staticmethod
    def get_metadata(xml_data, rl_atnum_list, skip=False):
        """Extract (weight_fractions, reflayer_thickness, sublayer_thickness,
        live_time) from a parsed .xmso ElementTree.

        Raises:
            ValueError: an element of the reference layer is not listed in
                `rl_atnum_list` and `skip` is False.
        """
        _time = float(xml_data.find('./xmimsim-input/detector/live_time').text)
        # reference_layer in the file is 1-based.
        reflayer_index = int(xml_data.find("./xmimsim-input/composition/reference_layer").text) - 1
        layers = xml_data.findall("./xmimsim-input/composition/layer")
        reflayer = layers[reflayer_index]
        reflayer_thickness = float(reflayer.find("thickness").text)
        try:
            sublayer = layers[reflayer_index + 1]
        except IndexError:
            # Reference layer is the last one: no sub-layer beneath it.
            sublayer_thickness = 0.0
        else:
            sublayer_thickness = float(sublayer.find("thickness").text)
        weight_fractions = zeros((len(rl_atnum_list)))
        for element in reflayer.findall("element"):
            atnum = int(element.find("atomic_number").text)
            wf = float(element.find("weight_fraction").text)
            try:
                weight_fractions[rl_atnum_list.index(atnum)] = wf
            except ValueError:
                if not skip:
                    raise ValueError(f'element with atomic number {atnum} not found in elements list\nSet skip_element = True to ignore this error')
        return weight_fractions, reflayer_thickness, sublayer_thickness, _time

    @staticmethod
    def get_fluorescence_lines(xml_data, time_correction=None):
        """Yield one Container(symbol, atomic_number, lines) per element,
        where `lines` totals the counts of the K, L, M and other series.
        Counts are scaled by `time_correction` when it is truthy."""

        class Container:
            def __init__(self, symbol, atomic_number, lines):
                self.symbol = symbol
                self.atomic_number = atomic_number
                self.lines = lines

        # Multiplying by 1.0 when no correction is given leaves values unchanged.
        factor = time_correction if time_correction else 1.0
        for element in xml_data.findall(".//fluorescence_line_counts"):
            lines = {"K": 0, "L": 0, "M": 0, "others": 0}
            for fl in element.findall("fluorescence_line"):
                line_type = fl.attrib["type"]
                counts = float(fl.attrib["total_counts"]) * factor
                if line_type.startswith("K"):
                    lines["K"] += counts
                elif line_type.startswith("L"):
                    lines["L"] += counts
                elif line_type.startswith("M"):
                    lines["M"] += counts
                else:
                    lines["others"] += counts
            yield Container(
                symbol=element.attrib["symbol"],
                atomic_number=element.attrib["atomic_number"],
                lines=lines,
            )

    def from_file(self, xmso_filename, interaction_number=2, shape=None, time_correction=None):
        """Populate the spectrum from a .xmso file; returns self, or None on
        a parse error.

        Args:
            xmso_filename: path to the XMI-MSIM output file.
            interaction_number: which interaction order's counts to read.
            shape: optional shape to reshape the counts array to.
            time_correction: optional scale factor applied to counts and lines.
        """
        try:
            xml_data = et.parse(xmso_filename)
        except et.ParseError:
            print(f"Error while parsing\n{xmso_filename}")
            return None
        convoluted = xml_data.find("spectrum_conv")
        self.energy = asarray([e.text for e in convoluted.findall(".//energy")], dtype=float)
        counts = asarray(
            [c.text for c in convoluted.findall(f".//counts[@interaction_number = '{interaction_number}']")],
            dtype=float,
        )
        if time_correction:
            counts = time_correction * counts
        self.counts = counts
        if shape:
            self.counts = self.counts.reshape(*shape)
        if self.nbins:
            self.energy, self.counts = self.rebin(self.energy, self.counts)
        self.channel = arange(self.counts.__len__(), dtype='int16')
        # NOTE: attribute names keep the historical spelling ("thicknes")
        # because external code may already rely on them.
        self.weight_fractions, self.reflayer_thicknes, self.sublayer_thicknes, self.time = self.get_metadata(xml_data, self.rl_atnum_list, skip=self.skip_element)
        self.fluorescence_lines = list(self.get_fluorescence_lines(xml_data, time_correction=time_correction))
        return self

    def time_correction(self, tc):
        """Scale counts and all fluorescence line totals by tc; returns self."""
        self.counts = self.counts * tc
        for l in self.fluorescence_lines:
            for k, v in l.lines.items():
                l.lines[k] = v * tc
        return self
class SpectraXRD(Spectra):
    """X-ray diffraction spectrum: channel counts plus a channel-to-angle
    calibration (arctangent model, see fce_calibration)."""

    def __init__(self):
        super().__init__()

    def from_array(self, x):
        # Load counts from an in-memory array; channels run 0..len-1.
        self.counts = x
        self.channel = arange(self.counts.__len__(), dtype='int')
        #self.calculate_signals()
        return self

    def from_file(self, filename):
        # Counts are read from the second column of a two-column text file.
        counts = loadtxt(filename, unpack=True, dtype='int', usecols=1)
        return self.from_array(counts)

    def from_Data(self, data, x=0, y=0):
        """Extract the spectrum at map position (x, y) of a Data container,
        copying its calibration, counts, rescaling and intensity."""
        self.calibrate_from_parameters(data.opt)
        self.counts = data.data[x, y]
        self.rescaling = data.rescaling[x, y]
        self.intensity = data.intensity[x, y]
        self.channel = arange(self.counts.__len__(), dtype='int')
        return self

    def fromDataf(self, data, i):
        # Flat-index variant of from_Data; assumes 1280 channels per pixel
        # (hard-coded) — TODO confirm against the acquisition format.
        self.calibrate_from_parameters(data.opt)
        self.counts = data.data.reshape(-1, 1280)[i]
        self.rescaling = data.rescaling.reshape(-1)[i]
        self.intensity = data.intensity.reshape(-1, 1280)[i]
        self.channel = arange(self.counts.__len__(), dtype='int')
        return self

    def remove_background(self, n=21, std=3, m=32):
        """Estimate and subtract the baseline, then store the residual
        normalised to its maximum (in self.intensity)."""
        # Presumably convolve() smooths the counts and snip() estimates the
        # baseline (SNIP algorithm) — see .utils; verify parameter meanings.
        background = snip(convolve(self.counts, n=n, std=std), m=m)
        #self.counts_clean = self.counts - self.background
        counts = self.counts - background
        self.rescaling = counts.max()
        self.intensity = counts / self.rescaling
        return self

    def calibrate_from_parameters(self, opt):
        # Delegate to the owned Calibration object; returns self (fluent).
        self.calibration.from_parameters(opt)
        return self

    def calibrate_from_file(self, filename):
        """
        Read data from file and fit the calibration curve
        Calibration parameters are stored in self.opt
        returns: self
        """
        self.calibration.from_file(filename)
        return self

    @staticmethod
    def fce_calibration(x, a, s, beta):
        """
        XRD calibration function
        x is a channel; a, s, beta are the fitted calibration parameters.
        Returns the scattering angle in degrees.
        """
        return (arctan((x + a) / s)) * 180 / pi + beta

    @property
    def theta(self):
        # NOTE(review): reads self.opt, which this class never assigns —
        # presumably set by the Calibration object; verify.
        return self.fce_calibration(self.channel, *self.opt)

    def theta_range(self):
        # Angles at the first and last channel only.
        x = array([self.channel[0], self.channel[-1]])
        return self.fce_calibration(x, *self.opt)

    def plot(self, *args, **kwargs):
        # Convenience wrapper around matplotlib's plot (angle vs intensity).
        plot(self.theta, self.intensity, *args, **kwargs)
class FastSpectraXRD():
    """Lightweight XRD spectrum holder with precomputed 2x/4x/8x
    downsampled intensity pyramids (no Calibration object attached)."""

    def __init__(self):
        # Intentionally empty: instances are populated via fromDataf().
        pass

    def fromDataf(self, data, i):
        """Populate from flat pixel index i of a Data container; returns self."""
        self.opt = data.opt.copy()
        self.counts = data.data.reshape(-1, 1280)[i]
        self.rescaling = data.rescaling.flatten()[i]
        self.intensity = data.intensity.reshape(-1, 1280)[i]
        # Three successive factor-2 downsamplings by averaging neighbour pairs.
        half = 0.5 * (self.intensity[::2] + self.intensity[1::2])
        quarter = 0.5 * (half[::2] + half[1::2])
        eighth = 0.5 * (quarter[::2] + quarter[1::2])
        self.intensity1 = half
        self.intensity2 = quarter
        self.intensity3 = eighth
        # Channel axes: each downsampled level is centred between the
        # channels it averaged.
        self.channel = arange(1280)
        self.channel1 = arange(0.5, 1280, 2)
        self.channel2 = arange(1.5, 1280, 4)
        self.channel3 = arange(3.5, 1280, 8)
        return self

    @staticmethod
    def fce_calibration(x, a, s, beta):
        """
        XRD calibration function: maps channel x to an angle in degrees
        via arctan((x + a) / s), offset by beta.
        """
        return (arctan((x + a) / s)) * 180 / pi + beta

    @property
    def theta(self):
        """Angle (degrees) for every channel, using self.opt parameters."""
        return self.fce_calibration(self.channel, *self.opt)

    def theta_range(self):
        """Angles at the first and last channel only."""
        endpoints = array([self.channel[0], self.channel[-1]])
        return self.fce_calibration(endpoints, *self.opt)

    def plot(self, *args, **kwargs):
        """Plot intensity against angle (thin matplotlib wrapper)."""
        plot(self.theta, self.intensity, *args, **kwargs)
|
from __future__ import absolute_import
from Qshop.celery import app
@app.task
def add(x, y):
    # Trivial Celery smoke-test task: returns the sum of its two arguments.
    return x + y
import json
import requests
from Qshop.settings import DING_URL
@app.task
def sendDing(content="定时任务执行", to="15037609692"):
    """Celery task: push a text message to the DingTalk webhook DING_URL.

    Args:
        content: message body (default is Chinese for "scheduled task ran").
        to: mobile number to @-mention; a falsy value @-mentions everyone.
    Returns:
        The webhook's JSON response decoded into a dict.
    """
    headers = {
        "Content-Type": "application/json",
        "Charset": "utf-8"
    }
    # DingTalk "text" message payload; "at" controls who gets mentioned.
    requests_data = {
        "msgtype": "text",
        "text": {
            "content": content
        },
        "at": {
            "atMobiles": [
            ],
            "isAtAll": True
        }
    }
    if to:
        # Mention a single recipient instead of the whole group.
        requests_data["at"]["atMobiles"].append(to)
        requests_data["at"]["isAtAll"] = False
    else:
        # No recipient given: keep the mobile list empty and @ everyone.
        requests_data["at"]["atMobiles"].clear()
        requests_data["at"]["isAtAll"] = True
    sendData = json.dumps(requests_data)
    response = requests.post(url=DING_URL, headers=headers, data=sendData)
    # Note: rebinds `content` to the webhook's decoded response.
    content = response.json()
    return content
|
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
# Tag: short category label attached to articles.
class Tag(models.Model):
    tags = models.CharField(max_length=10)

    class Meta:
        verbose_name_plural = '标签'

    def __str__(self):
        return self.tags
# Notice: home-page banner image with a short caption.
class Notice(models.Model):
    notices = models.TextField(max_length=50)
    # Positional verbose_name (Chinese for "image link").
    urls = models.TextField(("图片链接"))

    class Meta:
        verbose_name_plural = '首页图片'

    def __str__(self):
        return self.notices
class Article(models.Model):
    '''公告模型'''
    # Announcement article: tagged rich-text content with a header image.
    # Deleting a Tag cascades to its articles.
    tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
    title = models.CharField(max_length=50, verbose_name='标题')
    ftitle = models.CharField(max_length=50, blank=True, null=True, verbose_name='副标题')
    content = RichTextUploadingField(verbose_name='内容')
    data_added = models.DateField(verbose_name='发布时间')
    photos = models.ImageField(verbose_name='头图')

    class Meta:
        verbose_name_plural = '公告'
        # Oldest first, by publication date.
        ordering = ['data_added']

    def __str__(self):
        # Show the first 50 characters of the (HTML) content as the label.
        return f'{self.content[:50]}...'
class Messages(models.Model):
    # Guestbook message: all author fields are optional.
    username = models.CharField(max_length=10, verbose_name='昵称', blank=True, null=True)
    qq = models.CharField(max_length=12, verbose_name='QQ', blank=True, null=True)
    content = RichTextUploadingField(verbose_name='留言', blank=True, null=True)
    # No auto_now/auto_now_add: the publish timestamp must be set explicitly.
    publish = models.DateTimeField()

    class Meta:
        verbose_name_plural = '留言'

    def __str__(self) -> str:
        tpl = '<Message:[username={username}, content={content}, publish={publish}]>'
        return tpl.format(username=self.username, content=self.content, publish=self.publish)
|
import sys
def run(code):
    """Interpret a program: each space-separated token is replaced by the
    character whose code point equals the token's length."""
    return ''.join(chr(len(token)) for token in code.split(' '))
if __name__ == "__main__":
    # Expect exactly one argument: a source file with the ".e" extension.
    if len(sys.argv) > 1:
        if sys.argv[1].endswith(".e"):
            path = sys.argv[1]
            try:
                with open(path, 'r') as f:
                    # BUG FIX: was `file.read()` — calling .read() on the
                    # filename string, which always raised and was swallowed
                    # by a bare `except:`, so valid files printed
                    # "Invalid file." too.
                    code = f.read()
            except OSError:
                print("Invalid file.")
                sys.exit(1)
            # Run outside the try so interpreter errors are not masked
            # as file errors.
            print(run(code))
        else:
            print("Invalid file.")
            sys.exit(1)
    else:
        print("No file specified.")
        sys.exit(1)
|
from django.core.management.base import BaseCommand, CommandError
from conference import models
from conference import utils
from collections import defaultdict
from optparse import make_option
class Command(BaseCommand):
    """Rank the proposed talks of a conference with the voting engine.

    Usage: manage.py <command> <conference> [--missing-vote N] [--show-input]
    """
    option_list = BaseCommand.option_list + (
        make_option('--missing-vote',
            action='store',
            dest='missing_vote',
            default=0,
            type='float',
            # Typo fix in user-visible help text: "whed" -> "when".
            help='Used when a user didn\'t vote a talk',
        ),
        make_option('--show-input',
            action='store_true',
            dest='show_input',
            default=False,
            help='Show the input data piped to votengine',
        ),
    )

    def handle(self, *args, **options):
        """Print vote statistics and the ranking for the given conference,
        or (with --show-input) dump the raw input fed to the vote engine.

        Raises:
            CommandError: if the conference argument is missing.
        """
        try:
            conference = args[0]
        except IndexError:
            raise CommandError('conference not specified')
        talks = models.Talk.objects\
            .filter(conference=conference, status='proposed')
        if options['show_input']:
            print(utils._input_for_ranking_of_talks(talks, missing_vote=options['missing_vote']))
        else:
            qs = models.VotoTalk.objects\
                .filter(talk__in=talks)\
                .values('user')
            votes = qs.count()
            # Distinct users among the voters of these talks.
            users = qs.distinct().count()
            print('%d talks / %d users / %d votes' % (talks.count(), users, votes))
            for ix, t in enumerate(utils.ranking_of_talks(talks, missing_vote=options['missing_vote'])):
                print(ix + 1, '-', t.id, '-', t.type, '-', t.language, '-', t.title.encode('utf-8'))
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classes and methods relating to user rights."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import learner_progress_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import user_services
from core.tests import test_utils
import feconf
class ExplorationRightsTests(test_utils.GenericTestBase):
"""Test that rights for actions on explorations work as expected."""
EXP_ID = 'exp_id'
    def setUp(self):
        """Create test users A-F plus admin/moderator/voiceover-admin accounts
        and the UserActionsInfo objects used throughout these tests."""
        super(ExplorationRightsTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.signup('c@example.com', 'C')
        self.signup('d@example.com', 'D')
        self.signup('e@example.com', 'E')
        self.signup('f@example.com', 'F')
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.signup(self.VOICEOVER_ADMIN_EMAIL, self.VOICEOVER_ADMIN_USERNAME)
        # Resolve the user ids of every account created above.
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        self.user_id_c = self.get_user_id_from_email('c@example.com')
        self.user_id_d = self.get_user_id_from_email('d@example.com')
        self.user_id_e = self.get_user_id_from_email('e@example.com')
        self.user_id_f = self.get_user_id_from_email('f@example.com')
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.user_id_moderator = self.get_user_id_from_email(
            self.MODERATOR_EMAIL)
        self.user_id_voiceover_admin = self.get_user_id_from_email(
            self.VOICEOVER_ADMIN_EMAIL)
        # Grant the privileged roles.
        self.set_admins([self.ADMIN_USERNAME])
        self.set_moderators([self.MODERATOR_USERNAME])
        self.set_voiceover_admin([self.VOICEOVER_ADMIN_USERNAME])
        # UserActionsInfo objects are what rights_manager checks act on.
        self.user_a = user_services.get_user_actions_info(self.user_id_a)
        self.user_b = user_services.get_user_actions_info(self.user_id_b)
        self.user_c = user_services.get_user_actions_info(self.user_id_c)
        self.user_d = user_services.get_user_actions_info(self.user_id_d)
        self.user_e = user_services.get_user_actions_info(self.user_id_e)
        self.user_f = user_services.get_user_actions_info(self.user_id_f)
        self.user_admin = user_services.get_user_actions_info(
            self.user_id_admin)
        self.user_moderator = user_services.get_user_actions_info(
            self.user_id_moderator)
        self.system_user = user_services.get_system_user()
        self.login(self.ADMIN_EMAIL)
        user_services.update_user_role(
            self.user_id_voiceover_admin, feconf.ROLE_ID_VOICEOVER_ADMIN)
        self.user_voiceover_admin = user_services.get_user_actions_info(
            self.user_id_voiceover_admin)
    def test_get_exploration_rights_for_nonexistent_exploration(self):
        """Looking up rights for a missing exploration raises in strict mode
        and returns None with strict=False."""
        non_exp_id = 'this_exp_does_not_exist_id'
        with self.assertRaisesRegexp(
            Exception,
            'Entity for class ExplorationRightsModel with id '
            'this_exp_does_not_exist_id not found'
        ):
            rights_manager.get_exploration_rights(non_exp_id)
        self.assertIsNone(
            rights_manager.get_exploration_rights(non_exp_id, strict=False))
    def test_demo_exploration(self):
        """Permissions on a released (community-owned) demo exploration."""
        exp_services.load_demo('1')
        rights_manager.release_ownership_of_exploration(
            self.system_user, '1')
        exp_rights = rights_manager.get_exploration_rights('1')
        # Regular user: can view/edit/voiceover/save, but not delete.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_a, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_a, exp_rights))
        # Admin: full access, including delete.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_admin, exp_rights))
        # Moderator: full access, including delete.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_moderator, exp_rights))
        # Managing voice artists is reserved to the voiceover admin role.
        self.assertTrue(
            rights_manager.check_can_manage_voice_artist_in_activity(
                self.user_voiceover_admin, exp_rights))
        self.assertFalse(
            rights_manager.check_can_manage_voice_artist_in_activity(
                self.user_a, exp_rights))
        self.assertFalse(
            rights_manager.check_can_manage_voice_artist_in_activity(
                self.user_admin, exp_rights))
        # Missing rights object: the check must simply return False.
        self.assertFalse(
            rights_manager.check_can_manage_voice_artist_in_activity(
                self.user_a, None))
    def test_non_splash_page_demo_exploration(self):
        """Checks rights on demo exploration '3' (not on the splash page)."""
        # Note: there is no difference between permissions for demo
        # explorations, whether or not they are on the splash page.
        exp_services.load_demo('3')
        rights_manager.release_ownership_of_exploration(
            self.system_user, '3')
        exp_rights = rights_manager.get_exploration_rights('3')
        # Regular user A: may access/edit/voiceover/save, but not delete.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_a, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_a, exp_rights))
        # Admin: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_admin, exp_rights))
        # Moderator: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_moderator, exp_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_moderator, exp_rights))
def test_ownership_of_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_EDITOR)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_owner(self.user_id_a))
self.assertFalse(exp_rights.is_owner(self.user_id_b))
self.assertFalse(exp_rights.is_owner(self.user_id_admin))
    def test_newly_created_exploration(self):
        """Checks rights on a freshly created (private) exploration."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        # Creator A: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_a, exp_rights))
        # Admin: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_admin, exp_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_admin, exp_rights))
        # Moderator: may access the private exploration but nothing more.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_moderator, exp_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_moderator, exp_rights))
        self.assertFalse(rights_manager.check_can_voiceover_activity(
            self.user_moderator, exp_rights))
        self.assertFalse(rights_manager.check_can_save_activity(
            self.user_moderator, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_moderator, exp_rights))
        # Unrelated user B: no rights at all while private.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_voiceover_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_save_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, exp_rights))
    def test_inviting_collaborator_to_exploration(self):
        """Assigning the editor role grants edit rights but not deletion."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        # Before the invitation, user B has no rights on the exploration.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_voiceover_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_save_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, exp_rights))
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_EDITOR)
        # Re-fetch the rights object to observe the new role.
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_b, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_b, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, exp_rights))
    def test_inviting_voice_artist_to_exploration(self):
        """The voice-artist role grants voiceover/save but not edit/delete."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        # Before the invitation, user B has no rights on the exploration.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_voiceover_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_save_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, exp_rights))
        # The exploration is published before the voiceover admin assigns
        # the voice-artist role.
        rights_manager.publish_exploration(self.user_a, self.EXP_ID)
        rights_manager.assign_role_for_exploration(
            self.user_voiceover_admin, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_VOICE_ARTIST)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_rights))
        self.assertTrue(rights_manager.check_can_voiceover_activity(
            self.user_b, exp_rights))
        self.assertTrue(rights_manager.check_can_save_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, exp_rights))
    def test_inviting_playtester_to_exploration(self):
        """The viewer role grants access only - no edit/voiceover/save."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        # Before the invitation, user B has no rights on the exploration.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_voiceover_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_save_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, exp_rights))
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        # Re-fetch: only access should now be granted.
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_voiceover_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_save_activity(
            self.user_b, exp_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, exp_rights))
    def test_setting_rights_of_exploration(self):
        """Only owners may assign roles; viewers and editors may not."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        # As a viewer, user B cannot assign roles to others.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_exploration(
                self.user_b, self.EXP_ID, self.user_id_c,
                rights_domain.ROLE_VIEWER)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_EDITOR)
        # As an editor, user B still cannot assign roles.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_exploration(
                self.user_b, self.EXP_ID, self.user_id_c,
                rights_domain.ROLE_VIEWER)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_OWNER)
        # Once an owner, user B can hand out owner/editor/viewer roles.
        rights_manager.assign_role_for_exploration(
            self.user_b, self.EXP_ID, self.user_id_c,
            rights_domain.ROLE_OWNER)
        rights_manager.assign_role_for_exploration(
            self.user_b, self.EXP_ID, self.user_id_d,
            rights_domain.ROLE_EDITOR)
        rights_manager.assign_role_for_exploration(
            self.user_b, self.EXP_ID, self.user_id_f,
            rights_domain.ROLE_VIEWER)
    def test_publishing_and_unpublishing_exploration(self):
        """Publishing opens access to everyone; unpublishing closes it."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='A title', category='A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        rights_manager.publish_exploration(self.user_a, self.EXP_ID)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        # The owner cannot unpublish; an admin performs the unpublish below.
        self.assertFalse(rights_manager.check_can_unpublish_activity(
            self.user_a, exp_rights))
        rights_manager.unpublish_exploration(self.user_admin, self.EXP_ID)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, exp_rights))
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
    def test_unpublished_exploration_is_removed_from_completed_activities(self):
        """Unpublishing removes the exploration from completed lists."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='A title', category='A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_a, self.EXP_ID)
        learner_progress_services.mark_exploration_as_completed(
            self.user_id_f, self.EXP_ID)
        self.assertEqual(
            learner_progress_services.get_all_completed_exp_ids(
                self.user_id_f),
            [self.EXP_ID]
        )
        rights_manager.unpublish_exploration(self.user_admin, self.EXP_ID)
        # The cleanup runs via pending tasks; flush them before re-checking.
        self.process_and_flush_pending_tasks()
        self.assertEqual(
            learner_progress_services.get_all_completed_exp_ids(
                self.user_id_f),
            []
        )
    def test_unpublished_exploration_is_removed_from_incomplete_activities(
            self):
        """Unpublishing removes the exploration from incomplete lists."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='A title', category='A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_a, self.EXP_ID)
        learner_progress_services.mark_exploration_as_incomplete(
            self.user_id_e, self.EXP_ID, 'state', 1)
        self.assertEqual(
            learner_progress_services.get_all_incomplete_exp_ids(
                self.user_id_e),
            [self.EXP_ID]
        )
        rights_manager.unpublish_exploration(self.user_admin, self.EXP_ID)
        # The cleanup runs via pending tasks; flush them before re-checking.
        self.process_and_flush_pending_tasks()
        self.assertEqual(
            learner_progress_services.get_all_incomplete_exp_ids(
                self.user_id_e),
            []
        )
def test_can_only_delete_unpublished_explorations(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
rights_manager.publish_exploration(self.user_a, self.EXP_ID)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
rights_manager.unpublish_exploration(self.user_admin, self.EXP_ID)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
    def test_changing_viewability_of_exploration(self):
        """Owners can toggle private viewability; non-owners cannot."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='A title', category='A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        # Setting viewability to its current value is rejected.
        with self.assertRaisesRegexp(Exception, 'already the current value'):
            rights_manager.set_private_viewability_of_exploration(
                self.user_a, self.EXP_ID, False)
        # A non-owner may not change viewability.
        with self.assertRaisesRegexp(Exception, 'cannot be changed'):
            rights_manager.set_private_viewability_of_exploration(
                self.user_b, self.EXP_ID, True)
        # Viewable-when-private: everyone can access.
        rights_manager.set_private_viewability_of_exploration(
            self.user_a, self.EXP_ID, True)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, exp_rights))
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
        # Back to non-viewable: only the owner retains access.
        rights_manager.set_private_viewability_of_exploration(
            self.user_a, self.EXP_ID, False)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, exp_rights))
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_rights))
def test_reassign_higher_role_to_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_VIEWER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_viewer(self.user_id_b))
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_OWNER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_owner(self.user_id_b))
def test_reassign_lower_role_to_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_OWNER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_owner(self.user_id_a))
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_VIEWER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_viewer(self.user_id_b))
    def test_check_exploration_rights(self):
        """Role predicates reflect the assigned owner/editor/viewer roles."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_c,
            rights_domain.ROLE_EDITOR)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(exp_rights.is_owner(self.user_id_a))
        self.assertTrue(exp_rights.is_editor(self.user_id_c))
        self.assertTrue(exp_rights.is_viewer(self.user_id_b))
        # Each user has exactly the role they were given, no more.
        self.assertFalse(exp_rights.is_viewer(self.user_id_a))
        self.assertFalse(exp_rights.is_owner(self.user_id_b))
        self.assertFalse(exp_rights.is_editor(self.user_id_b))
        # The exploration is published before the voiceover admin assigns
        # the voice-artist role to user D.
        rights_manager.publish_exploration(self.user_a, self.EXP_ID)
        rights_manager.assign_role_for_exploration(
            self.user_voiceover_admin, self.EXP_ID, self.user_id_d,
            rights_domain.ROLE_VOICE_ARTIST)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(exp_rights.is_voice_artist(self.user_id_d))
        self.assertFalse(exp_rights.is_voice_artist(self.user_id_b))
def test_get_multiple_exploration_rights(self):
exp_ids = ['exp1', 'exp2', 'exp3', 'exp4']
# Saving only first 3 explorations to check that None is returned for
# non-existing exploration.
for exp_id in exp_ids[:3]:
self.save_new_valid_exploration(exp_id, self.user_id_admin)
exp_rights = rights_manager.get_multiple_exploration_rights_by_ids(
exp_ids)
self.assertEqual(len(exp_rights), 4)
for rights_object in exp_rights[:3]:
self.assertIsNotNone(rights_object)
self.assertIsNone(exp_rights[3])
def test_owner_cannot_be_reassigned_as_owner(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
with self.assertRaisesRegexp(Exception, 'This user already owns this'):
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_a,
rights_domain.ROLE_OWNER)
def test_assign_viewer_to_role_owner(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_VIEWER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(exp_rights.is_owner(self.user_id_b))
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_OWNER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_owner(self.user_id_b))
def test_owner_cannot_assign_voice_artist(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.publish_exploration(self.user_a, self.EXP_ID)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_VOICE_ARTIST)
def test_voiceover_admin_can_modify_voice_artist_role(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.publish_exploration(self.user_a, self.EXP_ID)
rights_manager.assign_role_for_exploration(
self.user_voiceover_admin, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_VOICE_ARTIST)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_voice_artist(self.user_id_b))
rights_manager.deassign_role_for_exploration(
self.user_voiceover_admin, self.EXP_ID, self.user_id_b)
self.assertFalse(exp_rights.is_voice_artist(self.user_id_b))
    def test_owner_cannot_assign_voice_artist_to_core_role(self):
        """Owners cannot re-assign an existing voice artist's role."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_a, self.EXP_ID)
        rights_manager.assign_role_for_exploration(
            self.user_voiceover_admin, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_VOICE_ARTIST)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertFalse(exp_rights.is_owner(self.user_id_b))
        # The owner (user A) cannot touch user B's voice-artist role.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_exploration(
                self.user_a, self.EXP_ID, self.user_id_b,
                rights_domain.ROLE_VOICE_ARTIST)
    def test_voice_artist_cannot_be_reassigned_as_voice_artist(self):
        """Granting the voice-artist role twice to one user raises."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_a, self.EXP_ID)
        rights_manager.assign_role_for_exploration(
            self.user_voiceover_admin, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_VOICE_ARTIST)
        # A second, identical assignment is rejected.
        with self.assertRaisesRegexp(
            Exception, 'This user already can voiceover this'):
            rights_manager.assign_role_for_exploration(
                self.user_voiceover_admin, self.EXP_ID, self.user_id_b,
                rights_domain.ROLE_VOICE_ARTIST)
    def test_viewer_cannot_be_reassigned_as_viewer(self):
        """Granting the viewer role twice to one user raises."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        # A second, identical assignment is rejected.
        with self.assertRaisesRegexp(
            Exception, 'This user already can view this'):
            rights_manager.assign_role_for_exploration(
                self.user_a, self.EXP_ID, self.user_id_b,
                rights_domain.ROLE_VIEWER)
def test_public_explorations_cannot_be_assigned_role_viewer(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.publish_exploration(self.user_a, self.EXP_ID)
with self.assertRaisesRegexp(
Exception, 'Public explorations can be viewed by anyone.'):
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_domain.ROLE_VIEWER)
def test_cannot_assign_invalid_role(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
with self.assertRaisesRegexp(Exception, 'Invalid role: invalid_role'):
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b, 'invalid_role')
    def test_deassign_without_rights_fails(self):
        """A user without manage rights cannot deassign other users."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        # User B has no role here, so the deassign attempt is rejected.
        with self.assertRaisesRegexp(
            Exception, 'Could not deassign role'):
            rights_manager.deassign_role_for_exploration(
                self.user_b, self.EXP_ID, self.user_id_a)
def test_deassign_viewer_is_successful(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_VIEWER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_viewer(self.user_id_b))
rights_manager.deassign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(exp_rights.is_viewer(self.user_id_b))
    def test_deassign_editor_is_successful(self):
        """Deassigning an editor removes their editor status."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_EDITOR)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(exp_rights.is_editor(self.user_id_b))
        rights_manager.deassign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b)
        # Re-fetch to observe the removal.
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertFalse(exp_rights.is_editor(self.user_id_b))
    def test_deassign_owner_is_successful(self):
        """Deassigning an owner removes their owner status."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b, rights_domain.ROLE_OWNER)
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertTrue(exp_rights.is_owner(self.user_id_b))
        rights_manager.deassign_role_for_exploration(
            self.user_a, self.EXP_ID, self.user_id_b)
        # Re-fetch to observe the removal.
        exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
        self.assertFalse(exp_rights.is_owner(self.user_id_b))
    def test_deassign_non_existent_fails(self):
        """Deassigning a user who holds no role raises an error."""
        exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
        exp_services.save_new_exploration(self.user_id_a, exp)
        with self.assertRaisesRegexp(
            Exception, 'This user does not have any role in'):
            rights_manager.deassign_role_for_exploration(
                self.user_a, self.EXP_ID, self.user_id_b)
class CollectionRightsTests(test_utils.GenericTestBase):
"""Test that rights for actions on collections work as expected."""
COLLECTION_ID = 'collection_id'
EXP_ID_FOR_COLLECTION = 'exp_id_for_collection'
    def setUp(self):
        """Creates users A-E plus admin and moderator actors.

        Signs everyone up, promotes the admin/moderator accounts, builds
        user-actions objects for each user, and logs in as the admin.
        """
        super(CollectionRightsTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.signup('c@example.com', 'C')
        self.signup('d@example.com', 'D')
        self.signup('e@example.com', 'E')
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        self.user_id_c = self.get_user_id_from_email('c@example.com')
        self.user_id_d = self.get_user_id_from_email('d@example.com')
        self.user_id_e = self.get_user_id_from_email('e@example.com')
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.user_id_moderator = self.get_user_id_from_email(
            self.MODERATOR_EMAIL)
        # Role promotion must happen before the actions-info objects are
        # built below, so the objects reflect the admin/moderator roles.
        self.set_admins([self.ADMIN_USERNAME])
        self.set_moderators([self.MODERATOR_USERNAME])
        self.user_a = user_services.get_user_actions_info(self.user_id_a)
        self.user_b = user_services.get_user_actions_info(self.user_id_b)
        self.user_c = user_services.get_user_actions_info(self.user_id_c)
        self.user_d = user_services.get_user_actions_info(self.user_id_d)
        self.user_e = user_services.get_user_actions_info(self.user_id_e)
        self.user_admin = user_services.get_user_actions_info(
            self.user_id_admin)
        self.user_moderator = user_services.get_user_actions_info(
            self.user_id_moderator)
        self.system_user = user_services.get_system_user()
        self.login(self.ADMIN_EMAIL)
    def test_get_collection_rights_for_nonexistent_collection(self):
        """Missing collections raise in strict mode, yield None otherwise."""
        non_col_id = 'this_collection_does_not_exist_id'
        with self.assertRaisesRegexp(
            Exception,
            'Entity for class CollectionRightsModel with id '
            'this_collection_does_not_exist_id not found'
            ):
            rights_manager.get_collection_rights(non_col_id)
        # strict=False converts the lookup failure into a None result.
        self.assertIsNone(
            rights_manager.get_collection_rights(non_col_id, strict=False))
    def test_demo_collection(self):
        """Checks role-based rights on a released demo collection.

        After the system user releases ownership of demo '0': any user can
        access and edit it, but only admins/moderators may delete it.
        """
        collection_services.load_demo('0')
        rights_manager.release_ownership_of_collection(
            self.system_user, '0')
        collection_rights = rights_manager.get_collection_rights('0')
        # Regular user A: access and edit, but no deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, collection_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_a, collection_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_a, collection_rights))
        # Admin: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_admin, collection_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_admin, collection_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_admin, collection_rights))
        # Moderator: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_moderator, collection_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_moderator, collection_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_moderator, collection_rights))
    def test_ownership_of_collection(self):
        """Creating a collection makes the creator its only owner."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_EDITOR)
        # The editor role does not add user B to the owner-name list.
        self.assertListEqual(
            ['A'],
            rights_manager.get_collection_owner_names(
                self.COLLECTION_ID))
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(collection_rights.is_owner(self.user_id_a))
        self.assertFalse(collection_rights.is_owner(self.user_id_b))
        self.assertFalse(collection_rights.is_owner(self.user_id_admin))
    def test_newly_created_collection(self):
        """Checks rights on a freshly created (private) collection."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        self.assertListEqual(
            ['A'],
            rights_manager.get_collection_owner_names(
                self.COLLECTION_ID))
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        # Creator A: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, collection_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_a, collection_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_a, collection_rights))
        # Admin: full rights, including deletion.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_admin, collection_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_admin, collection_rights))
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_admin, collection_rights))
        # Moderator: may access the private collection but nothing more.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_moderator, collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_moderator, collection_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_moderator, collection_rights))
        # Unrelated user B: no rights at all while private.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, collection_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, collection_rights))
    def test_owner_cannot_be_reassigned_as_owner(self):
        """Re-assigning the existing collection owner as owner raises."""
        collection = collection_domain.Collection.create_default_collection(
            self.COLLECTION_ID)
        collection_services.save_new_collection(self.user_id_a, collection)
        with self.assertRaisesRegexp(Exception, 'This user already owns this'):
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID, self.user_id_a,
                rights_domain.ROLE_OWNER)
def test_editor_can_be_reassigned_as_owner(self):
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID)
collection_services.save_new_collection(self.user_id_a, collection)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_EDITOR)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(collection_rights.is_editor(self.user_id_b))
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_OWNER)
self.assertTrue(collection_rights.is_owner(self.user_id_b))
self.assertFalse(collection_rights.is_editor(self.user_id_b))
def test_voiceartist_can_be_reassigned_as_owner(self):
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID)
collection_services.save_new_collection(self.user_id_a, collection)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_VOICE_ARTIST)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(collection_rights.is_voice_artist(self.user_id_b))
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_OWNER)
self.assertTrue(collection_rights.is_owner(self.user_id_b))
self.assertFalse(collection_rights.is_voice_artist(self.user_id_b))
def test_viewer_can_be_reassigned_as_owner(self):
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID)
collection_services.save_new_collection(self.user_id_a, collection)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_VIEWER)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(collection_rights.is_viewer(self.user_id_b))
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_OWNER)
self.assertTrue(collection_rights.is_owner(self.user_id_b))
self.assertFalse(collection_rights.is_viewer(self.user_id_b))
def test_viewer_can_be_reassigned_as_editor(self):
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID)
collection_services.save_new_collection(self.user_id_a, collection)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_VIEWER)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(collection_rights.is_viewer(self.user_id_b))
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_domain.ROLE_EDITOR)
self.assertTrue(collection_rights.is_editor(self.user_id_b))
self.assertFalse(collection_rights.is_viewer(self.user_id_b))
    def test_voiceartist_can_be_reassigned_as_editor(self):
        """A user holding the voice-artist role can be upgraded to editor."""
        collection = collection_domain.Collection.create_default_collection(
            self.COLLECTION_ID)
        collection_services.save_new_collection(self.user_id_a, collection)
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_VOICE_ARTIST)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(collection_rights.is_voice_artist(self.user_id_b))
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_EDITOR)
        # NOTE(review): rights object fetched before the reassignment above;
        # confirm it reflects the update.
        self.assertTrue(collection_rights.is_editor(self.user_id_b))
        self.assertFalse(collection_rights.is_voice_artist(self.user_id_b))
    def test_viewer_can_be_reassigned_as_voiceartist(self):
        """A user holding the viewer role can become a voice artist."""
        collection = collection_domain.Collection.create_default_collection(
            self.COLLECTION_ID)
        collection_services.save_new_collection(self.user_id_a, collection)
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(collection_rights.is_viewer(self.user_id_b))
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_VOICE_ARTIST)
        # NOTE(review): rights object fetched before the reassignment above;
        # confirm it reflects the update.
        self.assertTrue(collection_rights.is_voice_artist(self.user_id_b))
        self.assertFalse(collection_rights.is_viewer(self.user_id_b))
    def test_editor_cannot_be_reassigned_as_editor(self):
        """Re-assigning an existing editor as editor raises an error."""
        collection = collection_domain.Collection.create_default_collection(
            self.COLLECTION_ID)
        collection_services.save_new_collection(self.user_id_a, collection)
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_EDITOR)
        with self.assertRaisesRegexp(
            Exception, 'This user already can edit this'):
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID, self.user_id_b,
                rights_domain.ROLE_EDITOR)
    def test_voice_artist_cannot_be_reassigned_as_voice_artist(self):
        """Re-assigning a voice artist to the same role raises an error."""
        collection = collection_domain.Collection.create_default_collection(
            self.COLLECTION_ID)
        collection_services.save_new_collection(self.user_id_a, collection)
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_VOICE_ARTIST)
        with self.assertRaisesRegexp(
            Exception, 'This user already can voiceover this'):
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID, self.user_id_b,
                rights_domain.ROLE_VOICE_ARTIST)
    def test_viewer_cannot_be_reassigned_as_viewer(self):
        """Re-assigning an existing viewer as viewer raises an error."""
        collection = collection_domain.Collection.create_default_collection(
            self.COLLECTION_ID)
        collection_services.save_new_collection(self.user_id_a, collection)
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        with self.assertRaisesRegexp(
            Exception, 'This user already can view this'):
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID, self.user_id_b,
                rights_domain.ROLE_VIEWER)
    def test_public_collection_cannot_be_assigned_role_viewer(self):
        """Viewer roles cannot be assigned on an already-public collection."""
        collection = collection_domain.Collection.create_default_collection(
            self.COLLECTION_ID)
        collection_services.save_new_collection(self.user_id_a, collection)
        rights_manager.publish_collection(self.user_a, self.COLLECTION_ID)
        with self.assertRaisesRegexp(
            Exception, 'Public collections can be viewed by anyone.'):
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID, self.user_id_b,
                rights_domain.ROLE_VIEWER)
    def test_inviting_collaborator_to_collection(self):
        """Granting the editor role gives edit access to the collection,
        but not to the exploration contained in it."""
        self.save_new_valid_collection(
            self.COLLECTION_ID, self.user_id_a,
            exploration_id=self.EXP_ID_FOR_COLLECTION)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        # Verify initial editor permissions for the collection.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, collection_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_a, collection_rights))
        # Verify user B initially has no access to the collection.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, collection_rights))
        # User A adds user B to the collection as an editor.
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_EDITOR)
        # Ensure User A is the only user in the owner names list.
        self.assertListEqual(
            ['A'],
            rights_manager.get_collection_owner_names(
                self.COLLECTION_ID))
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        # Ensure User B is now an editor of the collection (but cannot
        # delete it, since editors are not owners).
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
        self.assertTrue(rights_manager.check_can_edit_activity(
            self.user_b, collection_rights))
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_b, collection_rights))
        exp_for_collection_rights = rights_manager.get_exploration_rights(
            self.EXP_ID_FOR_COLLECTION)
        # Ensure User B is not an editor of the exploration within the
        # collection.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_for_collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_for_collection_rights))
    def test_inviting_playtester_to_collection(self):
        """Granting the viewer role gives read-only access to the
        collection, and no access to the exploration contained in it."""
        self.save_new_valid_collection(
            self.COLLECTION_ID, self.user_id_a,
            exploration_id=self.EXP_ID_FOR_COLLECTION)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        exp_for_collection_rights = rights_manager.get_exploration_rights(
            self.EXP_ID_FOR_COLLECTION)
        # Verify user B initially has no access to the collection.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, collection_rights))
        # Verify user B initially has no access to the exploration within
        # the collection.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_for_collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_for_collection_rights))
        # User A adds user B to the collection as a viewer.
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        exp_for_collection_rights = rights_manager.get_exploration_rights(
            self.EXP_ID_FOR_COLLECTION)
        # Ensure User B is now a viewer (not an editor) of the collection.
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, collection_rights))
        # Ensure User B cannot view the exploration just because he/she has
        # access to the collection containing it.
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, exp_for_collection_rights))
        self.assertFalse(rights_manager.check_can_edit_activity(
            self.user_b, exp_for_collection_rights))
    def test_setting_rights_of_collection(self):
        """Only owners may assign roles; viewers and editors may not,
        while a newly-assigned owner can assign any role."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        # A viewer (user B) cannot assign roles to others.
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_VIEWER)
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_collection(
                self.user_b, self.COLLECTION_ID, self.user_id_c,
                rights_domain.ROLE_VIEWER)
        # An editor (user B) still cannot assign roles to others.
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_EDITOR)
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_collection(
                self.user_b, self.COLLECTION_ID, self.user_id_c,
                rights_domain.ROLE_VIEWER)
        # Once user B is an owner, they can assign any role.
        rights_manager.assign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b,
            rights_domain.ROLE_OWNER)
        rights_manager.assign_role_for_collection(
            self.user_b, self.COLLECTION_ID, self.user_id_c,
            rights_domain.ROLE_OWNER)
        rights_manager.assign_role_for_collection(
            self.user_b, self.COLLECTION_ID, self.user_id_d,
            rights_domain.ROLE_EDITOR)
        rights_manager.assign_role_for_collection(
            self.user_b, self.COLLECTION_ID, self.user_id_e,
            rights_domain.ROLE_VIEWER)
    def test_publishing_and_unpublishing_collection(self):
        """Publishing opens access to everyone; unpublishing (admin-only)
        restricts access back to the owner."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
        rights_manager.publish_collection(self.user_a, self.COLLECTION_ID)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
        # The owner cannot unpublish; only an admin can.
        self.assertFalse(rights_manager.check_can_unpublish_activity(
            self.user_a, collection_rights))
        rights_manager.unpublish_collection(
            self.user_admin, self.COLLECTION_ID)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(rights_manager.check_can_access_activity(
            self.user_a, collection_rights))
        self.assertFalse(rights_manager.check_can_access_activity(
            self.user_b, collection_rights))
    def test_can_only_delete_unpublished_collections(self):
        """A collection is deletable before publishing and after
        unpublishing, but not while public."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_a, collection_rights))
        rights_manager.publish_collection(self.user_a, self.COLLECTION_ID)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertFalse(rights_manager.check_can_delete_activity(
            self.user_a, collection_rights))
        rights_manager.unpublish_collection(
            self.user_admin, self.COLLECTION_ID)
        collection_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(rights_manager.check_can_delete_activity(
            self.user_a, collection_rights))
    def test_deassign_without_rights_fails(self):
        """A non-owner cannot strip roles from other users."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        with self.assertRaisesRegexp(
            Exception, 'Could not deassign role'):
            rights_manager.deassign_role_for_collection(
                self.user_b, self.COLLECTION_ID, self.user_id_a)
    def test_deassign_viewer_is_successful(self):
        """An owner can remove the viewer role from a user."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.assign_role_for_collection(
            self.user_a,
            self.COLLECTION_ID,
            self.user_id_b,
            rights_domain.ROLE_VIEWER
        )
        col_rights = rights_manager.get_collection_rights(
            self.COLLECTION_ID)
        self.assertTrue(col_rights.is_viewer(self.user_id_b))
        rights_manager.deassign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b)
        col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID)
        self.assertFalse(col_rights.is_viewer(self.user_id_b))
    def test_deassign_voice_artist_is_successful(self):
        """An owner can remove the voice-artist role from a user."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.assign_role_for_collection(
            self.user_a,
            self.COLLECTION_ID,
            self.user_id_b,
            rights_domain.ROLE_VOICE_ARTIST
        )
        col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID)
        self.assertTrue(col_rights.is_voice_artist(self.user_id_b))
        rights_manager.deassign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b)
        col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID)
        self.assertFalse(col_rights.is_voice_artist(self.user_id_b))
    def test_deassign_editor_is_successful(self):
        """An owner can remove the editor role from a user."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.assign_role_for_collection(
            self.user_a,
            self.COLLECTION_ID,
            self.user_id_b,
            rights_domain.ROLE_EDITOR
        )
        col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID)
        self.assertTrue(col_rights.is_editor(self.user_id_b))
        rights_manager.deassign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b)
        col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID)
        self.assertFalse(col_rights.is_editor(self.user_id_b))
    def test_deassign_owner_is_successful(self):
        """An owner can remove the owner role from another owner."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.assign_role_for_collection(
            self.user_a,
            self.COLLECTION_ID,
            self.user_id_b,
            rights_domain.ROLE_OWNER
        )
        col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID)
        self.assertTrue(col_rights.is_owner(self.user_id_b))
        rights_manager.deassign_role_for_collection(
            self.user_a, self.COLLECTION_ID, self.user_id_b)
        col_rights = rights_manager.get_collection_rights(self.COLLECTION_ID)
        self.assertFalse(col_rights.is_owner(self.user_id_b))
    def test_deassign_non_existent_fails(self):
        """Deassigning a user who holds no role raises an error."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        with self.assertRaisesRegexp(
            Exception, 'This user does not have any role in'):
            rights_manager.deassign_role_for_collection(
                self.user_a, self.COLLECTION_ID, self.user_id_b)
class CheckCanReleaseOwnershipTest(test_utils.GenericTestBase):
    """Tests for check_can_release_ownership function."""
    # Fixture exploration ids: one published, one kept private.
    published_exp_id = 'exp_id_1'
    private_exp_id = 'exp_id_2'
    def setUp(self):
        """Create an owner and an admin, plus one published and one private
        exploration, both owned by the owner."""
        super(CheckCanReleaseOwnershipTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.set_admins([self.ADMIN_USERNAME])
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.save_new_valid_exploration(
            self.published_exp_id, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id, self.owner_id)
        rights_manager.publish_exploration(self.owner, self.published_exp_id)
    def test_admin_can_release_ownership_of_published_exploration(self):
        """Admins may release ownership of a published exploration."""
        self.assertTrue(rights_manager.check_can_release_ownership(
            self.admin,
            rights_manager.get_exploration_rights(self.published_exp_id)))
    def test_owner_can_release_ownership_of_published_exploration(self):
        """Owners may release ownership of their published exploration."""
        self.assertTrue(rights_manager.check_can_release_ownership(
            self.owner,
            rights_manager.get_exploration_rights(self.published_exp_id)))
    def test_admin_cannot_release_ownership_of_private_exploration(self):
        """Even admins cannot release ownership of a private exploration."""
        self.assertFalse(rights_manager.check_can_release_ownership(
            self.admin,
            rights_manager.get_exploration_rights(self.private_exp_id)))
    def test_owner_cannot_release_ownership_of_private_exploration(self):
        """Owners cannot release ownership of a private exploration."""
        self.assertFalse(rights_manager.check_can_release_ownership(
            self.owner,
            rights_manager.get_exploration_rights(self.private_exp_id)))
class CheckCanUnpublishActivityTest(test_utils.GenericTestBase):
    """Tests for check_can_unpublish_activity function."""
    # Fixture ids: one published and one private exploration/collection each.
    published_exp_id = 'exp_id_1'
    private_exp_id = 'exp_id_2'
    private_col_id = 'col_id_1'
    published_col_id = 'col_id_2'
    def setUp(self):
        """Create owner, admin and moderator users plus published/private
        explorations and collections owned by the owner."""
        super(CheckCanUnpublishActivityTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])
        self.set_moderators([self.MODERATOR_USERNAME])
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.moderator = user_services.get_user_actions_info(self.moderator_id)
        self.save_new_valid_exploration(
            self.published_exp_id, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id, self.owner_id)
        self.save_new_valid_collection(
            self.published_col_id, self.owner_id,
            exploration_id=self.published_col_id)
        self.save_new_valid_collection(
            self.private_col_id, self.owner_id,
            exploration_id=self.private_col_id)
        rights_manager.publish_exploration(self.owner, self.published_exp_id)
        rights_manager.publish_collection(self.owner, self.published_col_id)
    def test_admin_can_unpublish_published_collection(self):
        """Admins may unpublish a published collection."""
        self.assertTrue(rights_manager.check_can_unpublish_activity(
            self.admin,
            rights_manager.get_collection_rights(self.published_col_id)))
    def test_owner_cannot_unpublish_published_collection(self):
        """Owners may not unpublish their own published collection."""
        self.assertFalse(rights_manager.check_can_unpublish_activity(
            self.owner,
            rights_manager.get_collection_rights(self.published_col_id)))
    def test_admin_cannot_unpublish_private_collection(self):
        """A private collection has nothing to unpublish, even for admins."""
        self.assertFalse(rights_manager.check_can_unpublish_activity(
            self.admin,
            rights_manager.get_collection_rights(self.private_col_id)))
    def test_admin_can_unpublish_published_exploration(self):
        """Admins may unpublish a published exploration."""
        self.assertTrue(rights_manager.check_can_unpublish_activity(
            self.admin,
            rights_manager.get_exploration_rights(self.published_exp_id)))
    def test_owner_cannot_unpublish_published_exploration(self):
        """Owners may not unpublish their own published exploration."""
        self.assertFalse(rights_manager.check_can_unpublish_activity(
            self.owner,
            rights_manager.get_exploration_rights(self.published_exp_id)))
    def test_admin_cannot_unpublish_private_exploration(self):
        """A private exploration has nothing to unpublish."""
        self.assertFalse(rights_manager.check_can_unpublish_activity(
            self.admin,
            rights_manager.get_exploration_rights(self.private_exp_id)))
    def test_moderator_can_unpublish_published_exploration(self):
        """Moderators may unpublish a published exploration."""
        self.assertTrue(rights_manager.check_can_unpublish_activity(
            self.moderator,
            rights_manager.get_exploration_rights(self.published_exp_id)))
|
# Generated by Django 2.0.5 on 2018-07-15 16:06
from django.db import migrations
from django.db.models import OuterRef, Subquery
import django_countries.fields
def update_country_from_airport(apps, schema_editor):
    """Backfill Person.country from each person's assigned airport.

    Data-migration helper: issues a single UPDATE with a correlated
    subquery instead of looping over persons in Python. Persons without an
    airport are left untouched.
    """
    Person = apps.get_model('workshops', 'Person')
    Airport = apps.get_model('workshops', 'Airport')
    persons = Person.objects.exclude(airport=None)
    # This special construction uses Subquery abilities introduced in
    # Django v1.11; they make possible forcing Django ORM to use SQL subqueries.
    # https://docs.djangoproject.com/en/2.0/ref/models/expressions/#subquery-expressions
    # First, a list of airports corresponding to person IDs (to be grabbed from
    # outer query) is generated.
    airports = Airport.objects.filter(person=OuterRef('pk'))
    # Then a subquery using only one column and producing only one row.
    sq = Subquery(airports.values('country')[:1])
    # Finally, the update.
    persons.update(country=sq)
class Migration(migrations.Migration):
    # Adds Person.country and backfills it from each person's airport.
    dependencies = [
        ('workshops', '0143_auto_20180712_1337'),
    ]
    operations = [
        migrations.AddField(
            model_name='person',
            name='country',
            field=django_countries.fields.CountryField(blank=True, default='', help_text="Person's country of residence.", max_length=2),
        ),
        # Data migration; NOTE(review): no reverse_code is supplied, so this
        # migration cannot be unapplied cleanly -- confirm that is intended.
        migrations.RunPython(update_country_from_airport),
    ]
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the shared Ithorian pants (style s05) wearable Tangible.

    Autogenerated SWG template factory; hand edits belong only between the
    BEGIN/END MODIFICATIONS markers below.
    """
    result = Tangible()
    result.template = "object/tangible/wearables/ithorian/shared_ith_pants_s05.iff"
    result.attribute_template_id = 11
    result.stfName("wearables_name","ith_pants_s05")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
|
# qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the experiment circuit on ``n`` qubits.

    NOTE(review): this function reads the module-level globals ``E``, ``V``,
    ``gamma`` and ``beta`` that are only defined in the __main__ block below,
    so it is callable only after that block has run. It also addresses
    input_qubit[3] directly, which assumes n >= 4 -- TODO confirm.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    # Uniform superposition over all qubits.
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=5
    # Cost layer: one controlled-phase per edge of the global graph E.
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    # Mixer layer over all vertices.
    prog.rx(2 * beta, range(len(V)))
    # Extra gates below cancel pairwise (swap/swap, x/x) -- kept as emitted.
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.swap(input_qubit[1],input_qubit[0]) # number=7
    prog.x(input_qubit[2]) # number=8
    prog.x(input_qubit[2]) # number=9
    # circuit end
    return prog
if __name__ == '__main__':
    # Problem instance: a 4-node weighted graph.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid-search a closed-form objective F1 over (gamma, beta) and keep the
    # maximising pair as the circuit angles.
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    # Build, transpile for a noisy fake backend, measure and dump results.
    prog = make_circuit(4)
    sample_shot =3962
    writefile = open("../data/startQiskit_noisy63.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = FakeYorktown()
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
|
#!/usr/bin/env python
"""
1a. As you have done in previous classes,
create a Python file named "my_devices.py".
In this file, define the connection information for:
'cisco3', 'arista1', 'arista2', and 'srx2'.
This file should contain all the necessary information
to create a Netmiko connection. Use getpass() for the password handling.
Use a global_delay_factor of 4 for both the arista1 device and the arista2 device.
This Python module should be used to store
the connection information for all of the exercises in this lesson.
1b. Create a Python script that executes "show version"
on each of the network devices defined in my_devices.py.
This script should execute serially i.e. one SSH connection after the other.
Record the total execution time for the script.
Print the "show version" output and the total execution time to standard output.
As part of this exercise, you should create a function
that both establishes a Netmiko connection and that executes a single show command
that you pass in as argument.
This function's arguments should be the Netmiko device dictionary
and the "show-command" argument.
The function should return the result from the show command.
"""
from netmiko import ConnectHandler
from my_devices import devices
from datetime import datetime
import time
def netmiko_show_ver(device, command):
    """Open a Netmiko SSH session to ``device`` and run one show command.

    :param device: Netmiko connection dictionary (host, credentials, ...).
    :param command: The show command to execute, e.g. "show version".
    :return: The command output as returned by Netmiko.
    """
    # The context manager guarantees the SSH session is closed on exit,
    # even though we return from inside the block.
    with ConnectHandler(**device) as conn:
        return conn.send_command(command)
show_command = "show version"
if __name__=="__main__":
    # Serial execution: one SSH session after another, timing the whole run.
    start_time = datetime.now()
    t0 = time.time()
    for device in devices:
        result = netmiko_show_ver(device, show_command)
        print("#" * 80)
        print(f"OUTPUT ----------> {device['host']}")
        print("#" * 80)
        print(result)
    end_time = datetime.now()
    t1 = time.time()
    print("#" * 80)
    print("SCRIPT FINISHED EXECUTION")
    print("#" * 80)
    # Two equivalent elapsed-time reports: a datetime delta and raw seconds.
    print(f"Execution time: {end_time - start_time}")
    print(f"Execution time: {t1 - t0:.2f}")
    print("#" * 80)
|
from pymongo import MongoClient
from igraph import Graph
mc = MongoClient()  # local MongoDB on the default host/port
db = mc.test
# Per-graph answer files; closeness.txt holds "<node_id>\t<closeness>" lines.
CLO_PATH_PARSE = 'answer/%s/closeness.txt'
DEG_PATH_PARSE = 'answer/%s/degree.txt'  # NOTE(review): unused in this module
def write_answer_into_mongo(graph_name):
    """Load precomputed closeness rankings for ``graph_name`` into MongoDB.

    Rebuilds the ``graph_name`` collection from scratch: node degrees are
    recomputed from the edge list in data/<graph_name>.txt, while closeness
    values (plus their 1-based ranking order) come from
    answer/<graph_name>/closeness.txt.
    """
    collection = db[graph_name]
    collection.drop()  # start from an empty collection
    graph = Graph.Read_Ncol('data/'+graph_name+'.txt', directed=False)
    graph = graph.simplify()  # drop loops/multi-edges before taking degrees
    degree_dict = {}
    for v in graph.vs:
        degree_dict[v['name']] = v.degree()
    with open(CLO_PATH_PARSE % graph_name) as clo:
        for line_id, line in enumerate(clo.readlines()):
            # NOTE: 'clo' is rebound here from the file handle to the value.
            id_, clo = line.strip().split('\t')
            #c = collection.find_one({'node_id': id_})
            #if not c:
            c = {'node_id': id_, 'closeness': clo, 'order': line_id+1,
                 'degree': degree_dict[id_]}
            #else:
            #    c['closeness'] = clo
            #    c['order'] = line_id + 1
            #    c['degree'] = 0
            # NOTE(review): Collection.insert is deprecated in modern pymongo
            # (insert_one); left as-is for the installed driver version.
            collection.insert(c)
    '''
    graph = Graph.Read_Ncol('data/'+graph_name+'.txt', directed=False)
    graph = graph.simplify()
    for v in graph.vs:
        c = collection.find_one({'node_id': v['name']})
        if not c:
            c = {'node_id': id_, 'degree': v.degree()}
        else:
            c['degree'] = v.degree()
        collection.save(c)
    '''
if __name__ == '__main__':
    # Load answers for the "public" graph only; the commented-out variant in
    # the string below would process every graph under answer/.
    write_answer_into_mongo('public')
'''
import os
graphs = os.listdir('answer')
for g in graphs:
    write_answer_into_mongo(g)
    print "======%s has writed ======" % g
'''
|
from multipledispatch import dispatch
from utils.DomainUtils import DomainUtils
class UserDto:
    """Data-transfer object describing a user account.

    Construction is overloaded via ``multipledispatch``: the overload chosen
    depends on the number and types of the positional arguments. Fields a
    given overload does not receive are set to None (or -1 for the id).
    Values are read back through the accessor methods below.
    """
    @dispatch(str, str)
    def __init__(self, username, password) -> None:
        """Credentials-only DTO (e.g. for a login attempt)."""
        # Bug fix: the original ended each assignment (except the last) with
        # a trailing comma, silently wrapping every value in a 1-tuple
        # (e.g. self._id == (-1,)). The commas are removed so attributes
        # hold the scalar values the accessors and __str__ expect.
        self._id = -1
        self._first_name = None
        self._last_name = None
        self._username = username
        self._password = password
        self._sq_1 = None
        self._sq_ans_1 = None
        self._sq_2 = None
        self._sq_ans_2 = None
        self._sq_3 = None
        self._sq_ans_3 = None
    @dispatch(int, str, str, str)
    def __init__(self, id, first_name, last_name, username) -> None:
        """Identity DTO without credentials or security questions."""
        # Same trailing-comma bug fixed here (see note above).
        self._id = id
        self._first_name = first_name
        self._last_name = last_name
        self._username = username
        self._password = None
        self._sq_1 = None
        self._sq_ans_1 = None
        self._sq_2 = None
        self._sq_ans_2 = None
        self._sq_3 = None
        self._sq_ans_3 = None
    @dispatch(int, str, str, str, str, str, str, str, str)
    def __init__(self, user_id,
                 username,
                 password,
                 # sq: security questions
                 sq_1,
                 sq_ans_1,
                 sq_2,
                 sq_ans_2,
                 sq_3,
                 sq_ans_3
                 ) -> None:
        """Full DTO (credentials + security questions) without names.

        NOTE: the original class registered this exact (int + 8 str)
        signature twice with identical bodies; multipledispatch keeps only
        the last registration, so the duplicate was dead code and has been
        removed.
        """
        self._id = user_id
        self._first_name = None
        self._last_name = None
        self._username = username
        self._password = password
        self._sq_1 = sq_1
        self._sq_ans_1 = sq_ans_1
        self._sq_2 = sq_2
        self._sq_ans_2 = sq_ans_2
        self._sq_3 = sq_3
        self._sq_ans_3 = sq_ans_3
    @dispatch(int, str, str, str, str, str, str, str, str, str, str)
    def __init__(self, user_id,
                 first_name,
                 last_name,
                 username,
                 password,
                 # sq: security questions
                 sq_1,
                 sq_ans_1,
                 sq_2,
                 sq_ans_2,
                 sq_3,
                 sq_ans_3
                 ) -> None:
        """Complete DTO with names, credentials and security questions."""
        self._id = user_id
        self._first_name = first_name
        self._last_name = last_name
        self._username = username
        self._password = password
        self._sq_1 = sq_1
        self._sq_ans_1 = sq_ans_1
        self._sq_2 = sq_2
        self._sq_ans_2 = sq_ans_2
        self._sq_3 = sq_3
        self._sq_ans_3 = sq_ans_3
    def __str__(self) -> str:
        """Render all fields, normalised by DomainUtils."""
        to_string = f"UserDto[id:{self._id},first_name:\'{self._first_name}\',last_name:\'{self._last_name}\',username:\'{self._username}\',password:\'{self._password}\',sq_1:\'{self._sq_1}\',sq_ans_1:\'{self._sq_ans_1}\',sq_2:\'{self._sq_2}\',sq_ans_2:\'{self._sq_ans_2}\',sq_3:\'{self._sq_3}\',sq_ans_3:\'{self._sq_ans_3}\']"
        return DomainUtils.correct_obj_str_format(to_string)
    # Plain accessor methods are kept as-is: callers use dto.username() etc.,
    # so converting them to properties would break the interface.
    def id(self):
        return self._id
    def first_name(self):
        return self._first_name
    def last_name(self):
        return self._last_name
    def username(self):
        return self._username
    def password(self):
        return self._password
    def sq_1(self):
        return self._sq_1
    def sq_ans_1(self):
        return self._sq_ans_1
    def sq_2(self):
        return self._sq_2
    def sq_ans_2(self):
        return self._sq_ans_2
    def sq_3(self):
        return self._sq_3
    def sq_ans_3(self):
        return self._sq_ans_3
    def get_security_questions(self):
        """Return the three security questions as a list."""
        return [self._sq_1, self._sq_2, self._sq_3]
    def authenticate(self, password):
        """Return True when ``password`` matches the stored password."""
        return self._password == password
|
"""Miscellaneous internal PyJanitor helper functions."""
import functools
import os
import sys
import warnings
from itertools import chain, product
from typing import Callable, Dict, List, Optional, Pattern, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from .errors import JanitorError
def check(varname: str, value, expected_types: list):
    """
    One-liner syntactic sugar for checking types.

    Should be used like this::

        check('x', x, [int, float])

    :param varname: The name of the variable.
    :param value: The value of the varname.
    :param expected_types: The types we expect the item to be.
    :raises TypeError: if ``value`` is not an instance of any of
        ``expected_types``. (Fixed: the old docstring wrongly said the
        error was *returned*.)
    """
    # isinstance accepts a tuple of types, replacing the manual loop.
    if not isinstance(value, tuple(expected_types)):
        raise TypeError(
            "{varname} should be one of {expected_types}".format(
                varname=varname, expected_types=expected_types
            )
        )
def _clean_accounting_column(x: str) -> float:
"""
Perform the logic for the `cleaning_style == "accounting"` attribute.
This is a private function, not intended to be used outside of
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method.
:returns: An object with a cleaned column.
"""
y = x.strip()
y = y.replace(",", "")
y = y.replace(")", "")
y = y.replace("(", "-")
if y == "-":
return 0.00
return float(y)
def _currency_column_to_numeric(x, cast_non_numeric=None) -> str:
"""
Perform logic for changing cell values.
This is a private function intended to be used only in
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method, after being passed
through `partial`.
"""
acceptable_currency_characters = {
"-",
".",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"0",
}
if len(x) == 0:
return "ORIGINAL_NA"
if cast_non_numeric:
if x in cast_non_numeric.keys():
check(
"{%r: %r}" % (x, str(cast_non_numeric[x])),
cast_non_numeric[x],
[int, float],
)
return cast_non_numeric[x]
else:
return "".join(i for i in x if i in acceptable_currency_characters)
else:
return "".join(i for i in x if i in acceptable_currency_characters)
def _replace_empty_string_with_none(column_series):
column_series.loc[column_series == ""] = None
return column_series
def _replace_original_empty_string_with_none(column_series):
column_series.loc[column_series == "ORIGINAL_NA"] = None
return column_series
def _strip_underscores(
    df: pd.DataFrame, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
    """
    Strip underscores from DataFrames column names.

    Underscores can be stripped from the beginning, end or both sides of
    every column name; the per-name work is delegated to
    ``_strip_underscores_func``.

    :param df: The pandas DataFrame object.
    :param strip_underscores: (optional) 'left'/'l', 'right'/'r', 'both' or
        True; the default None keeps outer underscores.
    :returns: A pandas DataFrame with underscores removed.
    """
    mapping = {
        col: _strip_underscores_func(col, strip_underscores)
        for col in df.columns
    }
    return df.rename(columns=mapping)
def _strip_underscores_func(
col: str, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""Strip underscores from a string."""
underscore_options = [None, "left", "right", "both", "l", "r", True]
if strip_underscores not in underscore_options:
raise JanitorError(
f"strip_underscores must be one of: {underscore_options}"
)
if strip_underscores in ["left", "l"]:
col = col.lstrip("_")
elif strip_underscores in ["right", "r"]:
col = col.rstrip("_")
elif strip_underscores == "both" or strip_underscores is True:
col = col.strip("_")
return col
def import_message(
    submodule: str,
    package: str,
    conda_channel: str = None,
    pip_install: bool = False,
):
    """
    Return warning if package is not found.

    Generic message for indicating to the user when a function relies on an
    optional module / package that is not currently installed. Includes
    installation instructions. Used in `chemistry.py` and `biology.py`.

    :param submodule: pyjanitor submodule that needs an external dependency.
    :param package: External package this submodule relies on.
    :param conda_channel: Conda channel package can be installed from,
        if at all.
    :param pip_install: Whether package can be installed via pip.
    """
    # Detect a conda environment by the conda-meta directory in sys.prefix.
    is_conda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
    installable = True
    if is_conda:
        if conda_channel is None:
            # In a conda env but no channel given: nothing we can suggest.
            installable = False
            installation = f"{package} cannot be installed via conda"
        else:
            installation = f"conda install -c {conda_channel} {package}"
    else:
        if pip_install:
            installation = f"pip install {package}"
        else:
            installable = False
            installation = f"{package} cannot be installed via pip"
    print(
        f"To use the janitor submodule {submodule}, you need to install "
        f"{package}."
    )
    print()
    if installable:
        print("To do so, use the following command:")
        print()
        print(f"    {installation}")
    else:
        print(f"{installation}")
def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):
    """
    Raise an error if a function operating on a `DataFrame` is not idempotent.

    Idempotency here means `func(func(df)) == func(df)` for the given `df`.

    :param func: A python method.
    :param df: A pandas `DataFrame`.
    :param args: Positional arguments supplied to the method.
    :param kwargs: Keyword arguments supplied to the method.
    :raises ValueError: If `func` is found to not be idempotent for the given
        `DataFrame` `df`.
    """
    applied_once = func(df, *args, **kwargs)
    applied_twice = func(func(df, *args, **kwargs), *args, **kwargs)
    if not applied_once == applied_twice:
        raise ValueError(
            "Supplied function is not idempotent for the given DataFrame."
        )
def deprecated_alias(**aliases) -> Callable:
    """
    Decorator for deprecating old function argument names while keeping
    backwards compatibility.

    Implementation is inspired from `StackOverflow`_.

    .. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias

    Functional usage example:

    .. code-block:: python

        @deprecated_alias(a='alpha', b='beta')
        def simple_sum(alpha, beta):
            return alpha + beta

    :param aliases: Dictionary of aliases for a function's arguments.
    :return: Your original function wrapped with the kwarg redirection
        function.
    """  # noqa: E501

    def _decorate(func):
        @functools.wraps(func)
        def _redirected(*args, **kwargs):
            # Translate any deprecated keyword names before delegating.
            rename_kwargs(func.__name__, kwargs, aliases)
            return func(*args, **kwargs)

        return _redirected

    return _decorate
def refactored_function(message: str) -> Callable:
    """Used as a decorator when refactoring functions.

    Implementation is inspired from `Hacker Noon`_.

    .. Hacker Noon: https://hackernoon.com/why-refactoring-how-to-restructure-python-package-51b89aa91987

    Functional usage example:

    .. code-block:: python

        @refactored_function(
            message="simple_sum() has been refactored. Use hard_sum() instead."
        )
        def simple_sum(alpha, beta):
            return alpha + beta

    :param message: Message to use in warning user about refactoring.
    :return: Your original function wrapped with the kwarg redirection
        function.
    """  # noqa: E501

    def decorator(func):
        # functools.wraps preserves the wrapped function's name and
        # docstring, matching the behaviour of `deprecated_alias`.
        @functools.wraps(func)
        def emit_warning(*args, **kwargs):
            warnings.warn(message, FutureWarning)
            return func(*args, **kwargs)

        return emit_warning

    return decorator
def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
    """
    Update deprecated argument names with new names, in place.

    Raises a TypeError if both the old and new argument are provided, and
    emits a DeprecationWarning when the old alias is used.
    Implementation is inspired from `StackOverflow`_.

    .. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias

    :param func_name: name of decorated function.
    :param kwargs: Arguments supplied to the method.
    :param aliases: Dictionary of aliases for a function's arguments.
    :return: Nothing; the passed `kwargs` are modified directly.
    """  # noqa: E501
    for old_alias, new_alias in aliases.items():
        if old_alias not in kwargs:
            continue
        if new_alias in kwargs:
            raise TypeError(
                f"{func_name} received both {old_alias} and {new_alias}"
            )
        warnings.warn(
            f"{old_alias} is deprecated; use {new_alias}",
            DeprecationWarning,
        )
        kwargs[new_alias] = kwargs.pop(old_alias)
def check_column(
    df: pd.DataFrame, old_column_names: List, present: bool = True
):
    """
    One-liner syntactic sugar for checking the presence or absence of a column.

    Should be used like this::

        check_column(df, ['a', 'b'], present=True)

    :param df: The pandas DataFrame to check.
    :param old_column_names: A list of column names we want to check to see if
        present (or absent) in df.
    :param present: If True (default), checks to see if all of old_column_names
        are in df.columns. If False, checks that none of old_column_names are
        in df.columns.
    :raises ValueError: If a column is missing (when ``present=True``) or
        already present (when ``present=False``).
    """
    # Docstring fixed: this function raises ValueError rather than
    # returning it, and the usage example now names `check_column`.
    for column_name in old_column_names:
        if present:
            if column_name not in df.columns:
                raise ValueError(
                    f"{column_name} not present in dataframe columns!"
                )
        else:  # Tests for exclusion
            if column_name in df.columns:
                raise ValueError(
                    f"{column_name} already present in dataframe columns!"
                )
def skipna(f: Callable) -> Callable:
    """
    Decorator that short-circuits np.nan and None inputs.

    Should be used like this::

        df[column].apply(skipna(transform))

    or::

        @skipna
        def transform(x):
            pass

    :param f: the function to be wrapped
    :returns: _wrapped, the wrapped function
    """

    def _wrapped(x, *args, **kwargs):
        # Missing values (None or a float NaN) never reach `f`;
        # they map straight to np.nan.
        if x is None or (type(x) is float and np.isnan(x)):
            return np.nan
        return f(x, *args, **kwargs)

    return _wrapped
def skiperror(
    f: Callable, return_x: bool = False, return_val=np.nan
) -> Callable:
    """
    Decorator that swallows errors raised by a function.

    Should be used like this::

        df[column].apply(
            skiperror(transform, return_val=3, return_x=False))

    or::

        @skiperror(return_val=3, return_x=False)
        def transform(x):
            pass

    :param f: the function to be wrapped
    :param return_x: whether or not the original value that caused error
        should be returned
    :param return_val: the value to be returned when an error hits.
        Ignored if return_x is True
    :returns: _wrapped, the wrapped function
    """

    def _wrapped(x, *args, **kwargs):
        try:
            result = f(x, *args, **kwargs)
        except Exception:
            # On any failure, either echo the offending input back or
            # fall back to the configured sentinel value.
            return x if return_x else return_val
        return result

    return _wrapped
def _check_instance(entry: Dict):
"""
Function to check instances in the expand_grid function.
This checks if entry is a dictionary,
checks the instance of value in key:value pairs in entry,
and makes changes to other types as deemed necessary.
Additionally, ValueErrors are raised if empty containers are
passed in as values into the dictionary.
How each type is handled, and their associated exceptions,
are pretty clear from the code.
"""
# dictionary should not be empty
if not entry:
raise ValueError("passed dictionary cannot be empty")
# couple of checks that should cause the program to fail early
# if conditions are not met
for _, value in entry.items():
if isinstance(value, np.ndarray):
if value.size == 0:
raise ValueError("array cannot be empty")
if value.ndim > 2:
raise ValueError(
"expand_grid works only on 1D and 2D structures."
)
if isinstance(value, (pd.DataFrame, pd.Series)):
if value.empty:
raise ValueError("passed DataFrame cannot be empty")
if isinstance(value, (list, tuple, set, dict)):
if not value:
raise ValueError("passed data cannot be empty")
entry = {
# If it is a scalar value, then wrap in a list
# this is necessary, as we will use the itertools.product function
# which works only on iterables.
key: [value]
if isinstance(value, (type(None), int, float, bool, str, np.generic))
else value
for key, value in entry.items()
}
return entry
def _grid_computation(entry: Dict) -> pd.DataFrame:
    """
    Return the final output of the expand_grid function as a dataframe.

    This kicks in after the ``_check_instance`` function is completed,
    and essentially creates a cross join of the values in the `entry`
    dictionary. If the `entry` dictionary is a collection of lists/tuples,
    then `itertools.product` will be used for the cross join, before a
    dataframe is created; if however, the `entry` contains a pandas dataframe
    or a pandas series or a numpy array, then identical indices are created for
    each entry and `pandas DataFrame join` is called to create the cross join.
    """
    # checks if the dictionary does not have any of
    # (pd.Dataframe, pd.Series, numpy) values and uses itertools.product.
    # numpy meshgrid is faster, but requires homogenous data to appreciate
    # the speed, and also to keep the data type for each column created.
    # As an example, if we have a mix in the dictionary of strings and numbers,
    # numpy will convert it to an object data type. Itertools product is
    # efficient and does not lose the data type.
    if not any(
        isinstance(value, (pd.DataFrame, pd.Series, np.ndarray))
        for key, value in entry.items()
    ):
        df_expand_grid = (value for key, value in entry.items())
        df_expand_grid = product(*df_expand_grid)
        # the dictionary keys become the column labels directly
        return pd.DataFrame(df_expand_grid, columns=entry)
    # dictionary is a mix of different types - dataframe/series/numpy/...
    # so we check for each data type- if it is a pandas dataframe, then convert
    # to numpy and add to `df_expand_grid`; the other data types are added to
    # `df_expand_grid` as is. For each of the data types, new column names are
    # created if they do not have, and modified if names already exist. These
    # names are built through the for loop below and added to `df_columns`
    df_columns = []
    df_expand_grid = []
    for key, value in entry.items():
        if isinstance(value, pd.DataFrame):
            df_expand_grid.append(value.to_numpy())
            if isinstance(value.columns, pd.MultiIndex):
                # MultiIndex labels are replaced by positional suffixes
                df_columns.extend(
                    [f"{key}_{ind}" for ind, col in enumerate(value.columns)]
                )
            else:
                df_columns.extend([f"{key}_{col}" for col in value])
        elif isinstance(value, pd.Series):
            df_expand_grid.append(np.array(value))
            if value.name:
                df_columns.append(f"{key}_{value.name}")
            else:
                df_columns.append(str(key))
        elif isinstance(value, np.ndarray):
            df_expand_grid.append(value)
            if value.ndim == 1:
                df_columns.append(f"{key}_0")
            else:
                # one label per trailing-axis column of the 2D array
                df_columns.extend(
                    [f"{key}_{ind}" for ind in range(value.shape[-1])]
                )
        else:
            df_expand_grid.append(value)
            df_columns.append(key)
    # here we run the product function from itertools only if there is
    # more than one item in the list; if only one item, we simply
    # create a dataframe with the new column names from `df_columns`
    if len(df_expand_grid) > 1:
        df_expand_grid = product(*df_expand_grid)
        # flatten each product row: scalar entries are wrapped in a list,
        # while array-like entries are spread into individual cells
        df_expand_grid = (
            chain.from_iterable(
                [val]
                if not isinstance(val, (pd.DataFrame, pd.Series, np.ndarray))
                else val
                for val in value
            )
            for value in df_expand_grid
        )
        return pd.DataFrame(df_expand_grid, columns=df_columns)
    # NOTE(review): single-entry case — the lone array is star-unpacked as
    # the DataFrame's data argument; verify this is intended for 2D inputs.
    return pd.DataFrame(*df_expand_grid, columns=df_columns)
def _complete_groupings(df, list_of_columns):
    """
    Prepare `df` for reindexing over all combinations of the given groupings.

    Each item in `list_of_columns` may be a column label (str), a list/tuple
    of column labels, or a dictionary mapping column labels to the values the
    reindexed frame should contain (allowing values that do not occur in the
    data, e.g. missing years).

    :returns: A tuple of the dataframe (indexed by the collected grouping
        columns) and the list of index tuples to reindex with.
    :raises ValueError: If an item has an unsupported type or is empty.
    """
    # this collects all the columns as individual labels, which will be
    # used to set the index of the dataframe
    index_columns = []
    # this will collect all the values associated with the respective
    # columns, and used to reindex the dataframe, to get the complete
    # pairings
    reindex_columns = []
    for item in list_of_columns:
        if not isinstance(item, (str, dict, list, tuple)):
            raise ValueError(
                """Value must either be a column label, a list/tuple of columns or a
                dictionary where the keys are columns in the dataframe."""
            )
        if not item:
            raise ValueError("grouping cannot be empty")
        if isinstance(item, str):
            reindex_columns.append(set(df[item].array))
            index_columns.append(item)
        else:
            # this comes into play if we wish to input values that
            # do not exist in the data, say years, or alphabets, or
            # range of numbers
            if isinstance(item, dict):
                if len(item) > 1:
                    index_columns.extend(item.keys())
                else:
                    index_columns.append(*item.keys())
                item_contents = [
                    # convert scalars to iterables; this is necessary
                    # when creating combinations with itertools' product
                    [value]
                    if isinstance(value, (int, float, str, bool))
                    else value
                    for key, value in item.items()
                ]
                reindex_columns.extend(item_contents)
            else:
                index_columns.extend(item)
                # TODO : change this to read as a numpy instead
                # instead of a list comprehension
                # it should be faster
                # grouped columns are kept together as observed tuples
                item = (df[sub_column].array for sub_column in item)
                item = set(zip(*item))
                reindex_columns.append(item)
    reindex_columns = product(*reindex_columns)
    # A list comprehension, coupled with itertools chain.from_iterable
    # would likely be faster; I fear that it may hamper readability with
    # nested list comprehensions; as such, I chose the for loop method.
    # Flatten nested tuples so each reindex entry is a flat tuple of values.
    new_reindex_columns = []
    for row in reindex_columns:
        new_row = []
        for cell in row:
            if isinstance(cell, tuple):
                new_row.extend(cell)
            else:
                new_row.append(cell)
        new_reindex_columns.append(tuple(new_row))
    df = df.set_index(index_columns)
    return df, new_reindex_columns
def _data_checks_pivot_longer(
    df, index, column_names, names_sep, names_pattern, names_to, values_to
):
    """
    This function raises errors or warnings if the arguments have the wrong
    python type, or if an unneeded argument is provided.

    This function is executed before proceeding to the computation phase.
    Type annotations are not provided because this function is where type
    checking happens.

    :returns: The validated (and possibly normalised) arguments, in the same
        order they were received; `index`/`column_names` strings are wrapped
        in single-element lists.
    """
    # MultiIndex frames are not rejected outright, only warned about.
    if any(
        (
            isinstance(df.index, pd.MultiIndex),
            isinstance(df.columns, pd.MultiIndex),
        ),
    ):
        warnings.warn(
            """pivot_longer is designed for single index dataframes and
            may produce unexpected results for multiIndex dataframes;
            for such cases, kindly use pandas.melt."""
        )
    if index is not None:
        if isinstance(index, str):
            # normalise a single label to a list for uniform handling
            index = [index]
        check("index", index, [list, tuple, Pattern])
    if column_names is not None:
        if isinstance(column_names, str):
            column_names = [column_names]
        check("column_names", column_names, [list, tuple, Pattern])
    if names_to is not None:
        check("names_to", names_to, [list, tuple, str])
        if isinstance(names_to, (list, tuple)):
            if not all(isinstance(word, str) for word in names_to):
                raise TypeError(
                    "All entries in `names_to` argument must be strings."
                )
        # names_sep and names_pattern are mutually exclusive when splitting
        # into multiple columns.
        if len(names_to) > 1:
            if all((names_pattern is not None, names_sep is not None)):
                raise ValueError(
                    """Only one of names_pattern or names_sep
                    should be provided."""
                )
        if isinstance(names_to, str) or (len(names_to) == 1):
            # names_sep creates more than one column
            # whereas regex with names_pattern can be limited to one column
            if names_sep is not None:
                raise ValueError(
                    """
                    For a single names_to value,
                    names_sep is not required.
                    """
                )
    if names_pattern is not None:
        check("names_pattern", names_pattern, [str, Pattern])
    if names_sep is not None:
        check("names_sep", names_sep, [str, Pattern])
    check("values_to", values_to, [str])
    return (
        df,
        index,
        column_names,
        names_sep,
        names_pattern,
        names_to,
        values_to,
    )
def _pivot_longer_pattern_match(
df: pd.DataFrame,
index: Optional[Union[str, Pattern]] = None,
column_names: Optional[Union[str, Pattern]] = None,
) -> Tuple:
"""
This checks if a pattern (regular expression) is supplied
to index or columns and extracts the names that match the
given regular expression.
"""
if isinstance(column_names, Pattern):
column_names = [col for col in df if column_names.search(col)]
if isinstance(index, Pattern):
index = [col for col in df if index.search(col)]
return df, index, column_names
def _reindex_func(frame: pd.DataFrame, indexer=None) -> pd.DataFrame:
"""
Function to reshape dataframe in pivot_longer, to try and make it look
similar to the source data in terms of direction of the columns. It is a
temporary measure until the minimum pandas version is 1.1, where we can
take advantage of the `ignore_index` argument in `pd.melt`.
Example: if columns are `id, ht1, ht2, ht3`, then depending on the
arguments passed, the column in the reshaped dataframe, based on this
function, will look like `1,2,3,1,2,3,1,2,3...`. This way, we ensure that
for every index, there is a complete set of the data.
A reindexed dataframe is returned.
"""
if indexer is None:
uniq_index_length = len(frame.drop_duplicates())
else:
uniq_index_length = len(frame.loc[:, indexer].drop_duplicates())
if "index" in indexer:
frame = frame.drop("index", axis=1)
sorter = np.reshape(frame.index, (-1, uniq_index_length))
# reshaped in Fortan order achieves the alternation
sorter = np.ravel(sorter, order="F")
return frame.reindex(sorter)
def _computations_pivot_longer(
    df: pd.DataFrame,
    index: Optional[Union[List, Tuple]] = None,
    column_names: Optional[Union[List, Tuple]] = None,
    names_sep: Optional[Union[str, Pattern]] = None,
    names_pattern: Optional[Union[str, Pattern]] = None,
    names_to: Optional[Union[List, Tuple, str]] = None,
    values_to: Optional[str] = "value",
) -> pd.DataFrame:
    """
    This is the main workhorse of the `pivot_longer` function.

    There are a couple of scenarios that this function takes care of when
    unpivoting :

    1. Regular data unpivoting is covered with pandas melt.
    2. if the length of `names_to` is > 1, the function unpivots the data,
       using `pd.melt`, and then separates into individual columns, using
       `str.split(expand=True)` if `names_sep` is provided or
       `str.extractall()` if `names_pattern is provided. The labels in
       `names_to` become the new column names.
    3. If `names_to` contains `.value`, then the function replicates
       `pd.wide_to_long`, using `pd.melt`. Unlike `pd.wide_to_long`, the
       stubnames do not have to be prefixes, they just need to match the
       position of `.value` in `names_to`. Just like in 2 above, the columns
       are separated into individual columns. The labels in the column
       corresponding to `.value` become the new column names, and override
       `values_to` in the process. The other extracted column stays
       (if len(`names_to`) is > 1), with the other name in `names_to` as
       its column name.

    The function also tries to emulate the way the source data is structured.
    Say data looks like this :
        id, a1, a2, a3, A1, A2, A3
         1, a, b, c, A, B, C

    when pivoted into long form, it will look like this :
              id instance    a     A
        0     1     1        a     A
        1     1     2        b     B
        2     1     3        c     C

    where the columns `a` comes before `A`, as it was in the source data,
    and in column `a`, `a > b > c`, also as it was in the source data.
    This also visually creates a complete set of the data per index.
    """
    if index is not None:
        check_column(df, index, present=True)
        # this should take care of non unique index
        # we'll get rid of the extra in _reindex_func
        # TODO: what happens if `index` is already a name
        # in the columns?
        if df.loc[:, index].duplicated().any():
            df = df.reset_index()
            index = ["index"] + index

    if column_names is not None:
        check_column(df, column_names, present=True)

    # with only column_names supplied, every other column is the index
    if index is None and (column_names is not None):
        index = df.columns.difference(column_names)

    # scenario 1
    if all((names_pattern is None, names_sep is None)):
        df = pd.melt(
            df,
            id_vars=index,
            value_vars=column_names,
            var_name=names_to,
            value_name=values_to,
        )
        # reshape in the order that the data appears
        # this should be easier to do with ignore_index in pandas version 1.1
        if index is not None:
            df = _reindex_func(df, index).reset_index(drop=True)
            # attempt numeric conversion; non-numeric columns are kept as-is
            return df.transform(pd.to_numeric, errors="ignore")
        return df

    # scenario 2
    if any((names_pattern is not None, names_sep is not None)):
        # should avoid conflict if index/columns has a string named `variable`
        uniq_name = "*^#variable!@?$%"
        df = pd.melt(
            df, id_vars=index, value_vars=column_names, var_name=uniq_name
        )
        # pd.melt returns uniq_name and value as the last columns. We can use
        # that knowledge to get the data before( the index column(s)),
        # the data between (our uniq_name column),
        # and the data after (our values column)
        position = df.columns.get_loc(uniq_name)
        if position == 0:
            before_df = pd.DataFrame([], index=df.index)
        else:
            # just before uniq_name column
            before_df = df.iloc[:, :-2]
        after_df = df.iloc[:, -1].rename(values_to)
        between_df = df.pop(uniq_name)
        # split the melted variable column into one column per target label
        if names_sep is not None:
            between_df = between_df.str.split(names_sep, expand=True)
        else:
            between_df = between_df.str.extractall(names_pattern).droplevel(-1)
        # set_axis function labels argument takes only list-like objects
        if isinstance(names_to, str):
            names_to = [names_to]
        if len(names_to) != between_df.shape[-1]:
            raise ValueError(
                """
                Length of ``names_to`` does not match
                number of columns extracted.
                """
            )
        before_df = _reindex_func(before_df, index)
        between_df = between_df.set_axis(names_to, axis="columns")
        # Detour for paired columns, where the user wants one part of the
        # paired column name to become a header. The `.value` marker in
        # `names_to` indicates that that particular extracted value becomes
        # a column header — another way of achieving pandas wide_to_long
        # (see the wide_to_long docs for the famid/birth/ht example this
        # mirrors). For every index a complete set of the data is kept, and
        # the repeats of the non-`.value` part simulate the order of the
        # source columns (`ht1 > ht2`), so the user can visually verify the
        # unpivoted data for each index.
        # scenario 3
        if ".value" in names_to:
            if names_to.count(".value") > 1:
                raise ValueError(
                    "Column name `.value` must not be duplicated."
                )
            # extract new column names and assign category dtype
            after_df_cols = pd.unique(between_df.loc[:, ".value"])
            dot_value_dtype = CategoricalDtype(after_df_cols, ordered=True)
            between_df = between_df.astype({".value": dot_value_dtype})
            if len(names_to) > 1:
                other_header = between_df.columns.difference([".value"])[0]
                other_header_values = pd.unique(
                    between_df.loc[:, other_header]
                )
                other_header_dtype = CategoricalDtype(
                    other_header_values, ordered=True
                )
                between_df = between_df.astype(
                    {other_header: other_header_dtype}
                )
                between_df = between_df.sort_values([".value", other_header])
            else:
                other_header = None
                other_header_values = None
                # index order not assured if just .value and quicksort
                between_df = between_df.sort_values(
                    [".value"], kind="mergesort"
                )
            # reshape index_sorter and use the first column as the index
            # of the reshaped after_df. after_df will be reshaped into
            # specific number of columns, based on the length of
            # `after_df_cols`
            index_sorter = between_df.index
            after_df = after_df.reindex(index_sorter).to_numpy()
            after_index = np.reshape(
                index_sorter, (-1, len(after_df_cols)), order="F"
            )
            after_index = after_index[:, 0]
            after_df = np.reshape(
                after_df, (-1, len(after_df_cols)), order="F"
            )
            after_df = pd.DataFrame(
                after_df, columns=after_df_cols, index=after_index
            )
            # if `names_to` has a length more than 1,
            # then we need to sort the other header, so that there is
            # an alternation, ensuring a complete representation of
            # each value per index.
            # if, however, `names_to` is of length 1, then between_df
            # will be an empty dataframe, and its index will be the
            # same as the index of `after_df`
            # once the indexes are assigned to before, after, and between
            # we can recombine with a join to get the proper alternation
            # and complete data per index/section
            if other_header:
                other_header_index = np.reshape(
                    after_index, (-1, len(other_header_values)), order="F"
                )
                other_header_index = np.ravel(other_header_index)
                between_df = between_df.loc[other_header_index, [other_header]]
            else:
                other_header_index = None
                between_df = pd.DataFrame([], index=after_index)
            if position == 0:  # no index or column_names supplied
                df = pd.DataFrame.join(between_df, after_df, how="inner")
            else:
                df = pd.DataFrame.join(
                    before_df, [between_df, after_df], how="inner"
                )
            return df.reset_index(drop=True).transform(
                pd.to_numeric, errors="ignore"
            )
        # this kicks in if there is no `.value` in `names_to`
        # here we reindex the before_df, to simulate the order of the columns
        # in the source data.
        df = pd.DataFrame.join(
            before_df, [between_df, after_df], how="inner"
        ).reset_index(drop=True)
        return df.transform(pd.to_numeric, errors="ignore")
|
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn import datasets
# Load the iris dataset
iris = datasets.load_iris()
data = iris['data']
# Fit k-means to partition the samples into 3 clusters
model = cluster.KMeans(n_clusters=3)
model.fit(data)
# Retrieve the cluster label assigned to each sample
labels = model.labels_
### Chart plotting
MARKERS = ["o", "^" , "*" , "v", "+", "x", "d", "p", "s", "1", "2"]
# Helper that draws a scatter plot for the features at the given indices
def scatter_by_features(feat_idx1, feat_idx2):
    """Scatter-plot each cluster over the two selected feature axes."""
    for lbl in range(labels.max() + 1):
        members = data[labels == lbl]
        plt.scatter(
            members[:, feat_idx1],
            members[:, feat_idx2],
            c='black',
            alpha=0.3,
            s=100,
            marker=MARKERS[lbl],
            label='label {}'.format(lbl),
        )
    plt.xlabel(iris["feature_names"][feat_idx1], fontsize='xx-large')
    plt.ylabel(iris["feature_names"][feat_idx2], fontsize='xx-large')
plt.figure(figsize=(16, 16))
# features "sepal length" vs "sepal width"
plt.subplot(3, 2, 1)
scatter_by_features(0, 1)
# features "sepal length" vs "petal length"
plt.subplot(3, 2, 2)
scatter_by_features(0, 2)
# features "sepal length" vs "petal width"
plt.subplot(3, 2, 3)
scatter_by_features(0, 3)
# features "sepal width" vs "petal length"
plt.subplot(3, 2, 4)
scatter_by_features(1, 2)
# features "sepal width" vs "petal width"
plt.subplot(3, 2, 5)
scatter_by_features(1, 3)
# features "petal length" vs "petal width"
plt.subplot(3, 2, 6)
scatter_by_features(2, 3)
plt.tight_layout()
plt.show()
|
from tkinter import *
from weather_display import *
# Create the main application window.
root = Tk()
# Build the weather display frame at grid position (0, 0) and
# populate it with its widgets.
frame = CustomFrame("Weather Display", row = 0, column= 0)
frame.make_widgets()
# Fix the window size and enter the Tk event loop.
root.geometry("550x250")
root.mainloop()
|
#!/usr/bin/env python3
#
# Copyright (c) 2021 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""WLAN Information API Manager."""
from empower_core.service import EService
from empower_core.serialize import serializable_dict
from lightedge_wia_manager.managers.wiamanager.querieshandler \
import QueriesHandler
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 8888
@serializable_dict
class APInfo():
    """Information on APs available from the WLAN Access Information
    Service."""

    def __init__(self):
        # All fields start unset (None) until populated by the service.
        fields = (
            "apId",
            "channel",
            "wlanCap",
            "wanMetrics",
            "bssLoad",
            "extBssLoad",
            "apLocation",
            "apNeighbor",
        )
        self.info = dict.fromkeys(fields)

    def to_dict(self):
        """Return JSON-serializable representation of the object."""
        return self.info
class WIAManager(EService):
    """WLAN Information API Manager.

    Parameters:
        ctrl_host: sd-ran controller host (optional, default: 127.0.0.1)
        ctrl_port: sd-ran controller port (optional, default: 8888)
    """

    HANDLERS = [QueriesHandler]

    def __init__(self, context, service_id, ctrl_host, ctrl_port):
        super().__init__(context=context, service_id=service_id,
                         ctrl_host=ctrl_host, ctrl_port=ctrl_port)
        # Seed the AP list with a single empty APInfo entry.
        self.aps = list()
        ap1 = APInfo()
        self.aps.append(ap1)

    def get_aps(self):
        """Return the APs."""
        return self.aps

    @property
    def ctrl_host(self):
        """Return ctrl_host."""
        return self.params["ctrl_host"]

    @ctrl_host.setter
    def ctrl_host(self, value):
        """Set ctrl_host (write-once)."""
        # The controller host may only be set once, at service creation.
        if "ctrl_host" in self.params and self.params["ctrl_host"]:
            raise ValueError("Param ctrl_host can not be changed")
        self.params["ctrl_host"] = str(value)

    @property
    def ctrl_port(self):
        """Return ctrl_port."""
        return self.params["ctrl_port"]

    @ctrl_port.setter
    def ctrl_port(self, value):
        """Set ctrl_port (write-once)."""
        # The controller port may only be set once, at service creation.
        if "ctrl_port" in self.params and self.params["ctrl_port"]:
            raise ValueError("Param ctrl_port can not be changed")
        self.params["ctrl_port"] = int(value)
def launch(context, service_id, ctrl_host=DEFAULT_HOST,
           ctrl_port=DEFAULT_PORT):
    """Initialize the module and return the manager service instance."""
    return WIAManager(context=context, service_id=service_id,
                      ctrl_host=ctrl_host, ctrl_port=ctrl_port)
|
from resource_management import *
from airflow_base import AirflowBase
from resource_management.core.exceptions import ExecutionFailed
import subprocess
class AirflowServer(AirflowBase):
    """Ambari control script for the Airflow webserver and scheduler."""

    def install(self, env):
        """Install Airflow and apply the stack parameters."""
        import params
        env.set_params(params)
        self.install_airflow(env)
        print("Installing Airflow")

    def configure(self, env):
        """Write out configuration and reload both services."""
        import params
        env.set_params(params)
        self.configure_airflow(env)
        Execute("service airflow_webserver reload")
        Execute("service airflow_scheduler reload")

    def start(self, env):
        """Start the webserver and scheduler services."""
        print("Starting airflow")
        Execute("service airflow_webserver start")
        Execute("service airflow_scheduler start")

    def stop(self, env):
        """Stop the webserver and scheduler services."""
        print("Stopping airflow")
        Execute("service airflow_webserver stop")
        Execute("service airflow_scheduler stop")

    def restart(self, env):
        """Re-apply configuration and restart both services."""
        self.configure_airflow(env)
        # Fixed typo in the status message ("Restartarting").
        print("Restarting airflow")
        Execute("service airflow_webserver restart")
        Execute("service airflow_scheduler restart")

    def status(self, env):
        """Check the status of both services."""
        print("Checking airflow status...")
        # Quote style normalised to double quotes for consistency.
        Execute("service airflow_webserver status")
        Execute("service airflow_scheduler status")
if __name__ == "__main__":
    # Entry point: dispatch the Ambari lifecycle command
    # (install/configure/start/stop/restart/status).
    AirflowServer().execute()
|
import config
import models
import tensorflow as tf
import numpy as np
con = config.Config()
#Input training files from benchmarks/FB15K/ folder.
con.set_in_path("./benchmarks/FB15K/")
#True: Input test files from the same folder.
con.set_test_link_prediction(True)
con.set_test_triple_classification(True)
con.set_log_on(0)
con.set_work_threads(8)
con.set_train_times(10)
con.set_nbatches(1000)
# presumably the learning rate — confirm in config.Config
con.set_alpha(0.001)
con.set_margin(1.0)
con.set_bern(0)
con.set_dimension(100)
con.set_ent_neg_rate(1)
con.set_rel_neg_rate(0)
con.set_opt_method("SGD")
#Models will be exported via tf.Saver() automatically.
con.set_export_files("./res/model.vec.tf", 0)
#Model parameters will be exported to json files automatically.
con.set_out_files("./res/embedding.vec.json")
#Initialize experimental settings.
con.init()
#Set the knowledge embedding model
con.set_model(models.TransH)
#Train the model.
con.run()
#To test models after training needs "set_test_flag(True)".
# NOTE(review): no set_test_flag call appears above — verify whether the
# set_test_link_prediction/set_test_triple_classification calls supersede it.
con.test()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Activity(Model):
    """An Activity is the basic communication type for the Bot Framework 3.0
    protocol.
    :param type: The type of the activity. Possible values include: 'message',
     'contactRelationUpdate', 'conversationUpdate', 'typing', 'ping',
     'endOfConversation', 'event', 'invoke', 'deleteUserData', 'messageUpdate',
     'messageDelete', 'installationUpdate', 'messageReaction', 'suggestion',
     'trace'
    :type type: str or ~botframework.connector.models.ActivityTypes
    :param id: ID of this activity
    :type id: str
    :param timestamp: UTC Time when message was sent (set by service)
    :type timestamp: datetime
    :param local_timestamp: Local time when message was sent (set by client,
     Ex: 2016-09-23T13:07:49.4714686-07:00)
    :type local_timestamp: datetime
    :param service_url: Service endpoint where operations concerning the
     activity may be performed
    :type service_url: str
    :param channel_id: ID of the channel where the activity was sent
    :type channel_id: str
    :param from_property: Sender address
    :type from_property: ~botframework.connector.models.ChannelAccount
    :param conversation: Conversation
    :type conversation: ~botframework.connector.models.ConversationAccount
    :param recipient: (Outbound to bot only) Bot's address that received the
     message
    :type recipient: ~botframework.connector.models.ChannelAccount
    :param text_format: Format of text fields Default:markdown. Possible
     values include: 'markdown', 'plain', 'xml'
    :type text_format: str or ~botframework.connector.models.TextFormatTypes
    :param attachment_layout: Hint for how to deal with multiple attachments.
     Default:list. Possible values include: 'list', 'carousel'
    :type attachment_layout: str or
     ~botframework.connector.models.AttachmentLayoutTypes
    :param members_added: Members added to the conversation
    :type members_added: list[~botframework.connector.models.ChannelAccount]
    :param members_removed: Members removed from the conversation
    :type members_removed: list[~botframework.connector.models.ChannelAccount]
    :param reactions_added: Reactions added to the activity
    :type reactions_added:
     list[~botframework.connector.models.MessageReaction]
    :param reactions_removed: Reactions removed from the activity
    :type reactions_removed:
     list[~botframework.connector.models.MessageReaction]
    :param topic_name: The conversation's updated topic name
    :type topic_name: str
    :param history_disclosed: True if prior history of the channel is
     disclosed
    :type history_disclosed: bool
    :param locale: The language code of the Text field
    :type locale: str
    :param text: Content for the message
    :type text: str
    :param speak: SSML Speak for TTS audio response
    :type speak: str
    :param input_hint: Input hint to the channel on what the bot is expecting.
     Possible values include: 'acceptingInput', 'ignoringInput',
     'expectingInput'
    :type input_hint: str or ~botframework.connector.models.InputHints
    :param summary: Text to display if the channel cannot render cards
    :type summary: str
    :param suggested_actions: SuggestedActions are used to provide
     keyboard/quickreply like behavior in many clients
    :type suggested_actions: ~botframework.connector.models.SuggestedActions
    :param attachments: Attachments
    :type attachments: list[~botframework.connector.models.Attachment]
    :param entities: Collection of Entity objects, each of which contains
     metadata about this activity. Each Entity object is typed.
    :type entities: list[~botframework.connector.models.Entity]
    :param channel_data: Channel-specific payload
    :type channel_data: object
    :param action: ContactAdded/Removed action
    :type action: str
    :param reply_to_id: The original ID this message is a response to
    :type reply_to_id: str
    :param label: Descriptive label
    :type label: str
    :param value_type: Unique string which identifies the shape of the value
     object
    :type value_type: str
    :param value: Open-ended value
    :type value: object
    :param name: Name of the operation to invoke or the name of the event
    :type name: str
    :param relates_to: Reference to another conversation or activity
    :type relates_to: ~botframework.connector.models.ConversationReference
    :param code: Code indicating why the conversation has ended. Possible
     values include: 'unknown', 'completedSuccessfully', 'userCancelled',
     'botTimedOut', 'botIssuedInvalidMessage', 'channelFailed'
    :type code: str or ~botframework.connector.models.EndOfConversationCodes
    :param expiration: DateTime to expire the activity as ISO 8601 encoded
     datetime
    :type expiration: datetime
    :param importance: Importance of this activity
     {Low|Normal|High}, null value indicates Normal importance see
     ActivityImportance)
    :type importance: str
    :param delivery_mode: Hint to describe how this activity should be
     delivered.
     Currently: null or "Default" = default delivery
     "Notification" = notification semantics
    :type delivery_mode: str
    :param text_highlights: TextHighlight in the activity represented in the
     ReplyToId property
    :type text_highlights: list[~botframework.connector.models.TextHighlight]
    """

    # NOTE: this class is generated by AutoRest (see file header); manual
    # edits will be lost on regeneration.
    # Maps each Python attribute to its JSON wire key and msrest type tag,
    # consumed by the msrest serializer in the base Model class.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'local_timestamp': {'key': 'localTimestamp', 'type': 'iso-8601'},
        'service_url': {'key': 'serviceUrl', 'type': 'str'},
        'channel_id': {'key': 'channelId', 'type': 'str'},
        'from_property': {'key': 'from', 'type': 'ChannelAccount'},
        'conversation': {'key': 'conversation', 'type': 'ConversationAccount'},
        'recipient': {'key': 'recipient', 'type': 'ChannelAccount'},
        'text_format': {'key': 'textFormat', 'type': 'str'},
        'attachment_layout': {'key': 'attachmentLayout', 'type': 'str'},
        'members_added': {'key': 'membersAdded', 'type': '[ChannelAccount]'},
        'members_removed': {'key': 'membersRemoved', 'type': '[ChannelAccount]'},
        'reactions_added': {'key': 'reactionsAdded', 'type': '[MessageReaction]'},
        'reactions_removed': {'key': 'reactionsRemoved', 'type': '[MessageReaction]'},
        'topic_name': {'key': 'topicName', 'type': 'str'},
        'history_disclosed': {'key': 'historyDisclosed', 'type': 'bool'},
        'locale': {'key': 'locale', 'type': 'str'},
        'text': {'key': 'text', 'type': 'str'},
        'speak': {'key': 'speak', 'type': 'str'},
        'input_hint': {'key': 'inputHint', 'type': 'str'},
        'summary': {'key': 'summary', 'type': 'str'},
        'suggested_actions': {'key': 'suggestedActions', 'type': 'SuggestedActions'},
        'attachments': {'key': 'attachments', 'type': '[Attachment]'},
        'entities': {'key': 'entities', 'type': '[Entity]'},
        'channel_data': {'key': 'channelData', 'type': 'object'},
        'action': {'key': 'action', 'type': 'str'},
        'reply_to_id': {'key': 'replyToId', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'value_type': {'key': 'valueType', 'type': 'str'},
        'value': {'key': 'value', 'type': 'object'},
        'name': {'key': 'name', 'type': 'str'},
        'relates_to': {'key': 'relatesTo', 'type': 'ConversationReference'},
        'code': {'key': 'code', 'type': 'str'},
        'expiration': {'key': 'expiration', 'type': 'iso-8601'},
        'importance': {'key': 'importance', 'type': 'str'},
        'delivery_mode': {'key': 'deliveryMode', 'type': 'str'},
        'text_highlights': {'key': 'textHighlights', 'type': '[TextHighlight]'},
    }

    def __init__(self, *, type=None, id: str=None, timestamp=None, local_timestamp=None, service_url: str=None, channel_id: str=None, from_property=None, conversation=None, recipient=None, text_format=None, attachment_layout=None, members_added=None, members_removed=None, reactions_added=None, reactions_removed=None, topic_name: str=None, history_disclosed: bool=None, locale: str=None, text: str=None, speak: str=None, input_hint=None, summary: str=None, suggested_actions=None, attachments=None, entities=None, channel_data=None, action: str=None, reply_to_id: str=None, label: str=None, value_type: str=None, value=None, name: str=None, relates_to=None, code=None, expiration=None, importance: str=None, delivery_mode: str=None, text_highlights=None, **kwargs) -> None:
        super(Activity, self).__init__(**kwargs)
        # Plain keyword-to-attribute assignments; all (de)serialization logic
        # lives in the msrest base Model, driven by _attribute_map above.
        self.type = type
        self.id = id
        self.timestamp = timestamp
        self.local_timestamp = local_timestamp
        self.service_url = service_url
        self.channel_id = channel_id
        self.from_property = from_property
        self.conversation = conversation
        self.recipient = recipient
        self.text_format = text_format
        self.attachment_layout = attachment_layout
        self.members_added = members_added
        self.members_removed = members_removed
        self.reactions_added = reactions_added
        self.reactions_removed = reactions_removed
        self.topic_name = topic_name
        self.history_disclosed = history_disclosed
        self.locale = locale
        self.text = text
        self.speak = speak
        self.input_hint = input_hint
        self.summary = summary
        self.suggested_actions = suggested_actions
        self.attachments = attachments
        self.entities = entities
        self.channel_data = channel_data
        self.action = action
        self.reply_to_id = reply_to_id
        self.label = label
        self.value_type = value_type
        self.value = value
        self.name = name
        self.relates_to = relates_to
        self.code = code
        self.expiration = expiration
        self.importance = importance
        self.delivery_mode = delivery_mode
        self.text_highlights = text_highlights
|
"""Kaggle Challenge module - Models"""
|
from django.conf.urls import url, include
from . import views
# Admin-side URL routing. All regex patterns are raw strings for consistency;
# the four program/restaurant patterns below were previously plain strings
# (identical regex values -- no escape sequences are involved).
urlpatterns = [
    # urls for school requests - user side
    url(r"^$", views.admin_homepage, name="adminhome"),
    url(r"^login/$", views.admin_login, name="admin_login"),
    url(r"^settings/$", views.admin_settings, name="admin_settings"),
    url(r"^settings/", include("applications.settings_urls", namespace="admin_settings")),
    url(r"^applications/$", views.applications, name="applications"),
    url(r"^requests/$", views.requests, name="requests"),
    url(r"^programs/$", views.programs, name="programs"),
    url(r"^restaurants/$", views.restaurants, name="restaurants"),
    url(r"^resources/$", views.resources, name="resources"),
    # review urls for applications
    url(r"^application/(?P<id>[0-9]+)/review/$", views.application_review, name="review"),
    url(r"^application/(?P<id>[0-9]+)/accept/$", views.accept, name="accept"),
    url(r"^application/(?P<id>[0-9]+)/deny/$", views.deny, name="deny"),
    # review urls for requests
    url(r"^request/(?P<id>[0-9]+)/review/$", views.review_request, name="review_request"),
    url(r"^request/(?P<id>[0-9]+)/accept/$", views.accept_request, name="accept_request"),
    url(r"^request/(?P<id>[0-9]+)/deny/$", views.deny_request, name="deny_request"),
    # program / restaurant profile pages and their note/contact forms
    url(r"^program/(?P<id>[0-9]+)/$", views.program_profile, name="program"),
    url(r"^program/(?P<id>[0-9]+)/add_note$", views.add_program_note, name="new_program_note"),
    url(r"^program/(?P<id>[0-9]+)/add_contact$", views.add_program_contact,
        name="new_program_contact"),
    url(r"^restaurant/(?P<id>[0-9]+)/$", views.restaurant_profile, name="restaurant"),
    url(r"^restaurant/(?P<id>[0-9]+)/add_note$", views.add_restaurant_note, name="new_restaurant_note"),
    url(r"^restaurant/(?P<id>[0-9]+)/add_contact$", views.add_restaurant_contact, name="new_restaurant_contact"),
    # view specific user profile
    url(r"^user/(?P<id>[0-9]+)/$", views.view_user_profile, name="user_profile"),
    url(r"^user/(?P<id>[0-9]+)/edit$", views.edit_user_profile, name="edit_user_profile"),
    url(r"^pairings/$", views.pairings, name="pairings"),
    url(r"^pairings/add$", views.pairings_add, name="new_pairing"),
    url(r"^pairings/delete$", views.pairings_delete, name="delete_pairing"),
    # url routing for notifications page
    url(r"^notifications/$", views.show_notifications, name="notifications"),
    url(r"^notifications/(?P<id>[0-9]+)/visit/$", views.visit_notification, name="visit_notifications"),
    url(r"^notifications/(?P<id>[0-9]+)/hover/$", views.hover_notification, name="hover_notifications"),
    url(r"^notifications/dismiss_all/$", views.dismiss_all_notifications, name="dismiss_notifications"),
    url(r"^mealhistory/$", views.meal_history, name="meal_history"),
    url(r"^mealhistory/update$", views.meal_history_update, name="meal_history_update"),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-15 20:26
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3 (see header comment): initial schema
    # for the app -- lookup tables (AnimalType/AnimalBreed, Color, Role),
    # actors (Person, Organization, generic Participant) and Patient.
    # Do not hand-edit applied migrations; create a new migration instead.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='AnimalBreed',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('natural_key', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
            ],
            options={
                'verbose_name': 'raza',
                'verbose_name_plural': 'razas',
            },
        ),
        migrations.CreateModel(
            name='AnimalType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('natural_key', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
            ],
            options={
                'verbose_name': 'tipo de animal',
                'verbose_name_plural': 'tipo de animales',
            },
        ),
        migrations.CreateModel(
            name='Color',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('natural_key', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
            ],
            options={
                'verbose_name_plural': 'colores',
            },
        ),
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('natural_key', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
                ('phone_number', models.CharField(blank=True, max_length=20, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
            ],
            options={
                'verbose_name': 'organización',
            },
        ),
        # Participant uses the contenttypes framework (generic FK target).
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
            ],
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10, verbose_name='nombre')),
                ('natural_key', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name', verbose_name='nemotécnico')),
                ('photo_first', models.ImageField(upload_to='patient/%Y/%m/%d/', verbose_name='foto 1')),
                ('photo_second', models.ImageField(upload_to='patient/%Y/%m/%d/', verbose_name='foto 2')),
                ('photo_third', models.ImageField(upload_to='patient/%Y/%m/%d/', verbose_name='foto 3')),
                ('animal_breed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.AnimalBreed', verbose_name='raza / tipo de animal')),
            ],
            options={
                'verbose_name': 'paciente',
                'verbose_name_plural': 'pacientes',
            },
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('natural_key', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
                ('phone_number', models.CharField(blank=True, max_length=20, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
            ],
            options={
                'verbose_name': 'persona',
            },
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('natural_key', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name')),
            ],
            options={
                'verbose_name_plural': 'roles',
            },
        ),
        # Cross-model foreign keys are added after all CreateModel operations
        # so every target table already exists.
        migrations.AddField(
            model_name='person',
            name='rol',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Role', verbose_name='rol'),
        ),
        migrations.AddField(
            model_name='person',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='person', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='patient',
            name='author',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Person', verbose_name='autor'),
        ),
        migrations.AddField(
            model_name='patient',
            name='color_first',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='color_first', to='core.Color', verbose_name='color primario'),
        ),
        migrations.AddField(
            model_name='patient',
            name='color_second',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='color_second', to='core.Color', verbose_name='color secundario'),
        ),
        migrations.AddField(
            model_name='animalbreed',
            name='animal_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.AnimalType'),
        ),
    ]
|
'''
Trie class will be built with a list of words.
It will be used for searching and autocompleting words.
'''
class TrieNode:
    """A single node of a trie; complete words store themselves in ``data``."""

    def __init__(self):
        # children is a dictionary from next character to the next trie node
        self.children = {}
        # Full word terminating at this node, or None for internal nodes.
        self.data = None

    @staticmethod
    def insert(node, word):
        """Insert *word* into the trie rooted at *node*."""
        for c in word:
            if c not in node.children:
                node.children[c] = TrieNode()
            node = node.children[c]
        node.data = word

    @staticmethod
    def find(node, word):
        """Return *word* if it is stored in the trie rooted at *node*, else None."""
        for c in word:
            if c in node.children:
                node = node.children[c]
            else:
                return None
        return node.data

    @staticmethod
    def is_prefix(node, prefix):
        """Return True if *prefix* can be extended into a longer stored word.

        Note: a stored word with no longer continuation yields False, matching
        the original `len(children) != 0` check.
        """
        for c in prefix:
            if c in node.children:
                node = node.children[c]
            else:
                return False
        # Idiom fix: a non-empty dict is truthy; no need for len(...keys()).
        return bool(node.children)

    @staticmethod
    def is_word(node, word):
        """Return True if *word* was inserted into the trie rooted at *node*."""
        for c in word:
            if c in node.children:
                node = node.children[c]
            else:
                return False
        return node.data is not None

    @staticmethod
    def print_all_words_after_node(node):
        """Print every word stored at or below *node* (pre-order)."""
        if node.data:
            print(node.data)
        for child_node in node.children.values():
            TrieNode.print_all_words_after_node(child_node)

    @staticmethod
    def print_all_words_after_prefix(node, prefix):
        """Print every stored word that starts with *prefix*."""
        for c in prefix:
            if c in node.children:
                node = node.children[c]
            else:
                print("NONE EXISTS")
                return
        TrieNode.print_all_words_after_node(node)
class Trie:
    """Prefix tree over a vocabulary, built from an initial word list."""

    def __init__(self, words):
        # Root of the tree; the attribute name `top` is part of the
        # existing interface and is kept as-is.
        self.top = TrieNode()
        for w in words:
            TrieNode.insert(self.top, w)
        print("Built Trie!")

    def insert(self, word):
        """Add *word* to the trie."""
        TrieNode.insert(self.top, word)

    def find(self, word):
        """Return the stored word if present, else None."""
        return TrieNode.find(self.top, word)

    def is_prefix(self, prefix):
        """True if *prefix* can be extended into a longer stored word."""
        return TrieNode.is_prefix(self.top, prefix)

    def is_word(self, word):
        """True if *word* was inserted into the trie."""
        return TrieNode.is_word(self.top, word)

    def print_all_words_after_prefix(self, prefix):
        """Print every stored word that starts with *prefix*."""
        TrieNode.print_all_words_after_prefix(self.top, prefix)
|
from nanome._internal._structure._complex import _Complex
from nanome._internal import _PluginInstance
from nanome._internal._network import PluginNetwork
from nanome._internal._network._commands._callbacks import _Messages
from nanome.util import Matrix, Logs
from .io import ComplexIO
from . import Base
class Complex(_Complex, Base):
    """
    | Represents a Complex that contains molecules.
    """
    io = ComplexIO()

    def __init__(self):
        super(Complex, self).__init__()
        # Deprecated facade objects kept for backward compatibility.
        self._rendering = Complex.Rendering(self)
        self._molecular = Complex.Molecular(self)
        self._transform = Complex.Transform(self)
        self.io = ComplexIO(self)

    def add_molecule(self, molecule):
        """
        | Add a molecule to this complex
        :param molecule: Molecule to add to the chain
        :type molecule: :class:`~nanome.structure.Molecule`
        """
        # Reset the index so the server assigns a fresh one.
        molecule.index = -1
        self._add_molecule(molecule)

    def remove_molecule(self, molecule):
        """
        | Remove a molecule from this complex
        :param molecule: Molecule to remove from the chain
        :type molecule: :class:`~nanome.structure.Molecule`
        """
        molecule.index = -1
        self._remove_molecule(molecule)

    # region Generators
    @property
    def molecules(self):
        """
        | The list of molecules within this complex
        """
        for molecule in self._molecules:
            yield molecule

    @molecules.setter
    def molecules(self, molecule_list):
        self._molecules = molecule_list

    @property
    def chains(self):
        """
        | The list of chains within this complex
        """
        for molecule in self.molecules:
            for chain in molecule.chains:
                yield chain

    @property
    def residues(self):
        """
        | The list of residues within this complex
        """
        for chain in self.chains:
            for residue in chain.residues:
                yield residue

    @property
    def atoms(self):
        """
        | The list of atoms within this complex
        """
        for residue in self.residues:
            for atom in residue.atoms:
                yield atom

    @property
    def bonds(self):
        """
        | The list of bonds within this complex
        """
        for residue in self.residues:
            for bond in residue.bonds:
                yield bond
    # endregion

    # region all fields
    @property
    def boxed(self):
        """
        | Represents if this complex is boxed/bordered in Nanome.
        :type: :class:`bool`
        """
        return self._boxed

    @boxed.setter
    def boxed(self, value):
        self._boxed = value

    @property
    def locked(self):
        """
        | Represents if this complex is locked and unmovable in Nanome.
        :type: :class:`bool`
        """
        return self._locked

    @locked.setter
    def locked(self, value):
        self._locked = value
        # Locking implies showing the box.
        if (value):
            self._boxed = True

    @property
    def visible(self):
        """
        | Represents if this complex is visible in Nanome.
        :type: :class:`bool`
        """
        return self._visible

    @visible.setter
    def visible(self, value):
        self._visible = value

    @property
    def computing(self):
        # Mirrors the _computing flag on the underlying _Complex.
        return self._computing

    @computing.setter
    def computing(self, value):
        self._computing = value

    @property
    def current_frame(self):
        """
        | Represents the current animation frame the complex is in.
        :type: :class:`int`
        """
        return self._current_frame

    def set_current_frame(self, value):
        # Clamp to the valid frame range [0, molecule_count - 1].
        value = max(0, min(value, len(self._molecules) - 1))
        self._current_frame = value

    # returns true if the complex is selected on nanome.
    def get_selected(self):
        return self._selected

    def get_all_selected(self):
        """Return True only if every atom in this complex is selected."""
        for atom in self.atoms:
            if not atom.selected:
                return False
        return True

    def set_all_selected(self, value):
        """Set the selection flag on every atom in this complex."""
        for atom in self.atoms:
            atom.selected = value

    def set_surface_needs_redraw(self):
        self._surface_dirty = True

    @property
    def box_label(self):
        """
        | Represents the label on the box surrounding the complex
        :type: :class:`str`
        """
        return self._box_label

    @box_label.setter
    def box_label(self, value):
        self._box_label = value

    @property
    def name(self):
        """
        | Represents the name of the complex
        :type: :class:`str`
        """
        return self._name

    @name.setter
    def name(self, value):
        # Coerce non-string values so _name is always a str.
        if type(value) is not str:
            value = str(value)
        self._name = value

    @property
    def index_tag(self):
        # Numeric tag shown in the full name (see full_name).
        return self._index_tag

    @index_tag.setter
    def index_tag(self, value):
        self._index_tag = value

    @property
    def split_tag(self):
        # String tag shown in the full name (see full_name).
        return self._split_tag

    @split_tag.setter
    def split_tag(self, value):
        self._split_tag = value

    @property
    def full_name(self):
        """
        | Represents the full name of the complex with its tags and name
        :type: :class:`str`
        """
        # Format: "name {index-split}" with either tag optional.
        fullname = self._name
        has_tag = False
        if self._index_tag > 0:
            fullname = fullname + " {" + str(self._index_tag)
            has_tag = True
        if self._split_tag is not None and len(self._split_tag) > 0:
            if has_tag:
                fullname = fullname + "-" + self._split_tag
            else:
                fullname = fullname + " {" + self._split_tag
                has_tag = True
        if has_tag:
            fullname = fullname + "}"
        return fullname

    @full_name.setter
    def full_name(self, value):
        # Setting the full name resets both tags.
        self._name = value
        self._index_tag = 0
        self._split_tag = ''

    @property
    def position(self):
        """
        | Position of the complex
        :type: :class:`~nanome.util.Vector3`
        """
        return self._position

    @position.setter
    def position(self, value):
        self._position = value

    @property
    def rotation(self):
        """
        | Rotation of the complex
        :type: :class:`~nanome.util.Quaternion`
        """
        return self._rotation

    @rotation.setter
    def rotation(self, value):
        self._rotation = value

    def get_workspace_to_complex_matrix(self):
        return self.get_complex_to_workspace_matrix().get_inverse()

    def get_complex_to_workspace_matrix(self):
        return Matrix.compose_transformation_matrix(self._position, self._rotation)
    # endregion

    def convert_to_conformers(self, force_conformers=None):
        return self._convert_to_conformers(force_conformers)

    def convert_to_frames(self):
        return self._convert_to_frames()

    def register_complex_updated_callback(self, callback):
        self._complex_updated_callback = callback
        _PluginInstance._hook_complex_updated(self.index, callback)
        PluginNetwork._send(_Messages.hook_complex_updated, self.index, False)

    def register_selection_changed_callback(self, callback):
        self._selection_changed_callback = callback
        _PluginInstance._hook_selection_changed(self.index, callback)
        PluginNetwork._send(_Messages.hook_selection_changed, self.index, False)

    @staticmethod
    def align_origins(target_complex, *other_complexes):
        """Copy *target_complex*'s position/rotation onto each other complex."""
        for complex in other_complexes:
            complex.position = target_complex.position.get_copy()
            complex.rotation = target_complex.rotation.get_copy()

    # region deprecated
    @current_frame.setter
    @Logs.deprecated()
    def current_frame(self, value):
        # Deprecated: unlike set_current_frame, this does NOT clamp the value.
        self._current_frame = value

    @property
    @Logs.deprecated()
    def rendering(self):
        return self._rendering

    @property
    @Logs.deprecated()
    def molecular(self):
        return self._molecular

    @property
    @Logs.deprecated()
    def transform(self):
        return self._transform

    class Rendering(object):
        # Deprecated facade that forwards rendering fields to the parent
        # Complex.
        def __init__(self, parent):
            self.parent = parent

        @property
        def boxed(self):
            return self.parent._boxed

        @boxed.setter
        def boxed(self, value):
            self.parent.boxed = value

        @property
        def locked(self):
            return self.parent.locked

        @locked.setter
        def locked(self, value):
            self.parent.locked = value
            if (value):
                self.parent.boxed = True

        @property
        def visible(self):
            return self.parent.visible

        @visible.setter
        def visible(self, value):
            self.parent.visible = value

        @property
        def computing(self):
            return self.parent.computing

        @computing.setter
        def computing(self, value):
            self.parent.computing = value

        @property
        def current_frame(self):
            return self.parent.current_frame

        @current_frame.setter
        def current_frame(self, value):
            self.parent.current_frame = value

        # returns true if the complex is selected on nanome.
        def get_selected(self):
            # NOTE(review): Complex exposes get_selected()/_selected, not a
            # `selected` property -- confirm the base class provides it.
            return self.parent.selected

        def set_surface_needs_redraw(self):
            # NOTE(review): Complex exposes set_surface_needs_redraw()/
            # _surface_dirty, not a `surface_dirty` property -- confirm.
            self.parent.surface_dirty = True

        @property
        def box_label(self):
            # Fixed: previously read/wrote self._box_label, which lives on
            # this facade object instead of the wrapped Complex; delegate to
            # the parent like every other property in this class.
            return self.parent.box_label

        @box_label.setter
        def box_label(self, value):
            self.parent.box_label = value

    class Molecular(object):
        # Deprecated facade that forwards naming fields to the parent
        # Complex.
        def __init__(self, parent):
            self.parent = parent

        @property
        def name(self):
            return self.parent.name

        @name.setter
        def name(self, value):
            self.parent.name = value

        @property
        def index_tag(self):
            return self.parent.index_tag

        @index_tag.setter
        def index_tag(self, value):
            self.parent.index_tag = value

        @property
        def split_tag(self):
            return self.parent.split_tag

        @split_tag.setter
        def split_tag(self, value):
            self.parent.split_tag = value

    class Transform(object):
        # Deprecated facade that forwards transform fields to the parent
        # Complex.
        def __init__(self, parent):
            self.parent = parent

        @property
        def position(self):
            return self.parent.position

        @position.setter
        def position(self, value):
            self.parent.position = value

        @property
        def rotation(self):
            return self.parent.rotation

        @rotation.setter
        def rotation(self, value):
            self.parent.rotation = value

        def get_workspace_to_complex_matrix(self):
            # Inverse transform built explicitly: transposed rotation times
            # negated translation.
            rotation = Matrix.from_quaternion(self.parent.rotation)
            rotation.transpose()
            translation = Matrix.identity(4)
            translation[0][3] = -self.parent.position.x
            translation[1][3] = -self.parent.position.y
            translation[2][3] = -self.parent.position.z
            transformation = rotation * translation
            return transformation

        def get_complex_to_workspace_matrix(self):
            result = self.parent.get_workspace_to_complex_matrix()
            result = result.get_inverse()
            return result
    # endregion
# Module-level wiring: attach the IO addon to the class and register Complex
# as the factory used when the internal layer instantiates a _Complex.
Complex.io._setup_addon(Complex)
_Complex._create = Complex
|
import unittest
import pathlib
import tempfile
import shutil
class MosgalTestCase(unittest.TestCase):
    """Base test case with a fixtures directory and a per-test-instance
    temporary directory (removed in ``tearDown``)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fixtures live next to this file, under tests_files/.
        self.tests_files_directory = pathlib.Path(pathlib.Path(__file__).parent, 'tests_files')
        self.temporary_directory = pathlib.Path(tempfile.mkdtemp())

    def tearDown(self):
        shutil.rmtree(self.temporary_directory)

    def copy_to_temporary_directory(self, file_in_test_dir: str, new_name: str = ''):
        """Copy the content of a file from the ``test_file_directory`` to the temporary directory

        :param file_in_test_dir: path to the file to copy
        :param new_name: the new name of the file in the temporary directory (if blank, the one from path is used)
        :raises FileNotFoundError: if the source file does not exist
        :raises FileExistsError: if the destination already exists
        :rtype: pathlib.Path
        """
        path_in_test = pathlib.Path(self.tests_files_directory, file_in_test_dir)
        if not path_in_test.exists():
            raise FileNotFoundError(path_in_test)
        if not new_name:
            new_name = path_in_test.name
        path_in_temp = pathlib.Path(self.temporary_directory, new_name)
        if path_in_temp.exists():
            raise FileExistsError(path_in_temp)
        # shutil.copyfile replaces the manual rb/wb read-write loop; it copies
        # content only, same as the original code.
        shutil.copyfile(path_in_test, path_in_temp)
        return path_in_temp
|
import psutil
import os, sys
def get_process_memory():
    """Return the current process's resident set size (RSS) in MiB."""
    process = psutil.Process(os.getpid())
    mem_info = process.memory_info()
    # psutil reports rss in bytes; convert to MiB.
    return mem_info.rss/1024/1024
print(get_process_memory()/1024/1024)
|
import torch
import torchvision
import torch.nn as nn
import torch.distributed as dist
import torchvision.transforms as transforms
from apex import amp
from datetime import datetime
from apex.parallel import DistributedDataParallel as DDP
from model import ConvNet
def train(gpu, args):
    """Per-process worker for distributed data-parallel MNIST training
    with apex mixed precision.

    :param gpu: local GPU index on this node; also used as the CUDA device id.
    :param args: namespace expected to provide nr (node rank), gpus (GPUs per
        node), world_size, lr, batch_size, n_workers and epochs.
    """
    # Global rank = node index * GPUs per node + local GPU index.
    rank = args.nr * args.gpus + gpu
    # init_method='env://' reads the rendezvous address from the environment
    # (MASTER_ADDR/MASTER_PORT) -- presumably set by the launcher; confirm.
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
        world_size=args.world_size,
        rank=rank)
    # Fixed seed so every process builds an identical initial model.
    torch.manual_seed(0)
    model = ConvNet()
    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr)
    # Wrap the model: amp.initialize must run before the DDP wrapper is
    # applied (apex requirement).
    model, optimizer = amp.initialize(model, optimizer, opt_level='O2')
    model = DDP(model)
    # Data loading code
    train_dataset = torchvision.datasets.MNIST(
        root='./data',
        train=True,
        transform=transforms.ToTensor(),
        download=True
    )
    # The sampler shards the dataset across processes; it replaces
    # shuffle=True on the DataLoader below.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset,
        num_replicas=args.world_size,
        rank=rank)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.n_workers,
        pin_memory=True,
        sampler=train_sampler
    )
    start = datetime.now()
    total_step = len(train_loader)
    for epoch in range(args.epochs):
        for i, (images, labels) in enumerate(train_loader):
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)
            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
            # Backward and optimize
            optimizer.zero_grad()
            # Scale the loss for mixed precision before backprop.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()
            # Log every 100 steps, from the first local GPU only.
            if (i + 1) % 100 == 0 and gpu == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch + 1,
                    args.epochs,
                    i + 1,
                    total_step,
                    loss.item())
                )
    if gpu == 0:
        print("Training complete in: " + str(datetime.now() - start))
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary to run train and evaluation on object detection model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from object_detection import model_hparams
from object_detection import model_lib
tf.logging.set_verbosity(tf.logging.INFO)

#----------------------------ASKING FOR ARGUMENTS-----------------------------
# Command-line flags consumed by main(); `model_dir` and
# `pipeline_config_path` are marked required before flag use in main().
flags.DEFINE_string(
    'model_dir', None, 'Path to output model directory '
    'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
                    'file.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_boolean('eval_training_data', False,
                     'If training data should be evaluated for this job. Note '
                     'that one call only use this in eval-only mode, and '
                     '`checkpoint_dir` must be supplied.')
flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '
                     'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
                     'one of every n train input examples for evaluation, '
                     'where n is provided. This is only used if '
                     '`eval_training_data` is True.')
flags.DEFINE_string(
    'hparams_overrides', None, 'Hyperparameter overrides, '
    'represented as a string containing comma-separated '
    'hparam_name=value pairs.')
flags.DEFINE_string(
    'checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
    '`checkpoint_dir` is provided, this binary operates in eval-only mode, '
    'writing resulting metrics to `model_dir`.')
flags.DEFINE_boolean(
    'run_once', False, 'If running in eval-only mode, whether to run just '
    'one round of eval vs running continuously (default).'
)
FLAGS = flags.FLAGS
def main(unused_argv):
    """Runs training+evaluation, or eval-only if `checkpoint_dir` is set.

    Args:
        unused_argv: positional command-line args left over after absl flag
            parsing (required by the tf.app.run() calling convention).
    """
    # There are two arguments that must be provided.
    flags.mark_flag_as_required('model_dir')
    flags.mark_flag_as_required('pipeline_config_path')
    # The variable 'config' contains instructions on saving checkpoints and
    # general training info.
    config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir,
                                    log_step_count_steps=10,
                                    save_checkpoints_steps=1000)
    # the 'create_estimator_and_inputs' function builds the Estimator plus
    # all of the train / eval / predict input functions from the pipeline
    # config.
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(FLAGS.hparams_overrides),  # this is
        # worth paying attention to, considering that I will need to do some tuning.
        pipeline_config_path=FLAGS.pipeline_config_path,
        train_steps=FLAGS.num_train_steps,
        sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
        sample_1_of_n_eval_on_train_examples=(
            FLAGS.sample_1_of_n_eval_on_train_examples))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
    predict_input_fn = train_and_eval_dict['predict_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    if FLAGS.checkpoint_dir:
        # Eval-only mode: evaluate checkpoint(s) found in `checkpoint_dir`.
        if FLAGS.eval_training_data:
            name = 'training_data'
            input_fn = eval_on_train_input_fn
        else:
            name = 'validation_data'
            # The first eval input will be evaluated.
            input_fn = eval_input_fns[0]
        if FLAGS.run_once:
            estimator.evaluate(input_fn,
                               checkpoint_path=tf.train.latest_checkpoint(
                                   FLAGS.checkpoint_dir))
        else:
            model_lib.continuous_eval(estimator, FLAGS.checkpoint_dir, input_fn,
                                      train_steps, name)
    else:
        # Normal mode: interleaved training and evaluation.
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)
        # Currently only a single Eval Spec is allowed.
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
if __name__ == '__main__':
    # tf.app.run() parses the absl flags, then invokes main(argv).
    tf.app.run()
|
# coding: utf-8
from hashlib import sha1
from lxml import etree, objectify
from pprint import pformat
from unicodedata import normalize
from urllib import urlencode
import datetime
import logging
import time
import urllib2
import urlparse
from odoo import api, fields, models, _
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment_ogone.controllers.main import OgoneController
from odoo.addons.payment_ogone.data import ogone
from odoo.tools import float_round, DEFAULT_SERVER_DATE_FORMAT
from odoo.tools.float_utils import float_compare, float_repr
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class PaymentAcquirerOgone(models.Model):
    """Ogone payment acquirer (form-based and server-to-server flows)."""
    _inherit = 'payment.acquirer'

    provider = fields.Selection(selection_add=[('ogone', 'Ogone')])
    # Credentials from the Ogone backoffice technical-information page.
    ogone_pspid = fields.Char('PSPID', required_if_provider='ogone', groups='base.group_user')
    ogone_userid = fields.Char('API User ID', required_if_provider='ogone', groups='base.group_user')
    ogone_password = fields.Char('API User Password', required_if_provider='ogone', groups='base.group_user')
    # Shared secrets used to sign outgoing ('in') and verify incoming
    # ('out') parameter sets; see _ogone_generate_shasign().
    ogone_shakey_in = fields.Char('SHA Key IN', size=32, required_if_provider='ogone', groups='base.group_user')
    ogone_shakey_out = fields.Char('SHA Key OUT', size=32, required_if_provider='ogone', groups='base.group_user')
    ogone_alias_usage = fields.Char('Alias Usage', default="Allow saving my payment data",
                                    help="If you want to use Ogone Aliases, this default "
                                         "Alias Usage will be presented to the customer as the "
                                         "reason you want to keep his payment data")

    def _get_feature_support(self):
        """Get advanced feature support by provider.

        Each provider should add its technical in the corresponding
        key for the following features:
            * fees: support payment fees computations
            * authorize: support authorizing payment (separates
                         authorization and capture)
            * tokenize: support saving payment data in a payment.tokenize
                        object
        """
        res = super(PaymentAcquirerOgone, self)._get_feature_support()
        res['tokenize'].append('ogone')
        return res

    def _get_ogone_urls(self, environment):
        """ Ogone URLS:
         - standard order: POST address for form-based

        :param environment: Ogone environment segment ('test' or 'prod').
        :return: dict of named Ogone endpoint URLs.
        """
        return {
            'ogone_standard_order_url': 'https://secure.ogone.com/ncol/%s/orderstandard_utf8.asp' % (environment,),
            'ogone_direct_order_url': 'https://secure.ogone.com/ncol/%s/orderdirect_utf8.asp' % (environment,),
            'ogone_direct_query_url': 'https://secure.ogone.com/ncol/%s/querydirect_utf8.asp' % (environment,),
            'ogone_afu_agree_url': 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (environment,),
        }

    def _ogone_generate_shasign(self, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param string inout: 'in' (odoo contacting ogone) or 'out' (ogone
                             contacting odoo). In this last case only some
                             fields should be contained (see e-Commerce basic)
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert self.provider == 'ogone'
        key = getattr(self, 'ogone_shakey_' + inout)

        def filter_key(key):
            # For outgoing requests every non-empty parameter is signed;
            # for incoming feedback only the documented SHA-OUT parameter
            # names participate in the digest.
            if inout == 'in':
                return True
            else:
                # SHA-OUT keys
                # source https://viveum.v-psp.com/Ncol/Viveum_e-Com-BAS_EN.pdf
                keys = [
                    'AAVADDRESS',
                    'AAVCHECK',
                    'AAVMAIL',
                    'AAVNAME',
                    'AAVPHONE',
                    'AAVZIP',
                    'ACCEPTANCE',
                    'ALIAS',
                    'AMOUNT',
                    'BIC',
                    'BIN',
                    'BRAND',
                    'CARDNO',
                    'CCCTY',
                    'CN',
                    'COMPLUS',
                    'CREATION_STATUS',
                    'CURRENCY',
                    'CVCCHECK',
                    'DCC_COMMPERCENTAGE',
                    'DCC_CONVAMOUNT',
                    'DCC_CONVCCY',
                    'DCC_EXCHRATE',
                    'DCC_EXCHRATESOURCE',
                    'DCC_EXCHRATETS',
                    'DCC_INDICATOR',
                    'DCC_MARGINPERCENTAGE',
                    'DCC_VALIDHOURS',
                    'DIGESTCARDNO',
                    'ECI',
                    'ED',
                    'ENCCARDNO',
                    'FXAMOUNT',
                    'FXCURRENCY',
                    'IBAN',
                    'IP',
                    'IPCTY',
                    'NBREMAILUSAGE',
                    'NBRIPUSAGE',
                    'NBRIPUSAGE_ALLTX',
                    'NBRUSAGE',
                    'NCERROR',
                    'NCERRORCARDNO',
                    'NCERRORCN',
                    'NCERRORCVC',
                    'NCERRORED',
                    'ORDERID',
                    'PAYID',
                    'PAYIDSUB',
                    'PM',
                    'SCO_CATEGORY',
                    'SCORING',
                    'STATUS',
                    'SUBBRAND',
                    'SUBSCRIPTION_ID',
                    'TRXDATE',
                    'VC'
                ]
                return key.upper() in keys

        # Digest is SHA1 over "KEY=value<shakey>" pairs sorted by upper-cased
        # parameter name; empty values are skipped per the Ogone spec.
        items = sorted((k.upper(), v) for k, v in values.items())
        sign = ''.join('%s=%s%s' % (k, v, key) for k, v in items if v and filter_key(k))
        sign = sign.encode("utf-8")
        shasign = sha1(sign).hexdigest()
        return shasign

    def ogone_form_generate_values(self, values):
        """Build the signed parameter set for the Ogone redirect form.

        :param dict values: generic transaction values from the payment flow
        :return dict: `values` augmented with the Ogone-specific parameters
                      and their SHASIGN.
        """
        base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        ogone_tx_values = dict(values)
        temp_ogone_tx_values = {
            'PSPID': self.ogone_pspid,
            'ORDERID': values['reference'],
            # Ogone expects the amount in cents, as an integer string.
            'AMOUNT': float_repr(float_round(values['amount'], 2) * 100, 0),
            'CURRENCY': values['currency'] and values['currency'].name or '',
            'LANGUAGE': values.get('partner_lang'),
            'CN': values.get('partner_name'),
            'EMAIL': values.get('partner_email'),
            'OWNERZIP': values.get('partner_zip'),
            'OWNERADDRESS': values.get('partner_address'),
            'OWNERTOWN': values.get('partner_city'),
            'OWNERCTY': values.get('partner_country') and values.get('partner_country').code or '',
            'OWNERTELNO': values.get('partner_phone'),
            # Return endpoints handled by OgoneController.
            'ACCEPTURL': '%s' % urlparse.urljoin(base_url, OgoneController._accept_url),
            'DECLINEURL': '%s' % urlparse.urljoin(base_url, OgoneController._decline_url),
            'EXCEPTIONURL': '%s' % urlparse.urljoin(base_url, OgoneController._exception_url),
            'CANCELURL': '%s' % urlparse.urljoin(base_url, OgoneController._cancel_url),
            'PARAMPLUS': 'return_url=%s' % ogone_tx_values.pop('return_url') if ogone_tx_values.get('return_url') else False,
        }
        if self.save_token in ['ask', 'always']:
            temp_ogone_tx_values.update({
                'ALIAS': 'ODOO-NEW-ALIAS-%s' % time.time(),  # something unique,
                'ALIASUSAGE': values.get('alias_usage') or self.ogone_alias_usage,
            })
        shasign = self._ogone_generate_shasign('in', temp_ogone_tx_values)
        temp_ogone_tx_values['SHASIGN'] = shasign
        ogone_tx_values.update(temp_ogone_tx_values)
        return ogone_tx_values

    def ogone_get_form_action_url(self):
        """Return the POST target URL for the form-based payment flow."""
        return self._get_ogone_urls(self.environment)['ogone_standard_order_url']

    def ogone_s2s_form_validate(self, data):
        """Check that all mandatory card fields are present in `data`.

        :return bool: True when valid, False when any field is missing.
        """
        error = dict()
        mandatory_fields = ["cc_number", "cc_cvc", "cc_holder_name", "cc_expiry", "cc_brand"]
        # Validation
        for field_name in mandatory_fields:
            if not data.get(field_name):
                error[field_name] = 'missing'
        return False if error else True

    def ogone_s2s_form_process(self, data):
        """Create a payment.token record from submitted s2s card data.

        :return: the new payment.token record (created as sudo).
        """
        values = {
            'cc_number': data.get('cc_number'),
            'cc_cvc': int(data.get('cc_cvc')),
            'cc_holder_name': data.get('cc_holder_name'),
            'cc_expiry': data.get('cc_expiry'),
            'cc_brand': data.get('cc_brand'),
            'acquirer_id': int(data.get('acquirer_id')),
            'partner_id': int(data.get('partner_id'))
        }
        pm_id = self.env['payment.token'].sudo().create(values)
        return pm_id
class PaymentTxOgone(models.Model):
    """Payment transaction handling for the Ogone acquirer."""
    _inherit = 'payment.transaction'

    # ogone status
    _ogone_valid_tx_status = [5, 9]
    _ogone_wait_tx_status = [41, 50, 51, 52, 55, 56, 91, 92, 99]
    _ogone_pending_tx_status = [46]  # 3DS HTML response
    _ogone_cancel_tx_status = [1]

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    @api.model
    def _ogone_form_get_tx_from_data(self, data):
        """ Given a data dict coming from ogone, verify it and find the related
        transaction record. Create a payment token if an alias is returned."""
        reference, pay_id, shasign, alias = data.get('orderID'), data.get('PAYID'), data.get('SHASIGN'), data.get('ALIAS')
        if not reference or not pay_id or not shasign:
            error_msg = _('Ogone: received data with missing reference (%s) or pay_id (%s) or shasign (%s)') % (reference, pay_id, shasign)
            _logger.info(error_msg)
            raise ValidationError(error_msg)

        # find tx -> @TDENOTE use paytid ?
        tx = self.search([('reference', '=', reference)])
        if not tx or len(tx) > 1:
            error_msg = _('Ogone: received data for reference %s') % (reference)
            if not tx:
                error_msg += _('; no order found')
            else:
                error_msg += _('; multiple order found')
            _logger.info(error_msg)
            raise ValidationError(error_msg)

        # verify shasign: recompute the SHA-OUT digest and compare with the
        # one Ogone sent, to authenticate the feedback.
        shasign_check = tx.acquirer_id._ogone_generate_shasign('out', data)
        if shasign_check.upper() != shasign.upper():
            error_msg = _('Ogone: invalid shasign, received %s, computed %s, for data %s') % (shasign, shasign_check, data)
            _logger.info(error_msg)
            raise ValidationError(error_msg)

        if not tx.acquirer_reference:
            tx.acquirer_reference = pay_id

        # alias was created on ogone server, store it
        if alias and tx.type == 'form_save':
            Token = self.env['payment.token']
            domain = [('acquirer_ref', '=', alias)]
            cardholder = data.get('CN')
            if not Token.search_count(domain):
                _logger.info('Ogone: saving alias %s for partner %s' % (data.get('CARDNO'), tx.partner_id))
                ref = Token.create({'name': data.get('CARDNO') + (' - ' + cardholder if cardholder else ''),
                                    'partner_id': tx.partner_id.id,
                                    'acquirer_id': tx.acquirer_id.id,
                                    'acquirer_ref': alias})
                tx.write({'payment_token_id': ref.id})

        return tx

    def _ogone_form_get_invalid_parameters(self, data):
        """Return (name, received, expected) tuples for mismatching feedback
        parameters; an empty list means the feedback is consistent."""
        invalid_parameters = []
        # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
        if self.acquirer_reference and data.get('PAYID') != self.acquirer_reference:
            invalid_parameters.append(('PAYID', data.get('PAYID'), self.acquirer_reference))
        # check what is bought
        if float_compare(float(data.get('amount', '0.0')), self.amount, 2) != 0:
            invalid_parameters.append(('amount', data.get('amount'), '%.2f' % self.amount))
        if data.get('currency') != self.currency_id.name:
            invalid_parameters.append(('currency', data.get('currency'), self.currency_id.name))
        return invalid_parameters

    def _ogone_form_validate(self, data):
        """Update the transaction state from the Ogone STATUS code in `data`.

        :return bool: True when the transaction is (now) done, False on error
                      status; cancel/pending statuses fall through returning
                      None implicitly after the state write.
        """
        if self.state == 'done':
            _logger.info('Ogone: trying to validate an already validated tx (ref %s)', self.reference)
            return True

        status = int(data.get('STATUS', '0'))
        if status in self._ogone_valid_tx_status:
            vals = {
                'state': 'done',
                'date_validate': datetime.datetime.strptime(data['TRXDATE'], '%m/%d/%y').strftime(DEFAULT_SERVER_DATE_FORMAT),
                'acquirer_reference': data['PAYID'],
            }
            # Store the returned alias as a token when the customer asked to
            # save the card (or the acquirer always saves it).
            if data.get('ALIAS') and self.partner_id and \
               (self.type == 'form_save' or self.acquirer_id.save_token == 'always')\
               and not self.payment_token_id:
                pm = self.env['payment.token'].create({
                    'partner_id': self.partner_id.id,
                    'acquirer_id': self.acquirer_id.id,
                    'acquirer_ref': data.get('ALIAS'),
                    'name': '%s - %s' % (data.get('CARDNO'), data.get('CN'))
                })
                vals.update(payment_token_id=pm.id)
            self.write(vals)
            if self.callback_eval:
                safe_eval(self.callback_eval, {'self': self})
            return True
        elif status in self._ogone_cancel_tx_status:
            self.write({
                'state': 'cancel',
                'acquirer_reference': data.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status or status in self._ogone_wait_tx_status:
            self.write({
                'state': 'pending',
                'acquirer_reference': data.get('PAYID'),
            })
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': data.get('NCERRORPLUS'),
                'error_code': data.get('NCERROR'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(data.get('NCERROR')),
            }
            _logger.info(error)
            self.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': data.get('PAYID'),
            })
            return False

    # --------------------------------------------------
    # S2S RELATED METHODS
    # --------------------------------------------------

    def ogone_s2s_do_transaction(self, **kwargs):
        """Charge the stored alias via Ogone DirectLink (server-to-server).

        Optional kwargs: '3d_secure' plus accept/decline/exception *_url
        overrides. Returns the result of _ogone_s2s_validate_tree().
        """
        # TODO: create tx with s2s type
        account = self.acquirer_id
        reference = self.reference or "ODOO-%s-%s" % (datetime.datetime.now().strftime('%y%m%d_%H%M%S'), self.partner_id.id)

        data = {
            'PSPID': account.ogone_pspid,
            'USERID': account.ogone_userid,
            'PSWD': account.ogone_password,
            'ORDERID': reference,
            # Amount in cents (note: `long` — this module targets Python 2).
            'AMOUNT': long(self.amount * 100),
            'CURRENCY': self.currency_id.name,
            'OPERATION': 'SAL',
            'ECI': 2,   # Recurring (from MOTO)
            'ALIAS': self.payment_token_id.acquirer_ref,
            'RTIMEOUT': 30,
        }

        if kwargs.get('3d_secure'):
            data.update({
                'FLAG3D': 'Y',
                'LANGUAGE': self.partner_id.lang or 'en_US',
            })
            # Optional per-call redirect URL overrides become ACCEPTURL /
            # DECLINEURL / EXCEPTIONURL parameters.
            for url in 'accept decline exception'.split():
                key = '{0}_url'.format(url)
                val = kwargs.pop(key, None)
                if val:
                    key = '{0}URL'.format(url).upper()
                    data[key] = val

        data['SHASIGN'] = self.acquirer_id._ogone_generate_shasign('in', data)

        direct_order_url = 'https://secure.ogone.com/ncol/%s/orderdirect.asp' % (self.acquirer_id.environment)

        _logger.debug("Ogone data %s", pformat(data))
        request = urllib2.Request(direct_order_url, urlencode(data))
        result = urllib2.urlopen(request).read()
        _logger.debug('Ogone response = %s', result)

        try:
            tree = objectify.fromstring(result)
        except etree.XMLSyntaxError:
            # invalid response from ogone
            _logger.exception('Invalid xml response from ogone')
            raise

        return self._ogone_s2s_validate_tree(tree)

    def _ogone_s2s_validate(self):
        """Fetch the current status from Ogone and apply it to this tx."""
        tree = self._ogone_s2s_get_tx_status()
        return self._ogone_s2s_validate_tree(tree)

    def _ogone_s2s_validate_tree(self, tree, tries=2):
        """Apply the STATUS in the DirectLink XML `tree` to this transaction.

        Retries up to `tries` times (with a short sleep) while the status is
        still one of the transient "wait" codes.
        """
        if self.state not in ('draft', 'pending'):
            _logger.info('Ogone: trying to validate an already validated tx (ref %s)', self.reference)
            return True

        status = int(tree.get('STATUS') or 0)
        if status in self._ogone_valid_tx_status:
            self.write({
                'state': 'done',
                'date_validate': datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT),
                'acquirer_reference': tree.get('PAYID'),
            })
            if tree.get('ALIAS') and self.partner_id and \
               (self.type == 'form_save' or self.acquirer_id.save_token == 'always')\
               and not self.payment_token_id:
                pm = self.env['payment.token'].create({
                    'partner_id': self.partner_id.id,
                    'acquirer_id': self.acquirer_id.id,
                    'acquirer_ref': tree.get('ALIAS'),
                    'name': tree.get('CARDNO'),
                })
                self.write({'payment_token_id': pm.id})
            if self.callback_eval:
                safe_eval(self.callback_eval, {'self': self})
            return True
        elif status in self._ogone_cancel_tx_status:
            self.write({
                'state': 'cancel',
                'acquirer_reference': tree.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status:
            self.write({
                'state': 'pending',
                'acquirer_reference': tree.get('PAYID'),
                # 3-D Secure challenge page returned by Ogone
                # (py2-only str.decode('base64')).
                'html_3ds': str(tree.HTML_ANSWER).decode('base64')
            })
        elif status in self._ogone_wait_tx_status and tries > 0:
            # Transient status: wait briefly, re-query and recurse.
            time.sleep(0.5)
            self.write({'acquirer_reference': tree.get('PAYID')})
            tree = self._ogone_s2s_get_tx_status()
            return self._ogone_s2s_validate_tree(tree, tries - 1)
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': tree.get('NCERRORPLUS'),
                'error_code': tree.get('NCERROR'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(tree.get('NCERROR')),
            }
            _logger.info(error)
            self.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': tree.get('PAYID'),
            })
            return False

    def _ogone_s2s_get_tx_status(self):
        """Query Ogone DirectLink for this transaction's current status.

        :return: lxml.objectify tree of the XML response.
        :raises etree.XMLSyntaxError: when Ogone returns invalid XML.
        """
        account = self.acquirer_id
        #reference = tx.reference or "ODOO-%s-%s" % (datetime.datetime.now().strftime('%Y%m%d_%H%M%S'), tx.partner_id.id)

        data = {
            'PAYID': self.acquirer_reference,
            'PSPID': account.ogone_pspid,
            'USERID': account.ogone_userid,
            'PSWD': account.ogone_password,
        }

        query_direct_url = 'https://secure.ogone.com/ncol/%s/querydirect.asp' % (self.acquirer_id.environment)

        _logger.debug("Ogone data %s", pformat(data))
        request = urllib2.Request(query_direct_url, urlencode(data))
        result = urllib2.urlopen(request).read()
        _logger.debug('Ogone response = %s', result)

        try:
            tree = objectify.fromstring(result)
        except etree.XMLSyntaxError:
            # invalid response from ogone
            _logger.exception('Invalid xml response from ogone')
            raise

        return tree
class PaymentToken(models.Model):
    """Payment token creation through the Ogone Alias (batch) API."""
    _inherit = 'payment.token'

    def ogone_create(self, values):
        """Register a new card alias with Ogone via the AFU batch endpoint.

        :param dict values: s2s form values (cc_number, cc_expiry, ...).
        :return dict: token values (acquirer_ref, masked name) when a card
                      number was provided, {} otherwise.
        :raises Exception: when Ogone reports a parameter or format error.
        """
        if values.get('cc_number'):
            # create a alias via batch
            values['cc_number'] = values['cc_number'].replace(' ', '')
            acquirer = self.env['payment.acquirer'].browse(values['acquirer_id'])
            alias = 'ODOO-NEW-ALIAS-%s' % time.time()
            # Ogone expects expiry as MMYY.
            expiry = str(values['cc_expiry'][:2]) + str(values['cc_expiry'][-2:])

            line = 'ADDALIAS;%(alias)s;%(cc_holder_name)s;%(cc_number)s;%(expiry)s;%(cc_brand)s;%(pspid)s'
            line = line % dict(values, alias=alias, expiry=expiry, pspid=acquirer.ogone_pspid)

            data = {
                'FILE_REFERENCE': alias,
                'TRANSACTION_CODE': 'MTR',
                'OPERATION': 'SAL',
                'NB_PAYMENTS': 1,   # even if we do not actually have any payment, ogone want it to not be 0
                'FILE': normalize('NFKD', line).encode('ascii','ignore'),  # Ogone Batch must be ASCII only
                'REPLY_TYPE': 'XML',
                'PSPID': acquirer.ogone_pspid,
                'USERID': acquirer.ogone_userid,
                'PSWD': acquirer.ogone_password,
                'PROCESS_MODE': 'CHECKANDPROCESS',
            }

            url = 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (acquirer.environment,)
            request = urllib2.Request(url, urlencode(data))
            result = urllib2.urlopen(request).read()

            try:
                tree = objectify.fromstring(result)
            except etree.XMLSyntaxError:
                _logger.exception('Invalid xml response from ogone')
                return None

            # Inspect the XML reply for either a global PARAMS_ERROR or a
            # per-line FORMAT_CHECK_ERROR.
            error_code = error_str = None
            if hasattr(tree, 'PARAMS_ERROR'):
                error_code = tree.NCERROR.text
                error_str = 'PARAMS ERROR: %s' % (tree.PARAMS_ERROR.text or '',)
            else:
                node = tree.FORMAT_CHECK
                error_node = getattr(node, 'FORMAT_CHECK_ERROR', None)
                if error_node is not None:
                    error_code = error_node.NCERROR.text
                    error_str = 'CHECK ERROR: %s' % (error_node.ERROR.text or '',)

            if error_code:
                # NOTE(review): tree.get(error_code) looks up an XML
                # *attribute* named after the error code; possibly
                # ogone.OGONE_ERROR_MAP.get(error_code) was intended — confirm.
                error_msg = tree.get(error_code)
                error = '%s\n\n%s: %s' % (error_str, error_code, error_msg)
                _logger.error(error)
                raise Exception(error)

            return {
                'acquirer_ref': alias,
                'name': 'XXXXXXXXXXXX%s - %s' % (values['cc_number'][-4:], values['cc_holder_name'])
            }
        return {}
|
#!/usr/bin/python
"""Module for syncing Adobe AD groups with the Adobe Management Portal.
Usage: adobe_sync.py [--dummy] [--debug]
--dummy: Sets the testOnly flag when talking to the API, commands will not really be executed.
--debug: Very verbose output!
"""
import json
import logging
import sys
import time
from urllib import urlencode
import jwt # pip install --upgrade PyJWT
# On macOS:
# pip install --upgrade python-ldap --global-option=build_ext \
# --global-option="-I$(xcrun --show-sdk-path)/usr/include/sasl"
import ldap
import requests # pip install --upgrade requests
# Map of Adobe product configuration names (from the portal) to the AD group
# (distinguished name) whose members should receive that product.
SOFTWARE_GROUPS = {
    'Adobe Captivate': 'CN=Captivate,OU=Software Groups,DC=megacorp,DC=com',
    'Adobe Illustrator': 'CN=Illustrator,OU=Software Groups,DC=megacorp,DC=com',
}
# These users should never be removed from the portal (i.e. admin users).
# They should be manually managed via the portal.
UNREMOVABLE_USERS = ['admin@megacorp.com']
class ActiveDirectory(object):
    """Gathers together methods for querying AD using LDAP."""

    LDAP_SERVER = 'ldaps://ldap.megacorp.com:636'
    USER_BASE_DN = 'OU=Users,DC=megacorp,DC=com'
    BIND_ACCOUNT = 'CN=adobe-sync,OU=Robots,DC=megacorp,DC=com'
    BIND_PASSWORD = 'hunter2'
    # Attribute names used when resolving a user DN to contact details.
    MAIL_ATTR = 'email'
    FIRSTNAME_ATTR = 'firstname'
    LASTNAME_ATTR = 'lastname'

    def __init__(self):
        # Synchronous bind; raises on bad credentials / unreachable server.
        self.connection = ldap.initialize(self.LDAP_SERVER)
        self.connection.bind_s(self.BIND_ACCOUNT, self.BIND_PASSWORD)
        # Caches so repeated group memberships don't re-query LDAP.
        self.dn_to_email_cache = {}
        self.email_to_user_details = {}

    def __del__(self):
        self.connection.unbind_s()

    def query_ldap_for_group(self, group_dn, counter=0):
        """Returns list of users (DNs) belonging to the specified group.

        Uses AD ranged retrieval (member;range=X-Y) for groups with more
        than 1500 members, recursing with an advanced `counter`, and also
        recurses into nested security groups.
        """
        search_filter = '(&(objectClass=group)(distinguishedName=%s))' % group_dn
        attribute = 'member;range=%s-%s' % (counter, counter + 1499)
        base_dn = 'OU=%s' % group_dn.split('OU=', 1)[1]
        ldap_result_id = self.connection.search(base_dn, ldap.SCOPE_SUBTREE, search_filter, [attribute])
        result_set = []
        while True:
            result_type, result_data = self.connection.result(ldap_result_id, 0)
            if not result_data:
                # No more results: expand nested groups, then return unique DNs.
                for result in list(result_set):
                    if 'Security Groups' in result:
                        logging.debug('Group contains sub-group: %s', result)
                        result_set.extend(self.query_ldap_for_group(result))
                return list(set(result_set))
            else:
                if result_type == ldap.RES_SEARCH_ENTRY or result_type == ldap.RES_SEARCH_RESULT:
                    if result_data[0][1]:
                        key, value = result_data[0][1].popitem()
                        result_set.extend(value)
                        # A returned range key ending in '*' means "final
                        # chunk"; otherwise fetch the next 1500 members.
                        if not key.endswith('*'):
                            logging.debug('Group is large, getting results (%s-%s): %s',
                                          counter + 1500, counter + 1500 + 1499, group_dn)
                            result_set.extend(self.query_ldap_for_group(group_dn, counter=counter + 1500))

    def process_group_members(self, member_dns):
        """Process the user DNs

        DNs are useless to us, we need to get at the AD "mail" property. So we have
        to query LDAP for every user. Sometimes a user will be in multiple groups
        and we don't want to end up querying again so we cache the lookups in a
        dictionary (dn_to_email_cache).

        Returns a list of emails (AD mail property).
        """
        emails = []
        for member in member_dns:
            if self.dn_to_email_cache.get(member):
                emails.append(self.dn_to_email_cache[member])
            else:
                # Parentheses must be escaped inside an LDAP filter.
                search_filter = ('(distinguishedName=%s)' %
                                 member.replace('(', r'\(').replace(')', r'\)'))
                result = self.connection.search_s(self.USER_BASE_DN, ldap.SCOPE_SUBTREE,
                                                  search_filter,
                                                  [self.MAIL_ATTR, self.FIRSTNAME_ATTR, self.LASTNAME_ATTR])
                if result:
                    email = result[0][1][self.MAIL_ATTR][0].strip().lower()
                    self.dn_to_email_cache[member] = email
                    self.email_to_user_details[email] = {
                        'firstname': result[0][1][self.FIRSTNAME_ATTR][0].strip(),
                        'lastname': result[0][1][self.LASTNAME_ATTR][0].strip()
                    }
                    emails.append(email)
        return sorted(list(set(emails)))
class AdobeSync(object):
"""Object collects methods for interacting with the Adobe Portal."""
PRIVATE_KEY_PATH = 'adobe-private.key'
    def __init__(self):
        """Load the signing key, authenticate, and initialise sync state."""
        with open(self.PRIVATE_KEY_PATH, 'r') as priv_key_file:
            priv_key = priv_key_file.read()
        # Static Adobe User Management / IMS connection settings.
        self.config = {
            'host': 'usermanagement.adobe.io',
            'endpoint': 'v2/usermanagement',
            'ims_host': 'ims-na1.adobelogin.com',
            'ims_endpoint_jwt': '/ims/exchange/jwt',
            'org_id': 'my_org_id@AdobeOrg',
            'api_key': 'my_api_key',
            'client_secret': 'my_client_secret',
            'tech_acct': 'my_tech_account@techacct.adobe.com',
            'priv_key': priv_key,
        }
        # Sign a JWT and exchange it for an API access token (network call).
        self.jwt_token = self._generate_jwt()
        logging.debug('JSON Web Token:\n%s', self.jwt_token)
        self.access_token = self._obtain_access_token()
        # State filled in by the sync steps:
        self.portal_users = {}            # {email: [portal groups]}
        self.user_mods_additions = {}     # {email: [groups to add]}
        self.user_mods_subtractions = {}  # {email: [groups to remove]}
        self.commands = []                # API action commands to submit
def _generate_jwt(self):
# set expiry time for JSON Web Token
expiry_time = int(time.time()) + 60 * 60 * 24
# create payload
payload = {
'exp' : expiry_time,
'iss' : self.config['org_id'],
'sub' : self.config['tech_acct'],
'aud' : 'https://%s/c/%s' % (self.config['ims_host'], self.config['api_key']),
'https://%s/s/ent_user_sdk' % self.config['ims_host']: True,
}
return jwt.encode(payload, self.config['priv_key'], algorithm='RS256').decode('utf-8')
    def _obtain_access_token(self):
        """Exchange the signed JWT for an IMS access token.

        Returns the access token string; exits the process on any non-200
        response from the IMS endpoint.
        """
        # method parameters
        url = 'https://%s%s' % (self.config['ims_host'], self.config['ims_endpoint_jwt'])
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Cache-Control': 'no-cache',
        }
        body_credentials = {
            'client_id' : self.config['api_key'],
            'client_secret' : self.config['client_secret'],
            'jwt_token' : self.jwt_token,
        }
        body = urlencode(body_credentials)

        # send http request
        res = requests.post(url, headers=headers, data=body)

        # evaluate response
        if res.status_code == 200:
            # extract token
            access_token = json.loads(res.text)['access_token']
            logging.debug('Your access token is:\n%s', access_token)
            return access_token
        else:
            logging.critical('ERROR: Could not obtain access token :(\n%d\n%s\n%s',
                             res.status_code, res.headers, res.text)
            sys.exit(1)
def _send_portal_request(self, request_type='get', command='users', page=0, data=None,
dummy=False):
# method parameters
url = 'https://%s/%s/%s/%s' % (self.config['host'], self.config['endpoint'], command,
self.config['org_id'])
if page is not None:
url += '/%d' % page
if dummy:
url += '?testOnly=true'
headers = {
'Content-type' : 'application/json',
'Accept' : 'application/json',
'x-api-key' : self.config['api_key'],
'Authorization' : 'Bearer %s' % self.access_token,
}
# prepare body
if data:
body = json.dumps(data)
logging.debug('Size of body in KB: %f', sys.getsizeof(body) / 1024.0)
# send http request
if request_type == 'post':
res = requests.post(url, headers=headers, data=body)
elif request_type == 'get':
res = requests.get(url, headers=headers)
else:
logging.critical('ERROR: Unknown request type, should be GET or POST.')
sys.exit(1)
logging.debug('HTTP status code: %d', res.status_code)
logging.debug('HTTP response headers:\n%s', res.headers)
if res.status_code != 200:
logging.critical('The server rejected this request. Perhaps it\'s busy.')
sys.exit(1)
return res.text
def get_all_portal_users(self):
"""Get all the users from the portal and convert into a useful format.
Sets self.portal_users = {user emails: [list of groups]}
"""
completed = False
page = 0
users = []
# The portal returns up to 200 users per "page", the last page returns an
# attribute lastPage = True.
while not completed:
response = json.loads(self._send_portal_request(page=page))
if response.get('result') != 'success':
logging.critical('ERROR: Response from Adobe portal was bad: %s', response)
sys.exit(1)
completed = response.get('lastPage')
users.extend(response.get('users', []))
self.portal_users = {x['email'].lower(): x.get('groups', []) for x in users}
logging.debug('Portal users:\n%s', self.portal_users)
def compare_group_memberships(self, ad_groups):
"""Compares the provided AD group mappings with the Adobe portal mappings.
Args:
ad_groups: a dict of {'Adobe portal config name': [list of emails]}
"""
# Transform ad_groups into a dict of {email: [list of groups]}
ad_members_to_groups = {}
for group, members in ad_groups.items():
for member in members:
groups = ad_members_to_groups.get(member, [])
groups.append(group)
ad_members_to_groups[member] = groups
# Process AD groups for users who need adding to portal, or existing users
# who need additional products added to their profiles.
for group, members in ad_groups.items():
for member in members:
if group not in self.portal_users.get(member, []):
additions = self.user_mods_additions.get(member, [])
additions.append(group)
self.user_mods_additions[member] = additions
# Process AD groups for users who need to be removed from the portal, or
# who need to have one or more products removed from their profile.
for user, groups in self.portal_users.iteritems():
ad_groups = ad_members_to_groups.get(user)
if ad_groups:
groups_for_removal = set(groups) - set(ad_groups)
if groups_for_removal:
self.user_mods_subtractions[user] = list(groups_for_removal)
else:
# If the user isn't in any AD groups then they should be removed from
# all portal groups.
self.user_mods_subtractions[user] = groups
logging.debug('user_mods_additions: %s', self.user_mods_additions)
logging.debug('user_mods_subtractions: %s', self.user_mods_subtractions)
    def generate_portal_commands(self, email_to_user_details):
        """Prepare the additions, deletions and removals.

        Builds self.commands, a list of Adobe UM API "action" commands, from
        user_mods_additions / user_mods_subtractions.

        Args:
            email_to_user_details: {email: {'firstname': ..., 'lastname': ...}}
                as produced by ActiveDirectory.process_group_members().
        """
        commands = []
        # Process the additions first: create the Federated ID if needed,
        # then add the products.
        for user, groups in self.user_mods_additions.iteritems():
            commands.append(
                {'user': user,
                 'do': [{'createFederatedID': {'country': 'US',
                                               'email': user,
                                               'firstname': email_to_user_details[user]['firstname'],
                                               'lastname': email_to_user_details[user]['lastname'],
                                               'option': 'ignoreIfAlreadyExists'}},
                        {'add': {'product': groups}}]}
            )
        # Process the removals
        for user, groups in self.user_mods_subtractions.iteritems():
            if user in self.user_mods_additions.keys():
                # User already has an addition command so we'll add onto that.
                for command_dict in commands:
                    if command_dict['user'] == user:
                        command_dict['do'].append(
                            {'remove': {'product': groups}}
                        )
                        break
            else:
                if set(self.portal_users[user]) == set(groups) and user not in UNREMOVABLE_USERS:
                    # All the user's groups are being removed, we should remove the user.
                    commands.append(
                        {'user': user,
                         'do': [{'removeFromOrg': {}}]}
                    )
                else:
                    # Just some of the user's groups are being removed.
                    if groups:
                        commands.append(
                            {'user': user,
                             'do': [{'remove': {'product': groups}}]}
                        )
        self.commands = commands
def preprocess_commands(self):
"""Preprocess list of commands to comply with API limitations.
There's a limit of 10 commands per request, 10 actions per command, and 10 products per
add / remove operation. We therefore need to re-process the commands into smaller chunks.
We haven't structured our commands in a way where there could be more than 10 actions per
command, so we really only need to care about more than 10 products per user.
Returns a list of lists of commands, no more than 10 per inner list:
[[command1, command2, ...], [command11, command 12, ...]]
"""
expanded_commands = []
for command in self.commands: # Look through each command...
operation_added = False
for operation in command['do']: # Look through each action...
# Check if this action is an add operation, and if it's adding more than 10 products.
if operation.get('add') and len(operation['add']['product']) > 10:
# We need to split the groups into batches of 10 and create a new command for each batch.
for groups in [operation['add']['product'][i:i + 10]
for i in xrange(0, len(operation['add']['product']), 10)]:
# Create a copy of the current command.
new_command = dict(command)
new_do_operations = []
for old_operation in command['do']: # Work through all the operations in the command
if old_operation.get('add'): # We want to replace add operations but leave others
new_do_operations.append({'add': {'product': groups}})
elif old_operation.get('remove'): # Except remove operations which we'll handle below
continue
else:
new_do_operations.append(old_operation)
new_command['do'] = new_do_operations
expanded_commands.append(new_command)
operation_added = True # Set a flag to show something changed.
# Same again, but this time with remove operations.
elif operation.get('remove') and len(operation['remove']['product']) > 10:
for groups in [operation['remove']['product'][i:i + 10]
for i in xrange(0, len(operation['remove']['product']), 10)]:
new_command = dict(command)
new_do_operations = []
for old_operation in command['do']:
if old_operation.get('remove'):
new_do_operations.append({'remove': {'product': groups}})
elif old_operation.get('add'):
continue
else:
new_do_operations.append(old_operation)
new_command['do'] = new_do_operations
expanded_commands.append(new_command)
operation_added = True
# This is an unusual constuct for...else - the else block gets executed once the for loop has
# completed.
else:
if not operation_added:
# Add the command if it didn't contain a add or remove block with more than 10 groups.
expanded_commands.append(command)
# Return the expanded_commands list chunked into lists of 10 commands.
return [expanded_commands[i:i + 10] for i in xrange(0, len(expanded_commands), 10)]
def run_commands(self, command_chunks, dummy=False):
"""Push a list of lists of commands to the portal."""
for command in command_chunks:
response_body = self._send_portal_request(request_type='post', command='action', page=None,
data=command, dummy=dummy)
response = json.loads(response_body)
if response['result'] == 'success':
logging.info('Request completed successfully.')
else:
logging.critical('Server response: %d completed, %d failed.',
response['completed'], response['notCompleted'])
for error in response['errors']:
logging.critical('User: %s Error: %s (%s)',
error['user'], error['message'], error['errorCode'])
def main(argv):
    """Entry point: mirror AD software-group membership into the Adobe portal.

    Flags recognised in argv:
        --dummy: pass dummy mode through to the portal requests.
        --debug: enable debug-level logging.
    """
    dummy = '--dummy' in argv
    if '--debug' in argv:
        logging.basicConfig(level=logging.DEBUG)
    directory = ActiveDirectory()
    ad_group_memberships = {}
    for group_name, group_dn in SOFTWARE_GROUPS.items():
        logging.debug('Processing %s...', group_name)
        member_dns = directory.query_ldap_for_group(group_dn)
        ad_group_memberships[group_name] = directory.process_group_members(member_dns)
    logging.debug('ad_group_memberships:\n%s', ad_group_memberships)
    logging.debug('email_to_user_details:\n%s', directory.email_to_user_details)
    adobe_sync = AdobeSync()
    adobe_sync.get_all_portal_users()
    adobe_sync.compare_group_memberships(ad_group_memberships)
    adobe_sync.generate_portal_commands(directory.email_to_user_details)
    adobe_sync.run_commands(adobe_sync.preprocess_commands(), dummy)


if __name__ == '__main__':
    main(sys.argv)
|
#!/usr/bin/env python
"""Packaging configuration for the MathML-to-image conversion service."""
from setuptools import find_packages, setup

# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as requirements_file:
    required = requirements_file.read().splitlines()

setup(
    name='mathml-to-image-service',
    version='1.0',
    description='MathML to Image converter',
    author='',
    author_email='',
    url='https://github.com/rudigiesler/mathml-to-image-service/',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
    ],
    packages=find_packages(),
    install_requires=required,
)
|
import base64
import fnmatch
import glob
import json
import os
import re
import shutil
import stat
import subprocess
import urllib.parse
import warnings
from datetime import datetime, timedelta
from distutils.util import strtobool
from packaging.version import Version
from pathlib import Path
from typing import Tuple, Any, Union, List, Dict, Optional
from zipfile import ZipFile, ZIP_DEFLATED
import git
import google.auth
import sys
import yaml
from google.cloud import storage
import Tests.Marketplace.marketplace_statistics as mp_statistics
from Tests.Marketplace.marketplace_constants import PackFolders, Metadata, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \
PackTags, PackIgnored, Changelog, BASE_PACK_DEPENDENCY_DICT, SIEM_RULES_OBJECTS, PackStatus
from Utils.release_notes_generator import aggregate_release_notes_for_marketplace
from Tests.scripts.utils import logging_wrapper as logging
class Pack(object):
    """ Class that manipulates and manages the upload of pack's artifact and metadata to cloud storage.

    Args:
        pack_name (str): Pack root folder name.
        pack_path (str): Full path to pack folder.

    Attributes:
        PACK_INITIAL_VERSION (str): pack initial version that will be used as default.
        CHANGELOG_JSON (str): changelog json full name, may be changed in the future.
        README (str): pack's readme file name.
        METADATA (str): pack's metadata file name, the one that will be deployed to cloud storage.
        USER_METADATA (str): user metadata file name, the one that is located in the content repo.
        EXCLUDE_DIRECTORIES (list): list of directories to exclude before uploading pack zip to storage.
        AUTHOR_IMAGE_NAME (str): author image file name.
        RELEASE_NOTES (str): release notes folder name.

    """
    PACK_INITIAL_VERSION = "1.0.0"
    CHANGELOG_JSON = "changelog.json"
    README = "README.md"
    USER_METADATA = "pack_metadata.json"
    METADATA = "metadata.json"
    AUTHOR_IMAGE_NAME = "Author_image.png"
    EXCLUDE_DIRECTORIES = [PackFolders.TEST_PLAYBOOKS.value]
    RELEASE_NOTES = "ReleaseNotes"
    def __init__(self, pack_name, pack_path):
        """Initialize a Pack with its folder name and path; most attributes are filled lazily later.

        Args:
            pack_name (str): Pack root folder name.
            pack_path (str): Full path to pack folder.
        """
        self._pack_name = pack_name
        self._pack_path = pack_path
        self._zip_path = None  # zip_path will be updated as part of zip_pack
        self._marketplaces = []  # initialized in load_user_metadata function
        self._status = None
        self._public_storage_path = ""
        self._remove_files_list = []  # tracking temporary files, in order to delete in later step
        self._server_min_version = "99.99.99"  # initialized min version
        self._latest_version = None  # pack latest version found in changelog
        self._support_type = None  # initialized in load_user_metadata function
        self._current_version = None  # initialized in load_user_metadata function
        self._hidden = False  # initialized in load_user_metadata function
        self._description = None  # initialized in load_user_metadata function
        self._display_name = None  # initialized in load_user_metadata function
        self._user_metadata = {}  # initialized in load_user_metadata function
        self._eula_link = None  # initialized in load_user_metadata function
        self._is_feed = False  # a flag that specifies if pack is a feed pack
        self._downloads_count = 0  # number of pack downloads
        self._bucket_url = None  # URL of where the pack was uploaded.
        self._aggregated = False  # whether the pack's release notes were aggregated or not.
        self._aggregation_str = ""  # the aggregation string msg when the pack versions are aggregated
        self._create_date = None  # initialized in enhance_pack_attributes function
        self._update_date = None  # initialized in enhance_pack_attributes function
        self._uploaded_author_image = False  # whether the pack author image was uploaded or not
        self._uploaded_integration_images = []  # the list of all integration images that were uploaded for the pack
        self._support_details = None  # initialized in enhance_pack_attributes function
        self._author = None  # initialized in enhance_pack_attributes function
        self._certification = None  # initialized in enhance_pack_attributes function
        self._legacy = None  # initialized in enhance_pack_attributes function
        self._author_image = None  # initialized in upload_author_image function
        self._displayed_integration_images = []  # initialized in upload_integration_images function
        self._price = 0  # initialized in enhance_pack_attributes function
        self._is_private_pack = False  # initialized in enhance_pack_attributes function
        self._is_premium = False  # initialized in enhance_pack_attributes function
        self._vendor_id = None  # initialized in enhance_pack_attributes function
        self._partner_id = None  # initialized in enhance_pack_attributes function
        self._partner_name = None  # initialized in enhance_pack_attributes function
        self._content_commit_hash = None  # initialized in enhance_pack_attributes function
        self._preview_only = None  # initialized in enhance_pack_attributes function
        self._tags = None  # initialized in enhance_pack_attributes function
        self._categories = None  # initialized in enhance_pack_attributes function
        self._content_items = None  # initialized in collect_content_items function
        self._search_rank = None  # initialized in enhance_pack_attributes function
        self._related_integration_images = None  # initialized in enhance_pack_attributes function
        self._use_cases = None  # initialized in enhance_pack_attributes function
        self._keywords = None  # initialized in enhance_pack_attributes function
        self._pack_statistics_handler = None  # initialized in enhance_pack_attributes function
        self._contains_transformer = False  # initialized in collect_content_items function
        self._contains_filter = False  # initialized in collect_content_items function
        self._is_missing_dependencies = False  # initialized in _load_pack_dependencies function
        self._is_modified = None  # initialized in detect_modified function
        self._is_siem = False  # initialized in collect_content_items function
        # Dependencies attributes - these contain only packs that are a part of this marketplace
        self._first_level_dependencies = {}  # initialized in set_pack_dependencies function
        self._all_levels_dependencies = []  # initialized in set_pack_dependencies function
        self._displayed_images_dependent_on_packs = []  # initialized in set_pack_dependencies function
        self._parsed_dependencies = None  # initialized in enhance_pack_attributes function
    @property
    def name(self):
        """ str: pack root folder name (also used as the pack id in metadata).
        """
        return self._pack_name
    @property
    def path(self):
        """ str: pack folder full path.
        """
        return self._pack_path
@property
def latest_version(self):
""" str: pack latest version from sorted keys of changelog.json file.
"""
if not self._latest_version:
self._latest_version = self._get_latest_version()
return self._latest_version
else:
return self._latest_version
@latest_version.setter
def latest_version(self, latest_version):
self._latest_version = latest_version
    @property
    def status(self):
        """ str: current status of the pack.
        """
        return self._status
    @property
    def is_feed(self):
        """
        bool: whether the pack is a feed pack.
        """
        return self._is_feed
    @is_feed.setter
    def is_feed(self, is_feed):
        """ setter of is_feed.
        """
        self._is_feed = is_feed
    @property
    def is_siem(self):
        """
        bool: whether the pack is a siem pack.
        """
        return self._is_siem
    @is_siem.setter
    def is_siem(self, is_siem):
        """ setter of is_siem.
        """
        self._is_siem = is_siem
    @status.setter  # type: ignore[attr-defined,no-redef]
    def status(self, status_value):
        """ setter of pack current status.
        """
        self._status = status_value
    @property
    def public_storage_path(self):
        """ str: public gcs path of uploaded pack.
        """
        return self._public_storage_path
    @public_storage_path.setter
    def public_storage_path(self, path_value):
        """ setter of public gcs path of uploaded pack.
        """
        self._public_storage_path = path_value
    @property
    def support_type(self):
        """ str: support type of the pack.
        """
        return self._support_type
    @support_type.setter
    def support_type(self, support_value):
        """ setter of support type of the pack.
        """
        self._support_type = support_value
    @property
    def current_version(self):
        """ str: current version of the pack (different from latest_version).
        """
        return self._current_version
    @current_version.setter
    def current_version(self, current_version_value):
        """ setter of current version of the pack.
        """
        self._current_version = current_version_value
    @property
    def hidden(self):
        """ bool: internal content field for preventing pack from being displayed.
        """
        return self._hidden
    @hidden.setter
    def hidden(self, hidden_value):
        """ setter of hidden property of the pack.
        """
        self._hidden = hidden_value
    @property
    def description(self):
        """ str: Description of the pack (found in pack_metadata.json).
        """
        return self._description
    @description.setter
    def description(self, description_value):
        """ setter of description property of the pack.
        """
        self._description = description_value
    @property
    def display_name(self):
        """ str: Display name of the pack (found in pack_metadata.json).
        """
        return self._display_name
    @property
    def user_metadata(self):
        """ dict: the contents of the pack_metadata.json file.
        """
        return self._user_metadata
    @display_name.setter  # type: ignore[attr-defined,no-redef]
    def display_name(self, display_name_value):
        """ setter of display name property of the pack.
        """
        self._display_name = display_name_value
@property
def server_min_version(self):
""" str: server min version according to collected items.
"""
if not self._server_min_version or self._server_min_version == "99.99.99":
return Metadata.SERVER_DEFAULT_MIN_VERSION
else:
return self._server_min_version
    @property
    def downloads_count(self):
        """ int: pack's downloads count.
        """
        return self._downloads_count
    @downloads_count.setter
    def downloads_count(self, download_count_value):
        """ setter of downloads count property of the pack.
        """
        self._downloads_count = download_count_value
    @property
    def bucket_url(self):
        """ str: URL of where the pack was uploaded.
        """
        return self._bucket_url
    @bucket_url.setter
    def bucket_url(self, bucket_url):
        """ setter of the pack bucket_url.
        """
        self._bucket_url = bucket_url
    @property
    def aggregated(self):
        """ bool: whether the pack's release notes were aggregated or not.
        """
        return self._aggregated
    @property
    def aggregation_str(self):
        """ str: the aggregation message built when pack versions are aggregated.
        """
        return self._aggregation_str
    @property
    def create_date(self):
        """ str: pack create date.
        """
        return self._create_date
    @create_date.setter
    def create_date(self, value):
        # setter of pack create date.
        self._create_date = value
    @property
    def update_date(self):
        """ str: pack update date.
        """
        return self._update_date
    @update_date.setter
    def update_date(self, value):
        # setter of pack update date.
        self._update_date = value
    @property
    def uploaded_author_image(self):
        """ bool: whether the pack author image was uploaded or not.
        """
        return self._uploaded_author_image
    @uploaded_author_image.setter
    def uploaded_author_image(self, uploaded_author_image):
        """ setter: whether the pack author image was uploaded or not.
        """
        self._uploaded_author_image = uploaded_author_image
    @property
    def uploaded_integration_images(self):
        """ list: the list of uploaded integration images.
        """
        return self._uploaded_integration_images
    @property
    def is_missing_dependencies(self):
        """ bool: whether a dependency of the pack was missing from the index. """
        return self._is_missing_dependencies
    @property
    def zip_path(self):
        """ str: path to the pack zip; updated as part of zip_pack. """
        return self._zip_path
    @property
    def is_modified(self):
        """ bool: whether the pack was modified; set in detect_modified. """
        return self._is_modified
    @property
    def marketplaces(self):
        """ list: marketplaces of the pack; initialized in load_user_metadata. """
        return self._marketplaces
    @property
    def all_levels_dependencies(self):
        """ list: all-level dependencies; initialized in set_pack_dependencies. """
        return self._all_levels_dependencies
def _get_latest_version(self):
""" Return latest semantic version of the pack.
In case that changelog.json file was not found, default value of 1.0.0 will be returned.
Otherwise, keys of semantic pack versions will be collected and sorted in descending and return latest version.
For additional information regarding changelog.json format go to issue #19786
Returns:
str: Pack latest version.
"""
changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if not os.path.exists(changelog_path):
return self._current_version
with open(changelog_path, "r") as changelog_file:
changelog = json.load(changelog_file)
pack_versions = [Version(v) for v in changelog.keys()]
pack_versions.sort(reverse=True)
return str(pack_versions[0])
@staticmethod
def organize_integration_images(pack_integration_images: list, pack_dependencies_integration_images_dict: dict,
pack_dependencies_by_download_count: list):
""" By Issue #32038
1. Sort pack integration images by alphabetical order
2. Sort pack dependencies by download count
Pack integration images are shown before pack dependencies integration images
Args:
pack_integration_images (list): list of pack integration images
pack_dependencies_integration_images_dict: a mapping of pack dependency name to its integration images
pack_dependencies_by_download_count: a list of pack dependencies sorted by download count
Returns:
list: list of sorted integration images
"""
def sort_by_name(integration_image: dict):
return integration_image.get('name', '')
# sort packs integration images
pack_integration_images = sorted(pack_integration_images, key=sort_by_name)
# sort pack dependencies integration images
all_dep_int_imgs = pack_integration_images
for dep_pack_name in pack_dependencies_by_download_count:
if dep_pack_name in pack_dependencies_integration_images_dict:
logging.info(f'Adding {dep_pack_name} to deps int imgs')
dep_int_imgs = sorted(pack_dependencies_integration_images_dict[dep_pack_name], key=sort_by_name)
for dep_int_img in dep_int_imgs:
if dep_int_img not in all_dep_int_imgs: # avoid duplicates
all_dep_int_imgs.append(dep_int_img)
return all_dep_int_imgs
@staticmethod
def _get_all_pack_images(pack_integration_images: List, display_dependencies_images: List,
dependencies_metadata: Dict,
pack_dependencies_by_download_count):
""" Returns data of uploaded pack integration images and it's path in gcs. Pack dependencies integration images
are added to that result as well.
Args:
pack_integration_images (list): list of uploaded to gcs integration images and it paths in gcs.
display_dependencies_images (list): list of pack names of additional dependencies images to display.
dependencies_metadata (dict): all level dependencies data.
pack_dependencies_by_download_count (list): list of pack names that are dependencies of the given pack
sorted by download count.
Returns:
list: collection of integration display name and it's path in gcs.
"""
dependencies_integration_images_dict: dict = {}
additional_dependencies_data = {k: v for k, v in dependencies_metadata.items() if k in
display_dependencies_images}
for dependency_data in additional_dependencies_data.values():
for dep_int_img in dependency_data.get('integrations', []):
dep_int_img_gcs_path = dep_int_img.get('imagePath', '') # image public url
dep_int_img['name'] = Pack.remove_contrib_suffix_from_name(dep_int_img.get('name', ''))
dep_pack_name = os.path.basename(os.path.dirname(dep_int_img_gcs_path))
if dep_pack_name not in display_dependencies_images:
continue # skip if integration image is not part of displayed images of the given pack
if dep_int_img not in pack_integration_images: # avoid duplicates in list
if dep_pack_name in dependencies_integration_images_dict:
dependencies_integration_images_dict[dep_pack_name].append(dep_int_img)
else:
dependencies_integration_images_dict[dep_pack_name] = [dep_int_img]
return Pack.organize_integration_images(
pack_integration_images, dependencies_integration_images_dict, pack_dependencies_by_download_count
)
def add_pack_type_tags(self, yaml_content, yaml_type):
"""
Checks if an pack objects is siem or feed object. If so, updates Pack._is_feed or Pack._is_siem
Args:
yaml_content: The yaml content extracted by yaml.safe_load().
yaml_type: The type of object to check.
Returns:
Doesn't return
"""
if yaml_type == 'Integration':
if yaml_content.get('script', {}).get('feed', False) is True:
self._is_feed = True
if yaml_content.get('isfetchevents', False) is True:
self._is_siem = True
if yaml_type == 'Playbook':
if yaml_content.get('name').startswith('TIM '):
self._is_feed = True
if yaml_type in SIEM_RULES_OBJECTS:
self._is_siem = True
@staticmethod
def _clean_release_notes(release_notes_lines):
return re.sub(r'<\!--.*?-->', '', release_notes_lines, flags=re.DOTALL)
@staticmethod
def _parse_pack_dependencies(first_level_dependencies, dependencies_metadata_dict):
""" Parses user defined dependencies and returns dictionary with relevant data about each dependency pack.
Args:
first_level_dependencies (dict): first lever dependencies that were retrieved
from user pack_metadata.json file.
dependencies_metadata_dict (dict): dict of pack dependencies data.
Returns:
dict: parsed dictionary with pack dependency data.
"""
parsed_result = {}
for dependency_id, dependency_data in dependencies_metadata_dict.items():
parsed_result[dependency_id] = {
"mandatory": first_level_dependencies.get(dependency_id, {}).get('mandatory', True),
"minVersion": dependency_data.get(Metadata.CURRENT_VERSION, Pack.PACK_INITIAL_VERSION),
"author": dependency_data.get('author', ''),
"name": dependency_data.get('name') if dependency_data.get('name') else dependency_id,
"certification": dependency_data.get('certification', 'certified')
}
return parsed_result
@staticmethod
def _create_support_section(support_type, support_url=None, support_email=None):
""" Creates support dictionary that is part of metadata.
In case of support type xsoar, adds default support url. If support is xsoar and support url is defined and
doesn't match xsoar default url, warning is raised.
Args:
support_type (str): support type of pack.
support_url (str): support full url.
support_email (str): support email address.
Returns:
dict: supported data dictionary.
"""
support_details = {}
if support_url: # set support url from user input
support_details['url'] = support_url
elif support_type == Metadata.XSOAR_SUPPORT: # in case support type is xsoar, set default xsoar support url
support_details['url'] = Metadata.XSOAR_SUPPORT_URL
# add support email if defined
if support_email:
support_details['email'] = support_email
return support_details
@staticmethod
def _get_author(support_type, author=None):
""" Returns pack author. In case support type is xsoar, more additional validation are applied.
Args:
support_type (str): support type of pack.
author (str): author of the pack.
Returns:
str: returns author from the input.
"""
if support_type == Metadata.XSOAR_SUPPORT and not author:
return Metadata.XSOAR_AUTHOR # returned xsoar default author
elif support_type == Metadata.XSOAR_SUPPORT and author != Metadata.XSOAR_AUTHOR:
logging.warning(f"{author} author doest not match {Metadata.XSOAR_AUTHOR} default value")
return author
else:
return author
@staticmethod
def _get_certification(support_type, certification=None):
""" Returns pack certification.
In case support type is xsoar or partner, CERTIFIED is returned.
In case support is not xsoar or partner but pack_metadata has certification field, certification value will be
taken from pack_metadata defined value.
Otherwise empty certification value (empty string) will be returned
Args:
support_type (str): support type of pack.
certification (str): certification value from pack_metadata, if exists.
Returns:
str: certification value
"""
if support_type in [Metadata.XSOAR_SUPPORT, Metadata.PARTNER_SUPPORT]:
return Metadata.CERTIFIED
elif certification:
return certification
else:
return ""
def _get_tags_from_landing_page(self, landing_page_sections: dict) -> set:
"""
Build the pack's tag list according to the user metadata and the landingPage sections file.
Args:
landing_page_sections (dict): landingPage sections and the packs in each one of them.
Returns:
set: Pack's tags.
"""
tags = set()
sections = landing_page_sections.get('sections', []) if landing_page_sections else []
for section in sections:
if self._pack_name in landing_page_sections.get(section, []):
tags.add(section)
return tags
    def _parse_pack_metadata(self, build_number, commit_hash):
        """ Parses pack metadata according to issue #19786 and #20091. Part of field may change over the time.

        Assembles the metadata.json payload from attributes previously filled by
        load_user_metadata / enhance_pack_attributes / collect_content_items.

        Args:
            build_number (str): circleCI build number.
            commit_hash (str): current commit hash.

        Returns:
            dict: parsed pack metadata.

        """
        pack_metadata = {
            Metadata.NAME: self._display_name or self._pack_name,
            Metadata.ID: self._pack_name,
            Metadata.DESCRIPTION: self._description or self._pack_name,
            Metadata.CREATED: self._create_date,
            Metadata.UPDATED: self._update_date,
            Metadata.LEGACY: self._legacy,
            Metadata.SUPPORT: self._support_type,
            Metadata.SUPPORT_DETAILS: self._support_details,
            Metadata.EULA_LINK: self._eula_link,
            Metadata.AUTHOR: self._author,
            Metadata.AUTHOR_IMAGE: self._author_image,
            Metadata.CERTIFICATION: self._certification,
            Metadata.PRICE: self._price,
            Metadata.SERVER_MIN_VERSION: self.user_metadata.get(Metadata.SERVER_MIN_VERSION) or self.server_min_version,
            Metadata.CURRENT_VERSION: self.user_metadata.get(Metadata.CURRENT_VERSION, ''),
            Metadata.VERSION_INFO: build_number,
            Metadata.COMMIT: commit_hash,
            Metadata.DOWNLOADS: self._downloads_count,
            Metadata.TAGS: list(self._tags or []),
            Metadata.CATEGORIES: self._categories,
            Metadata.CONTENT_ITEMS: self._content_items,
            Metadata.SEARCH_RANK: self._search_rank,
            Metadata.INTEGRATIONS: self._related_integration_images,
            Metadata.USE_CASES: self._use_cases,
            Metadata.KEY_WORDS: self._keywords,
            Metadata.DEPENDENCIES: self._parsed_dependencies,
            Metadata.VIDEOS: self.user_metadata.get(Metadata.VIDEOS) or [],
        }
        # Private packs carry additional premium/partner fields.
        if self._is_private_pack:
            pack_metadata.update({
                Metadata.PREMIUM: self._is_premium,
                Metadata.VENDOR_ID: self._vendor_id,
                Metadata.PARTNER_ID: self._partner_id,
                Metadata.PARTNER_NAME: self._partner_name,
                Metadata.CONTENT_COMMIT_HASH: self._content_commit_hash,
                Metadata.PREVIEW_ONLY: self._preview_only
            })
        return pack_metadata
def _load_pack_dependencies_metadata(self, index_folder_path, packs_dict):
""" Loads dependencies metadata and returns mapping of pack id and it's loaded data.
There are 2 cases:
Case 1: The dependency is present in the index.zip. In this case, we add it to the dependencies results.
Case 2: The dependency is missing from the index.zip since it is a new pack. In this case, handle missing
dependency - This means we mark this pack as 'missing dependency', and once the new index.zip is
created, and therefore it contains the new pack, we call this function again, and hitting case 1.
Args:
index_folder_path (str): full path to download index folder.
packs_dict (dict): dict of all packs relevant for current marketplace, as {pack_id: pack_object}.
Returns:
dict: pack id as key and loaded metadata of packs as value.
bool: True if the pack is missing dependencies, False otherwise.
"""
dependencies_metadata_result = {}
dependencies_ids = {dep for dep in self._first_level_dependencies}
dependencies_ids.update(self._displayed_images_dependent_on_packs)
for dependency_pack_id in dependencies_ids:
dependency_metadata_path = os.path.join(index_folder_path, dependency_pack_id, Pack.METADATA)
if os.path.exists(dependency_metadata_path):
# Case 1: the dependency is found in the index.zip
with open(dependency_metadata_path, 'r') as metadata_file:
dependency_metadata = json.load(metadata_file)
dependencies_metadata_result[dependency_pack_id] = dependency_metadata
else:
# Case 2: the dependency is not in the index since it is a new pack
self._is_missing_dependencies = True
logging.warning(f"{self._pack_name} pack dependency with id {dependency_pack_id} "
f"was not found in index, marking it as missing dependencies - to be resolved in "
f"next iteration over packs")
return dependencies_metadata_result, self._is_missing_dependencies
@staticmethod
def _get_updated_changelog_entry(changelog: dict, version: str, release_notes: str = None,
version_display_name: str = None, build_number_with_prefix: str = None,
released_time: str = None):
"""
Args:
changelog (dict): The changelog from the production bucket.
version (str): The version that is the key in the changelog of the entry wished to be updated.
release_notes (str): The release notes lines to update the entry with.
version_display_name (str): The version display name to update the entry with.
build_number_with_prefix(srt): the build number to modify the entry to, including the prefix R (if present).
released_time: The released time to update the entry with.
"""
changelog_entry = changelog.get(version)
if not changelog_entry:
raise Exception('The given version is not a key in the changelog')
version_display_name = \
version_display_name if version_display_name else changelog_entry[Changelog.DISPLAY_NAME].split('-')[0]
build_number_with_prefix = \
build_number_with_prefix if build_number_with_prefix else \
changelog_entry[Changelog.DISPLAY_NAME].split('-')[1]
changelog_entry[Changelog.RELEASE_NOTES] = release_notes if release_notes else changelog_entry[
Changelog.RELEASE_NOTES]
changelog_entry[Changelog.DISPLAY_NAME] = f'{version_display_name} - {build_number_with_prefix}'
changelog_entry[Changelog.RELEASED] = released_time if released_time else changelog_entry[Changelog.RELEASED]
return changelog_entry
def _create_changelog_entry(self, release_notes, version_display_name, build_number,
new_version=True, initial_release=False):
""" Creates dictionary entry for changelog.
Args:
release_notes (str): release notes md.
version_display_name (str): display name version.
build_number (srt): current build number.
new_version (bool): whether the entry is new or not. If not new, R letter will be appended to build number.
initial_release (bool): whether the entry is an initial release or not.
Returns:
dict: release notes entry of changelog
"""
if new_version:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
elif initial_release:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: self._create_date}
elif self.is_modified:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - R{build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
return {}
def remove_unwanted_files(self, delete_test_playbooks=True):
""" Iterates over pack folder and removes hidden files and unwanted folders.
Args:
delete_test_playbooks (bool): whether to delete test playbooks folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = True
try:
for directory in Pack.EXCLUDE_DIRECTORIES:
if delete_test_playbooks and os.path.isdir(f'{self._pack_path}/{directory}'):
shutil.rmtree(f'{self._pack_path}/{directory}')
logging.info(f"Deleted {directory} directory from {self._pack_name} pack")
for root, dirs, files in os.walk(self._pack_path, topdown=True):
for pack_file in files:
full_file_path = os.path.join(root, pack_file)
# removing unwanted files
if pack_file.startswith('.') \
or pack_file in [Pack.AUTHOR_IMAGE_NAME, Pack.USER_METADATA] \
or pack_file in self._remove_files_list:
os.remove(full_file_path)
logging.info(f"Deleted pack {pack_file} file for {self._pack_name} pack")
continue
except Exception:
task_status = False
logging.exception(f"Failed to delete ignored files for pack {self._pack_name}")
finally:
return task_status
    def sign_pack(self, signature_string=None):
        """ Signs pack folder and creates signature file.

        Signing shells out to the external `./signDirectory` tool, which reads the
        key from a local file; the base64 signature string is therefore persisted
        to a temporary "keyfile" in the current working directory first.
        When no signature string is supplied, signing is skipped and the
        operation is still considered successful.

        Args:
            signature_string (str): Base64 encoded string used to sign the pack.

        Returns:
            bool: whether the operation succeeded.
        """
        task_status = False

        try:
            if signature_string:
                # The signing tool expects the key on disk, not on the command line.
                with open("keyfile", "wb") as keyfile:
                    keyfile.write(signature_string.encode())
                arg = f'./signDirectory {self._pack_path} keyfile base64'
                signing_process = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                output, err = signing_process.communicate()

                if err:
                    logging.error(f"Failed to sign pack for {self._pack_name} - {str(err)}")
                    # NOTE: the bare return is overridden by the `finally` clause
                    # below, so the method still returns task_status (False) here.
                    return

                logging.info(f"Signed {self._pack_name} pack successfully")
            else:
                logging.info(f"No signature provided. Skipped signing {self._pack_name} pack")

            task_status = True
        except Exception:
            logging.exception(f"Failed to sign pack for {self._pack_name}")
        finally:
            # Returning from `finally` swallows any in-flight exception and makes
            # task_status the single source of truth for success/failure.
            return task_status
@staticmethod
def zip_folder_items(source_path, source_name, zip_pack_path):
"""
Zips the source_path
Args:
source_path (str): The source path of the folder the items are in.
zip_pack_path (str): The path to the zip folder.
source_name (str): The name of the source that should be zipped.
"""
task_status = False
try:
with ZipFile(zip_pack_path, 'w', ZIP_DEFLATED) as pack_zip:
for root, dirs, files in os.walk(source_path, topdown=True):
for f in files:
full_file_path = os.path.join(root, f)
relative_file_path = os.path.relpath(full_file_path, source_path)
pack_zip.write(filename=full_file_path, arcname=relative_file_path)
task_status = True
logging.success(f"Finished zipping {source_name} folder.")
except Exception:
logging.exception(f"Failed in zipping {source_name} folder")
finally:
return task_status
@staticmethod
def encrypt_pack(zip_pack_path, pack_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key):
""" decrypt the pack in order to see that the pack was encrypted in the first place.
Args:
zip_pack_path (str): The path to the encrypted zip pack.
pack_name (str): The name of the pack that should be encrypted.
encryption_key (str): The key which we can decrypt the pack with.
extract_destination_path (str): The path in which the pack resides.
private_artifacts_dir (str): The chosen name for the private artifacts directory.
secondary_encryption_key (str) : A second key which we can decrypt the pack with.
"""
try:
current_working_dir = os.getcwd()
shutil.copy('./encryptor', os.path.join(extract_destination_path, 'encryptor'))
os.chmod(os.path.join(extract_destination_path, 'encryptor'), stat.S_IXOTH)
os.chdir(extract_destination_path)
subprocess.call('chmod +x ./encryptor', shell=True)
output_file = zip_pack_path.replace("_not_encrypted.zip", ".zip")
full_command = f'./encryptor ./{pack_name}_not_encrypted.zip {output_file} "{encryption_key}"'
subprocess.call(full_command, shell=True)
secondary_encryption_key_output_file = zip_pack_path.replace("_not_encrypted.zip", ".enc2.zip")
full_command_with_secondary_encryption = f'./encryptor ./{pack_name}_not_encrypted.zip ' \
f'{secondary_encryption_key_output_file}' \
f' "{secondary_encryption_key}"'
subprocess.call(full_command_with_secondary_encryption, shell=True)
new_artefacts = os.path.join(current_working_dir, private_artifacts_dir)
if os.path.exists(new_artefacts):
shutil.rmtree(new_artefacts)
os.mkdir(path=new_artefacts)
shutil.copy(zip_pack_path, os.path.join(new_artefacts, f'{pack_name}_not_encrypted.zip'))
shutil.copy(output_file, os.path.join(new_artefacts, f'{pack_name}.zip'))
shutil.copy(secondary_encryption_key_output_file, os.path.join(new_artefacts, f'{pack_name}.enc2.zip'))
os.chdir(current_working_dir)
except (subprocess.CalledProcessError, shutil.Error) as error:
print(f"Error while trying to encrypt pack. {error}")
    def decrypt_pack(self, encrypted_zip_pack_path, decryption_key):
        """ decrypt the pack in order to see that the pack was encrypted in the first place.

        Copies the pack and the external `./decryptor` tool into a scratch
        directory, runs the decryption there, and judges success by whether the
        tool wrote anything to stderr.

        Args:
            encrypted_zip_pack_path (str): The path for the encrypted zip pack.
            decryption_key (str): The key which we can decrypt the pack with.

        Returns:
            bool: whether the decryption succeeded.
        """
        try:
            current_working_dir = os.getcwd()
            extract_destination_path = f'{current_working_dir}/decrypt_pack_dir'
            # NOTE(review): os.mkdir raises FileExistsError if a previous run left
            # this directory behind — it is only cleaned up on the success path.
            os.mkdir(extract_destination_path)

            shutil.copy('./decryptor', os.path.join(extract_destination_path, 'decryptor'))
            secondary_encrypted_pack_path = os.path.join(extract_destination_path, 'encrypted_zip_pack.zip')
            shutil.copy(encrypted_zip_pack_path, secondary_encrypted_pack_path)
            os.chmod(os.path.join(extract_destination_path, 'decryptor'), stat.S_IXOTH)
            output_decrypt_file_path = f"{extract_destination_path}/decrypt_pack.zip"
            os.chdir(extract_destination_path)
            subprocess.call('chmod +x ./decryptor', shell=True)
            full_command = f'./decryptor {secondary_encrypted_pack_path} {output_decrypt_file_path} "{decryption_key}"'
            process = subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            stdout, stderr = process.communicate()
            # Clean up the scratch dir and restore cwd before inspecting the output.
            # NOTE(review): if any call above raises, cwd is NOT restored and the
            # scratch dir is NOT removed — confirm callers tolerate that.
            shutil.rmtree(extract_destination_path)
            os.chdir(current_working_dir)
            if stdout:
                logging.info(str(stdout))
            if stderr:
                # Any stderr output from the tool is treated as a failed decryption.
                logging.error(f"Error: Premium pack {self._pack_name} should be encrypted, but isn't.")
                return False
            return True
        except subprocess.CalledProcessError as error:
            logging.exception(f"Error while trying to decrypt pack. {error}")
            return False
def is_pack_encrypted(self, encrypted_zip_pack_path, decryption_key):
""" Checks if the pack is encrypted by trying to decrypt it.
Args:
encrypted_zip_pack_path (str): The path for the encrypted zip pack.
decryption_key (str): The key which we can decrypt the pack with.
Returns:
bool: whether the pack is encrypted.
"""
return self.decrypt_pack(encrypted_zip_pack_path, decryption_key)
def zip_pack(self, extract_destination_path="", encryption_key="",
private_artifacts_dir='private_artifacts', secondary_encryption_key=""):
""" Zips pack folder.
Returns:
bool: whether the operation succeeded.
str: full path to created pack zip.
"""
self._zip_path = f"{self._pack_path}.zip" if not encryption_key else f"{self._pack_path}_not_encrypted.zip"
source_path = self._pack_path
source_name = self._pack_name
task_status = self.zip_folder_items(source_path, source_name, self._zip_path)
# if failed to zip, skip encryption
if task_status and encryption_key:
try:
Pack.encrypt_pack(self._zip_path, source_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key)
# If the pack needs to be encrypted, it is initially at a different location than this final path
except Exception:
task_status = False
logging.exception(f"Failed in encrypting {source_name} folder")
final_path_to_zipped_pack = f"{source_path}.zip"
return task_status, final_path_to_zipped_pack
    def detect_modified(self, content_repo, index_folder_path, current_commit_hash, previous_commit_hash):
        """ Detects pack modified files.

        The diff is done between current commit and previous commit that was saved in metadata that was downloaded from
        index. In case that no commit was found in index (initial run), the default value will be set to previous commit
        from origin/master.

        Args:
            content_repo (git.repo.base.Repo): content repo object.
            index_folder_path (str): full path to downloaded index folder.
            current_commit_hash (str): last commit hash of head.
            previous_commit_hash (str): the previous commit to diff with.

        Returns:
            bool: whether the operation succeeded.
            list: list of RN files that were modified.
        """
        task_status = False
        modified_rn_files_paths = []
        pack_was_modified = False

        try:
            pack_index_metadata_path = os.path.join(index_folder_path, self._pack_name, Pack.METADATA)

            if not os.path.exists(pack_index_metadata_path):
                # Pack is not in the index yet (initial run) — nothing to diff against.
                logging.info(f"{self._pack_name} pack was not found in index, skipping detection of modified pack.")
                task_status = True
                # NOTE: all bare returns here are overridden by the `finally` clause,
                # so every path actually returns (task_status, modified_rn_files_paths).
                return

            with open(pack_index_metadata_path, 'r') as metadata_file:
                downloaded_metadata = json.load(metadata_file)

            # Prefer the commit recorded in the downloaded metadata over the caller's default.
            previous_commit_hash = downloaded_metadata.get(Metadata.COMMIT, previous_commit_hash)

            # set 2 commits by hash value in order to check the modified files of the diff
            current_commit = content_repo.commit(current_commit_hash)
            previous_commit = content_repo.commit(previous_commit_hash)

            for modified_file in current_commit.diff(previous_commit):
                if modified_file.a_path.startswith(PACKS_FOLDER):
                    modified_file_path_parts = os.path.normpath(modified_file.a_path).split(os.sep)

                    # Parts[1] is the pack name under the Packs/ folder.
                    if modified_file_path_parts[1] and modified_file_path_parts[1] == self._pack_name:
                        if not is_ignored_pack_file(modified_file_path_parts):
                            logging.info(f"Detected modified files in {self._pack_name} pack")
                            task_status, pack_was_modified = True, True
                            modified_rn_files_paths.append(modified_file.a_path)
                        else:
                            logging.debug(f'{modified_file.a_path} is an ignored file')

            task_status = True
            if pack_was_modified:
                # Make sure the modification is not only of release notes files, if so count that as not modified
                pack_was_modified = not all(self.RELEASE_NOTES in path for path in modified_rn_files_paths)

                # Filter modifications in release notes config JSON file - they will be handled later on.
                modified_rn_files_paths = [path_ for path_ in modified_rn_files_paths if path_.endswith('.md')]

            self._is_modified = pack_was_modified
            return
        except Exception:
            logging.exception(f"Failed in detecting modified files of {self._pack_name} pack")
        finally:
            # Returning from `finally` swallows in-flight exceptions and overrides
            # the earlier bare returns with the accumulated results.
            return task_status, modified_rn_files_paths
    def upload_to_storage(self, zip_pack_path, latest_version, storage_bucket, override_pack, storage_base_path,
                          private_content=False, pack_artifacts_path=None, overridden_upload_path=None):
        """ Manages the upload of pack zip artifact to correct path in cloud storage.

        The zip pack will be uploaded by default to following path: /content/packs/pack_name/pack_latest_version.
        In case that zip pack artifact already exist at constructed path, the upload will be skipped.
        If flag override_pack is set to True, pack will forced for upload.
        If overridden_upload_path is provided it will override said path, and will save the item to that destination.

        Args:
            zip_pack_path (str): full path to pack zip artifact.
            latest_version (str): pack latest version.
            storage_bucket (google.cloud.storage.bucket.Bucket): google cloud storage bucket.
            override_pack (bool): whether to override existing pack.
            private_content (bool): Is being used in a private content build.
            storage_base_path (str): The upload destination in the target bucket for all packs (in the format of
                                     <some_path_in_the_target_bucket>/content/Packs).
            pack_artifacts_path (str): Path to where we are saving pack artifacts.
            overridden_upload_path (str): If provided, will override version_pack_path calculation and will use this path instead

        Returns:
            bool: whether the operation succeeded.
            bool: True in case of pack existence at targeted path and upload was skipped, otherwise returned False.
            str: Path to pack's zip in the bucket after the upload.
        """
        task_status = True

        try:
            if overridden_upload_path:
                # Overridden destinations are not supported for private content builds.
                if private_content:
                    logging.warning("Private content does not support overridden argument")
                    return task_status, True, None
                zip_to_upload_full_path = overridden_upload_path
            else:
                version_pack_path = os.path.join(storage_base_path, self._pack_name, latest_version)
                existing_files = [Path(f.name).name for f in storage_bucket.list_blobs(prefix=version_pack_path)]

                if override_pack:
                    logging.warning(f"Uploading {self._pack_name} pack to storage and overriding the existing pack "
                                    f"files already in storage.")

                elif existing_files:
                    # Pack version already uploaded and no override requested — skip.
                    logging.warning(f"The following packs already exist in the storage: {', '.join(existing_files)}")
                    logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
                    return task_status, True, None

                zip_to_upload_full_path = os.path.join(version_pack_path, f"{self._pack_name}.zip")

            blob = storage_bucket.blob(zip_to_upload_full_path)
            blob.cache_control = "no-cache,max-age=0"  # disabling caching for pack blob

            with open(zip_pack_path, "rb") as pack_zip:
                blob.upload_from_file(pack_zip)

            if private_content:
                # Private builds also upload the secondary-key encrypted artifact
                # (<pack>.enc2.zip) next to the primary zip.
                secondary_encryption_key_pack_name = f"{self._pack_name}.enc2.zip"
                secondary_encryption_key_bucket_path = os.path.join(version_pack_path,
                                                                    secondary_encryption_key_pack_name)

                # In some cases the path given is actually a zip.
                if isinstance(pack_artifacts_path, str) and pack_artifacts_path.endswith('content_packs.zip'):
                    _pack_artifacts_path = pack_artifacts_path.replace('/content_packs.zip', '')
                else:
                    _pack_artifacts_path = pack_artifacts_path

                secondary_encryption_key_artifacts_path = zip_pack_path.replace(f'{self._pack_name}',
                                                                                f'{self._pack_name}.enc2')

                blob = storage_bucket.blob(secondary_encryption_key_bucket_path)
                blob.cache_control = "no-cache,max-age=0"  # disabling caching for pack blob
                with open(secondary_encryption_key_artifacts_path, "rb") as pack_zip:
                    blob.upload_from_file(pack_zip)

                print(
                    f"Copying {secondary_encryption_key_artifacts_path} to {_pack_artifacts_path}/"
                    f"packs/{self._pack_name}.zip")
                shutil.copy(secondary_encryption_key_artifacts_path,
                            f'{_pack_artifacts_path}/packs/{self._pack_name}.zip')

            # NOTE(review): public_storage_path points at the *last* uploaded blob —
            # for private content that is the enc2 blob, not the primary zip; confirm intended.
            self.public_storage_path = blob.public_url
            logging.success(f"Uploaded {self._pack_name} pack to {zip_to_upload_full_path} path.")

            return task_status, False, zip_to_upload_full_path
        except Exception:
            task_status = False
            logging.exception(f"Failed in uploading {self._pack_name} pack to gcs.")
            return task_status, True, None
    def copy_and_upload_to_storage(self, production_bucket, build_bucket, successful_packs_dict, storage_base_path,
                                   build_bucket_base_path):
        """ Manages the copy of pack zip artifact from the build bucket to the production bucket.

        The zip pack will be copied to following path: /content/packs/pack_name/pack_latest_version if
        the pack exists in the successful_packs_dict from Prepare content step in Create Instances job.

        Args:
            production_bucket (google.cloud.storage.bucket.Bucket): google cloud production bucket.
            build_bucket (google.cloud.storage.bucket.Bucket): google cloud build bucket.
            successful_packs_dict (dict): the dict of all packs were uploaded in prepare content step
            storage_base_path (str): The target destination of the upload in the target bucket.
            build_bucket_base_path (str): The path of the build bucket in gcp.

        Returns:
            bool: Status - whether the operation succeeded.
            bool: Skipped pack - true in case of pack existence at the targeted path and the copy process was skipped,
                  otherwise returned False.
        """
        pack_not_uploaded_in_prepare_content = self._pack_name not in successful_packs_dict
        if pack_not_uploaded_in_prepare_content:
            # NOTE(review): this warning text is misleading — the pack was simply not
            # uploaded during prepare-content, not necessarily "already in storage".
            logging.warning("The following packs already exist at storage.")
            logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
            return True, True

        latest_version = successful_packs_dict[self._pack_name][BucketUploadFlow.LATEST_VERSION]
        self._latest_version = latest_version

        build_version_pack_path = os.path.join(build_bucket_base_path, self._pack_name, latest_version)

        # Verifying that the latest version of the pack has been uploaded to the build bucket
        existing_bucket_version_files = [f.name for f in build_bucket.list_blobs(prefix=build_version_pack_path)]
        if not existing_bucket_version_files:
            logging.error(f"{self._pack_name} latest version ({latest_version}) was not found on build bucket at "
                          f"path {build_version_pack_path}.")
            return False, False

        # We upload the pack zip object taken from the build bucket into the production bucket
        prod_version_pack_path = os.path.join(storage_base_path, self._pack_name, latest_version)
        prod_pack_zip_path = os.path.join(prod_version_pack_path, f'{self._pack_name}.zip')
        build_pack_zip_path = os.path.join(build_version_pack_path, f'{self._pack_name}.zip')
        build_pack_zip_blob = build_bucket.blob(build_pack_zip_path)

        try:
            copied_blob = build_bucket.copy_blob(
                blob=build_pack_zip_blob, destination_bucket=production_bucket, new_name=prod_pack_zip_path
            )
            copied_blob.cache_control = "no-cache,max-age=0"  # disabling caching for pack blob
            self.public_storage_path = copied_blob.public_url
            task_status = copied_blob.exists()
        except Exception as e:
            pack_suffix = os.path.join(self._pack_name, latest_version, f'{self._pack_name}.zip')
            logging.exception(f"Failed copying {pack_suffix}. Additional Info: {str(e)}")
            return False, False

        if not task_status:
            logging.error(f"Failed in uploading {self._pack_name} pack to production gcs.")
        else:
            # Determine if pack versions were aggregated during upload
            # NOTE(review): this is always True here — the early return above already
            # handled the not-uploaded case, so the inner `if` never skips.
            pack_uploaded_in_prepare_content = not pack_not_uploaded_in_prepare_content
            if pack_uploaded_in_prepare_content:
                agg_str = successful_packs_dict[self._pack_name].get('aggregated')
                if agg_str:
                    self._aggregated = True
                    self._aggregation_str = agg_str
            logging.success(f"Uploaded {self._pack_name} pack to {prod_pack_zip_path} path.")
            # handle dependenices zip upload when found in build bucket
            self.copy_and_upload_dependencies_zip_to_storage(
                build_bucket,
                build_bucket_base_path,
                production_bucket,
                storage_base_path
            )

        return task_status, False
    def copy_and_upload_dependencies_zip_to_storage(self, build_bucket, build_bucket_base_path, production_bucket,
                                                    storage_base_path):
        """ Copies the pack's "with dependencies" zip from the build bucket to production, if it exists.

        Looks for <pack>_with_dependencies.zip under the pack's build bucket path and,
        when found, copies it to the same relative path in the production bucket.
        Failures are logged but not propagated, and no status is returned.

        Args:
            build_bucket (google.cloud.storage.bucket.Bucket): google cloud build bucket.
            build_bucket_base_path (str): The path of the build bucket in gcp.
            production_bucket (google.cloud.storage.bucket.Bucket): google cloud production bucket.
            storage_base_path (str): The target destination of the upload in the target bucket.
        """
        pack_with_deps_name = f'{self._pack_name}_with_dependencies.zip'
        build_pack_with_deps_path = os.path.join(build_bucket_base_path, self._pack_name, pack_with_deps_name)
        existing_bucket_deps_files = [f.name for f in build_bucket.list_blobs(prefix=build_pack_with_deps_path)]
        if existing_bucket_deps_files:
            logging.info(f"{self._pack_name} with dependencies was found. path {build_pack_with_deps_path}.")

            # We upload the pack dependencies zip object taken from the build bucket into the production bucket
            prod_version_pack_deps_zip_path = os.path.join(storage_base_path, self._pack_name, pack_with_deps_name)
            build_pack_deps_zip_blob = build_bucket.blob(build_pack_with_deps_path)

            try:
                copied_blob = build_bucket.copy_blob(
                    blob=build_pack_deps_zip_blob,
                    destination_bucket=production_bucket,
                    new_name=prod_version_pack_deps_zip_path
                )
                copied_blob.cache_control = "no-cache,max-age=0"  # disabling caching for pack blob
                # NOTE(review): this overwrites public_storage_path previously set to the
                # main pack zip — confirm the deps zip URL is the intended final value.
                self.public_storage_path = copied_blob.public_url
                dep_task_status = copied_blob.exists()
                if not dep_task_status:
                    logging.error(f"Failed in uploading {self._pack_name} pack with dependencies to production gcs.")
            except Exception as e:
                pack_deps_zip_suffix = os.path.join(self._pack_name, pack_with_deps_name)
                logging.exception(f"Failed copying {pack_deps_zip_suffix}. Additional Info: {str(e)}")
    def get_changelog_latest_rn(self, changelog_index_path: str) -> Tuple[dict, Version, str]:
        """
        Returns the changelog file contents and the last version of rn in the changelog file

        Args:
            changelog_index_path (str): the changelog.json file path in the index

        Returns: the changelog file contents, the last version, and contents of rn in the changelog file
        """
        # NOTE(review): this is logged before the existence check, so "Found" is
        # printed even when the changelog file is missing.
        logging.info(f"Found Changelog for: {self._pack_name}")
        if os.path.exists(changelog_index_path):
            try:
                with open(changelog_index_path, "r") as changelog_file:
                    changelog = json.load(changelog_file)
            except json.JSONDecodeError:
                # Corrupt changelog is treated as empty.
                changelog = {}
        else:
            changelog = {}
        # get the latest rn version in the changelog.json file
        changelog_rn_versions = [Version(ver) for ver in changelog]
        # no need to check if changelog_rn_versions isn't empty because changelog file exists
        # NOTE(review): if the file was missing or unparsable, changelog is {} and
        # this max() raises ValueError — callers must only invoke this when a valid
        # changelog exists. Confirm this invariant holds at all call sites.
        changelog_latest_rn_version = max(changelog_rn_versions)
        changelog_latest_rn = changelog[str(changelog_latest_rn_version)]["releaseNotes"]

        return changelog, changelog_latest_rn_version, changelog_latest_rn
    def get_modified_release_notes_lines(self, release_notes_dir: str, new_release_notes_versions: list,
                                         changelog: dict, modified_rn_files: list):
        """
        In the case where an rn file was changed, this function returns the new content
        of the release note in the format suitable for the changelog file.
        In general, if two rn files are created between two consecutive upload runs (i.e. pack was changed twice),
        the rn files are being aggregated and the latter version is the one that is being used as a key in the changelog
        file, and the aggregated rns as the value.
        Hence, in the case of changing an rn as such, this function re-aggregates all of the rns under the
        corresponding version key, and returns the aggregated data, in the right format, as value under that key.

        Args:
            release_notes_dir (str): the path to the release notes dir
            new_release_notes_versions (list): a list of the new versions of release notes in the pack since the
                last upload. This means they were already handled on this upload run (and aggregated if needed).
            changelog (dict): the changelog from the production bucket.
            modified_rn_files (list): a list of the rn files that were modified according to the last commit in
                'filename.md' format.

        Returns:
            A dict of modified version and their release notes contents, for modified
            in the current index file
        """
        modified_versions_dict = {}

        for rn_filename in modified_rn_files:
            version = underscore_file_name_to_dotted_version(rn_filename)
            # Should only apply on modified files that are not the last rn file
            if version in new_release_notes_versions:
                continue
            # The case where the version is a key in the changelog file,
            # and the value is not an aggregated release note
            if is_the_only_rn_in_block(release_notes_dir, version, changelog):
                logging.info("The version is a key in the changelog file and by itself in the changelog block")
                with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
                    rn_lines = rn_file.read()
                modified_versions_dict[version] = self._clean_release_notes(rn_lines).strip()
            # The case where the version is not a key in the changelog file or it is a key of aggregated content
            else:
                logging.debug(f'The "{version}" version is not a key in the changelog file or it is a key of'
                              f' aggregated content')
                # Re-aggregate the whole changelog block this version belongs to and
                # key the result by the block's highest version.
                same_block_versions_dict, higher_nearest_version = self.get_same_block_versions(
                    release_notes_dir, version, changelog)
                modified_versions_dict[higher_nearest_version] = aggregate_release_notes_for_marketplace(
                    same_block_versions_dict)

        return modified_versions_dict
    def get_same_block_versions(self, release_notes_dir: str, version: str, changelog: dict):
        """
        Get a dict of the version as key and rn data as value of all of the versions that are in the same
        block in the changelog file as the given version (these are the versions that were aggregates together
        during a single upload priorly).

        Args:
            release_notes_dir (str): the path to the release notes dir
            version (str): the wanted version
            changelog (dict): the changelog from the production bucket.

        Returns:
            A dict of version, rn data for all corresponding versions, and the highest version among those keys as str
        """
        lowest_version = [Version(Pack.PACK_INITIAL_VERSION)]
        lower_versions: list = []
        higher_versions: list = []
        same_block_versions_dict: dict = dict()
        for item in changelog.keys():  # divide the versions into lists of lower and higher than given version
            (lower_versions if Version(item) < Version(version) else higher_versions).append(Version(item))
        # The block is delimited by the nearest changelog key above the version and
        # the nearest key below it (exclusive).
        # NOTE(review): min(higher_versions) raises ValueError if `version` is above
        # every changelog key — assumes the block's key always exists in the changelog.
        higher_nearest_version = min(higher_versions)
        lower_versions = lower_versions + lowest_version  # if the version is 1.0.0, ensure lower_versions is not empty
        lower_nearest_version = max(lower_versions)

        for rn_filename in filter_dir_files_by_extension(release_notes_dir, '.md'):
            current_version = underscore_file_name_to_dotted_version(rn_filename)
            # Catch all versions that are in the same block
            if lower_nearest_version < Version(current_version) <= higher_nearest_version:
                with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
                    rn_lines = rn_file.read()
                same_block_versions_dict[current_version] = self._clean_release_notes(rn_lines).strip()

        return same_block_versions_dict, str(higher_nearest_version)
    def get_release_notes_lines(self, release_notes_dir: str, changelog_latest_rn_version: Version,
                                changelog_latest_rn: str) -> Tuple[str, str, list]:
        """
        Prepares the release notes contents for the new release notes entry

        Args:
            release_notes_dir (str): the path to the release notes dir
            changelog_latest_rn_version (Version): the last version of release notes in the changelog.json file
            changelog_latest_rn (str): the last release notes in the changelog.json file

        Returns: The release notes contents, the latest release notes version (in the release notes directory),
                 and a list of the new rn versions that this is the first time they have been uploaded.
        """
        found_versions: list = list()
        pack_versions_dict: dict = dict()
        for filename in sorted(filter_dir_files_by_extension(release_notes_dir, '.md')):
            version = underscore_file_name_to_dotted_version(filename)

            # Aggregate all rn files that are bigger than what we have in the changelog file
            if Version(version) > changelog_latest_rn_version:
                with open(os.path.join(release_notes_dir, filename), 'r') as rn_file:
                    rn_lines = rn_file.read()
                pack_versions_dict[version] = self._clean_release_notes(rn_lines).strip()

            found_versions.append(Version(version))

        # NOTE(review): max() raises ValueError if the release notes dir has no .md
        # files — assumes callers only invoke this when the dir is non-empty.
        latest_release_notes_version = max(found_versions)
        latest_release_notes_version_str = str(latest_release_notes_version)
        logging.info(f"Latest ReleaseNotes version is: {latest_release_notes_version_str}")

        if len(pack_versions_dict) > 1:
            # In case that there is more than 1 new release notes file, wrap all release notes together for one
            # changelog entry
            aggregation_str = f"[{', '.join(str(lv) for lv in found_versions if lv > changelog_latest_rn_version)}]"\
                              f" => {latest_release_notes_version_str}"
            logging.info(f"Aggregating ReleaseNotes versions: {aggregation_str}")
            release_notes_lines = aggregate_release_notes_for_marketplace(pack_versions_dict)
            self._aggregated = True
            self._aggregation_str = aggregation_str
        elif len(pack_versions_dict) == 1:
            # In case where there is only one new release notes file
            release_notes_lines = pack_versions_dict[latest_release_notes_version_str]
        else:
            # In case where the pack is up to date, i.e. latest changelog is latest rn file
            # We should take the release notes from the index as it has might been aggregated
            logging.info(f'No new RN file was detected for pack {self._pack_name}, taking latest RN from the index')
            release_notes_lines = changelog_latest_rn

        new_release_notes_versions = list(pack_versions_dict.keys())

        return release_notes_lines, latest_release_notes_version_str, new_release_notes_versions
def assert_upload_bucket_version_matches_release_notes_version(self,
changelog: dict,
latest_release_notes: str) -> None:
"""
Sometimes there is a the current bucket is not merged from master there could be another version in the upload
bucket, that does not exist in the current branch.
This case can cause unpredicted behavior and we want to fail the build.
This method validates that this is not the case in the current build, and if it does - fails it with an
assertion error.
Args:
changelog: The changelog from the production bucket.
latest_release_notes: The latest release notes version string in the current branch
"""
changelog_latest_release_notes = max(changelog, key=lambda k: Version(k)) # pylint: disable=W0108
assert Version(latest_release_notes) >= Version(changelog_latest_release_notes), \
f'{self._pack_name}: Version mismatch detected between upload bucket and current branch\n' \
f'Upload bucket version: {changelog_latest_release_notes}\n' \
f'current branch version: {latest_release_notes}\n' \
'Please Merge from master and rebuild'
def get_rn_files_names(self, modified_rn_files_paths):
"""
Args:
modified_rn_files_paths: a list containing all modified files in the current pack, generated
by comparing the old and the new commit hash.
Returns:
The names of the modified release notes files out of the given list only,
as in the names of the files that are under ReleaseNotes directory in the format of 'filename.md'.
"""
modified_rn_files = []
for file_path in modified_rn_files_paths:
modified_file_path_parts = os.path.normpath(file_path).split(os.sep)
if self.RELEASE_NOTES in modified_file_path_parts:
modified_rn_files.append(modified_file_path_parts[-1])
return modified_rn_files
    def prepare_release_notes(self, index_folder_path, build_number,
                              modified_rn_files_paths=None):
        """
        Handles the creation and update of the changelog.json files.

        Args:
            index_folder_path (str): Path to the unzipped index json.
            build_number (str): circleCI build number.
            modified_rn_files_paths (list): list of paths of the pack's modified file

        Returns:
            bool: whether the operation succeeded.
            bool: whether running build has not updated pack release notes.
        """
        task_status = False
        not_updated_build = False
        release_notes_dir = os.path.join(self._pack_path, Pack.RELEASE_NOTES)

        modified_rn_files_paths = modified_rn_files_paths if modified_rn_files_paths else []

        try:
            # load changelog from downloaded index
            logging.info(f"Loading changelog for {self._pack_name} pack")
            changelog_index_path = os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
            if os.path.exists(changelog_index_path):
                # Existing pack: reconcile the index changelog with local RN files.
                changelog, changelog_latest_rn_version, changelog_latest_rn = \
                    self.get_changelog_latest_rn(changelog_index_path)
                if os.path.exists(release_notes_dir):
                    # Handling latest release notes files
                    release_notes_lines, latest_release_notes, new_release_notes_versions = \
                        self.get_release_notes_lines(
                            release_notes_dir, changelog_latest_rn_version, changelog_latest_rn)
                    self.assert_upload_bucket_version_matches_release_notes_version(changelog, latest_release_notes)

                    # Handling modified old release notes files, if there are any
                    rn_files_names = self.get_rn_files_names(modified_rn_files_paths)
                    modified_release_notes_lines_dict = self.get_modified_release_notes_lines(
                        release_notes_dir, new_release_notes_versions, changelog, rn_files_names)

                    # pack_metadata.json must agree with the newest RN file.
                    if self._current_version != latest_release_notes:
                        logging.error(f"Version mismatch detected between the pack's current version in "
                                      f"pack_metadata.json: {self._current_version} and latest release notes "
                                      f"version: {latest_release_notes}.")
                        task_status = False
                        # NOTE: the `finally` clause re-returns the same values.
                        return task_status, not_updated_build
                    else:
                        if latest_release_notes in changelog:
                            # Re-release of an existing version: the entry keeps the
                            # original release date and gets an "R"-suffixed build.
                            logging.debug(f"Found existing release notes for version: {latest_release_notes}")
                            version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
                                                                             version_display_name=latest_release_notes,
                                                                             build_number=build_number,
                                                                             new_version=False)
                        else:
                            logging.info(f"Created new release notes for version: {latest_release_notes}")
                            version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
                                                                             version_display_name=latest_release_notes,
                                                                             build_number=build_number,
                                                                             new_version=True)

                        if version_changelog:
                            changelog[latest_release_notes] = version_changelog

                        if modified_release_notes_lines_dict:
                            logging.info("Updating changelog entries for modified release notes")
                            for version, modified_release_notes_lines in modified_release_notes_lines_dict.items():
                                updated_entry = self._get_updated_changelog_entry(
                                    changelog, version, release_notes=modified_release_notes_lines)
                                changelog[version] = updated_entry

                else:
                    if len(changelog.keys()) > 1:
                        # If there is no release notes dir but the changelog has a few entries in it,
                        # there is a mismatch
                        logging.warning(
                            f"{self._pack_name} pack mismatch between {Pack.CHANGELOG_JSON} and {Pack.RELEASE_NOTES}")
                        task_status, not_updated_build = True, True
                        return task_status, not_updated_build

                    else:
                        # allow changing the initial changelog version
                        first_key_in_changelog = list(changelog.keys())[0]
                        changelog[first_key_in_changelog] = self._create_changelog_entry(
                            release_notes=self.description,
                            version_display_name=first_key_in_changelog,
                            build_number=build_number,
                            initial_release=True,
                            new_version=False)

                        logging.info(f"Found existing release notes in {Pack.CHANGELOG_JSON} for version: "
                                     f"{first_key_in_changelog} of pack {self._pack_name}. Modifying this version in "
                                     f"{Pack.CHANGELOG_JSON}")

            elif self._hidden:
                # Deprecated/hidden packs get no changelog handling at all.
                logging.warning(f"Pack {self._pack_name} is deprecated. Skipping release notes handling.")
                task_status = True
                not_updated_build = True
                return task_status, not_updated_build

            else:
                # if there is no changelog file for the pack, this is a new pack, and we start it's changelog at it's
                # current version
                version_changelog = self._create_changelog_entry(
                    release_notes=self.description,
                    version_display_name=self._current_version,
                    build_number=build_number,
                    new_version=True,
                    initial_release=True
                )
                changelog = {
                    self._current_version: version_changelog
                }
                logging.info(f'Created {Pack.CHANGELOG_JSON} for pack {self._pack_name} starting at version'
                             f' {self._current_version}')

            # Update change log entries with BC flag.
            self.add_bc_entries_if_needed(release_notes_dir, changelog)

            # write back changelog with changes to pack folder
            with open(os.path.join(self._pack_path, Pack.CHANGELOG_JSON), "w") as pack_changelog:
                json.dump(changelog, pack_changelog, indent=4)

            task_status = True
            logging.success(f"Finished creating {Pack.CHANGELOG_JSON} for {self._pack_name}")
        except Exception as e:
            logging.error(f"Failed creating {Pack.CHANGELOG_JSON} file for {self._pack_name}.\n "
                          f"Additional info: {e}")
        finally:
            # Returning from `finally` swallows any in-flight exception and makes the
            # (task_status, not_updated_build) pair the uniform result of every path.
            return task_status, not_updated_build
def create_local_changelog(self, build_index_folder_path):
""" Copies the pack index changelog.json file to the pack path
Args:
build_index_folder_path: The path to the build index folder
Returns:
bool: whether the operation succeeded.
"""
task_status = True
build_changelog_index_path = os.path.join(build_index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
pack_changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if os.path.exists(build_changelog_index_path):
try:
shutil.copyfile(src=build_changelog_index_path, dst=pack_changelog_path)
logging.success(f"Successfully copied pack index changelog.json file from {build_changelog_index_path}"
f" to {pack_changelog_path}.")
except shutil.Error as e:
task_status = False
logging.error(f"Failed copying changelog.json file from {build_changelog_index_path} to "
f"{pack_changelog_path}. Additional info: {str(e)}")
return task_status
else:
task_status = False
logging.error(
f"{self._pack_name} index changelog file is missing in build bucket path: {build_changelog_index_path}")
return task_status and self.is_changelog_exists()
    def collect_content_items(self):
        """ Iterates over content items folders inside pack and collects content items data.

        Walks the pack folder bottom-up, parses every displayable yml/json content item,
        and groups the parsed summaries by marketplace item-type name. The collected
        mapping is stored on ``self._content_items``; the return value only reports success.

        Side effects:
            - Deletes old-format reputation files and items whose ``toversion`` is below
              the minimum supported server version.
            - Updates ``self._server_min_version`` and the ``_contains_transformer`` /
              ``_contains_filter`` flags.

        Returns:
            bool: whether the operation succeeded.
        """
        task_status = False
        content_items_result: dict = {}

        try:
            # the format is defined in issue #19786, may change in the future
            content_item_name_mapping = {
                PackFolders.SCRIPTS.value: "automation",
                PackFolders.PLAYBOOKS.value: "playbook",
                PackFolders.INTEGRATIONS.value: "integration",
                PackFolders.INCIDENT_FIELDS.value: "incidentfield",
                PackFolders.INCIDENT_TYPES.value: "incidenttype",
                PackFolders.DASHBOARDS.value: "dashboard",
                PackFolders.INDICATOR_FIELDS.value: "indicatorfield",
                PackFolders.REPORTS.value: "report",
                PackFolders.INDICATOR_TYPES.value: "reputation",
                PackFolders.LAYOUTS.value: "layoutscontainer",
                PackFolders.CLASSIFIERS.value: "classifier",
                PackFolders.WIDGETS.value: "widget",
                PackFolders.GENERIC_DEFINITIONS.value: "genericdefinition",
                PackFolders.GENERIC_FIELDS.value: "genericfield",
                PackFolders.GENERIC_MODULES.value: "genericmodule",
                PackFolders.GENERIC_TYPES.value: "generictype",
                PackFolders.LISTS.value: "list",
                PackFolders.PREPROCESS_RULES.value: "preprocessrule",
                PackFolders.JOBS.value: "job",
                PackFolders.PARSING_RULES.value: "parsingrule",
                PackFolders.MODELING_RULES.value: "modelingrule",
                PackFolders.CORRELATION_RULES.value: "correlationrule",
                PackFolders.XSIAM_DASHBOARDS.value: "xsiamdashboard",
                PackFolders.XSIAM_REPORTS.value: "xsiamreport",
                PackFolders.TRIGGERS.value: "trigger",
                PackFolders.WIZARDS.value: "wizard",
            }

            for root, pack_dirs, pack_files_names in os.walk(self._pack_path, topdown=False):
                current_directory = root.split(os.path.sep)[-1]
                parent_directory = root.split(os.path.sep)[-2]

                # Generic types/fields store items one level deeper — classify by the
                # parent folder and skip the intermediate container folders themselves.
                if parent_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
                    current_directory = parent_directory
                elif current_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
                    continue

                folder_collected_items = []
                for pack_file_name in pack_files_names:
                    if not pack_file_name.endswith(('.json', '.yml')):
                        continue

                    pack_file_path = os.path.join(root, pack_file_name)

                    # reputation in old format aren't supported in 6.0.0 server version
                    if current_directory == PackFolders.INDICATOR_TYPES.value \
                            and not fnmatch.fnmatch(pack_file_name, 'reputation-*.json'):
                        os.remove(pack_file_path)
                        logging.info(f"Deleted pack {pack_file_name} reputation file for {self._pack_name} pack")
                        continue

                    with open(pack_file_path, 'r') as pack_file:
                        if current_directory in PackFolders.yml_supported_folders():
                            content_item = yaml.safe_load(pack_file)
                        elif current_directory in PackFolders.json_supported_folders():
                            content_item = json.load(pack_file)
                        else:
                            continue

                    # check if content item has to version
                    to_version = content_item.get('toversion') or content_item.get('toVersion')

                    if to_version and Version(to_version) < Version(Metadata.SERVER_DEFAULT_MIN_VERSION):
                        os.remove(pack_file_path)
                        logging.info(
                            f"{self._pack_name} pack content item {pack_file_name} has to version: {to_version}. "
                            f"{pack_file_name} file was deleted.")
                        continue

                    if current_directory not in PackFolders.pack_displayed_items():
                        continue  # skip content items that are not displayed in contentItems

                    logging.debug(
                        f"Iterating over {pack_file_path} file and collecting items of {self._pack_name} pack")
                    # updated min server version from current content item
                    self._server_min_version = get_updated_server_version(self._server_min_version, content_item,
                                                                          self._pack_name)

                    content_item_tags = content_item.get('tags', [])

                    # Per-folder summary format: each branch extracts only the fields the
                    # marketplace UI displays for that content item type.
                    if current_directory == PackFolders.SCRIPTS.value:
                        folder_collected_items.append({
                            'id': content_item.get('commonfields', {}).get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('comment', ''),
                            'tags': content_item_tags,
                        })
                        # Remember pack-level capabilities implied by script tags.
                        if not self._contains_transformer and 'transformer' in content_item_tags:
                            self._contains_transformer = True

                        if not self._contains_filter and 'filter' in content_item_tags:
                            self._contains_filter = True

                    elif current_directory == PackFolders.PLAYBOOKS.value:
                        self.add_pack_type_tags(content_item, 'Playbook')
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.INTEGRATIONS.value:
                        integration_commands = content_item.get('script', {}).get('commands', [])
                        self.add_pack_type_tags(content_item, 'Integration')
                        folder_collected_items.append({
                            'id': content_item.get('commonfields', {}).get('id', ''),
                            'name': content_item.get('display', ''),
                            'description': content_item.get('description', ''),
                            'category': content_item.get('category', ''),
                            'commands': [
                                {'name': c.get('name', ''), 'description': c.get('description', '')}
                                for c in integration_commands],
                        })

                    elif current_directory == PackFolders.INCIDENT_FIELDS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'type': content_item.get('type', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.INCIDENT_TYPES.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'playbook': content_item.get('playbookId', ''),
                            'closureScript': content_item.get('closureScript', ''),
                            'hours': int(content_item.get('hours', 0)),
                            'days': int(content_item.get('days', 0)),
                            'weeks': int(content_item.get('weeks', 0)),
                        })

                    elif current_directory == PackFolders.DASHBOARDS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                        })

                    elif current_directory == PackFolders.INDICATOR_FIELDS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'type': content_item.get('type', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.REPORTS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.INDICATOR_TYPES.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'details': content_item.get('details', ''),
                            'reputationScriptName': content_item.get('reputationScriptName', ''),
                            'enhancementScriptNames': content_item.get('enhancementScriptNames', []),
                        })

                    elif current_directory == PackFolders.LAYOUTS.value:
                        layout_metadata = {
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                        }
                        # Layouts may legitimately omit 'description' — only include it when present.
                        layout_description = content_item.get('description')
                        if layout_description is not None:
                            layout_metadata['description'] = layout_description
                        folder_collected_items.append(layout_metadata)

                    elif current_directory == PackFolders.CLASSIFIERS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name') or content_item.get('id', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.WIDGETS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'dataType': content_item.get('dataType', ''),
                            'widgetType': content_item.get('widgetType', ''),
                        })

                    elif current_directory == PackFolders.LISTS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', '')
                        })

                    elif current_directory == PackFolders.GENERIC_DEFINITIONS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif parent_directory == PackFolders.GENERIC_FIELDS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                            'type': content_item.get('type', ''),
                        })

                    elif current_directory == PackFolders.GENERIC_MODULES.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif parent_directory == PackFolders.GENERIC_TYPES.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.PREPROCESS_RULES.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.JOBS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            # note that `name` may technically be blank, but shouldn't pass validations
                            'name': content_item.get('name', ''),
                            'details': content_item.get('details', ''),
                        })

                    elif current_directory == PackFolders.PARSING_RULES.value:
                        self.add_pack_type_tags(content_item, 'ParsingRule')
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                        })

                    elif current_directory == PackFolders.MODELING_RULES.value:
                        self.add_pack_type_tags(content_item, 'ModelingRule')
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                        })

                    elif current_directory == PackFolders.CORRELATION_RULES.value:
                        self.add_pack_type_tags(content_item, 'CorrelationRule')
                        folder_collected_items.append({
                            'id': content_item.get('global_rule_id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.XSIAM_DASHBOARDS.value:
                        # XSIAM dashboards/reports wrap their data in a single-element list.
                        folder_collected_items.append({
                            'id': content_item.get('dashboards_data', [{}])[0].get('global_id', ''),
                            'name': content_item.get('dashboards_data', [{}])[0].get('name', ''),
                            'description': content_item.get('dashboards_data', [{}])[0].get('description', ''),
                        })

                    elif current_directory == PackFolders.XSIAM_REPORTS.value:
                        folder_collected_items.append({
                            'id': content_item.get('templates_data', [{}])[0].get('global_id', ''),
                            'name': content_item.get('templates_data', [{}])[0].get('report_name', ''),
                            'description': content_item.get('templates_data', [{}])[0].get('report_description', ''),
                        })

                    elif current_directory == PackFolders.TRIGGERS.value:
                        folder_collected_items.append({
                            'id': content_item.get('trigger_id', ''),
                            'name': content_item.get('trigger_name', ''),
                            'description': content_item.get('description', ''),
                        })

                    elif current_directory == PackFolders.WIZARDS.value:
                        folder_collected_items.append({
                            'id': content_item.get('id', ''),
                            'name': content_item.get('name', ''),
                            'description': content_item.get('description', ''),
                            'dependency_packs': content_item.get('dependency_packs', {})
                        })

                    else:
                        # Displayed folder with no dedicated branch — logged, not fatal.
                        logging.info(f'Failed to collect: {current_directory}')

                if current_directory in PackFolders.pack_displayed_items():
                    content_item_key = content_item_name_mapping[current_directory]
                    # Merge with any items already collected for this type (e.g. generic
                    # types/fields spread over several sub-folders).
                    content_items_result[content_item_key] = \
                        content_items_result.get(content_item_key, []) + folder_collected_items

            logging.success(f"Finished collecting content items for {self._pack_name} pack")
            task_status = True
        except Exception:
            logging.exception(f"Failed collecting content items in {self._pack_name} pack")
        finally:
            # Partial results are still stored; the return in finally makes this best-effort.
            self._content_items = content_items_result
            return task_status
def load_user_metadata(self):
""" Loads user defined metadata and stores part of it's data in defined properties fields.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
user_metadata = {}
try:
user_metadata_path = os.path.join(self._pack_path, Pack.USER_METADATA) # user metadata path before parsing
if not os.path.exists(user_metadata_path):
logging.error(f"{self._pack_name} pack is missing {Pack.USER_METADATA} file.")
return task_status
with open(user_metadata_path, "r") as user_metadata_file:
user_metadata = json.load(user_metadata_file) # loading user metadata
# part of old packs are initialized with empty list
user_metadata = {} if isinstance(user_metadata, list) else user_metadata
# store important user metadata fields
self.support_type = user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
self.current_version = user_metadata.get(Metadata.CURRENT_VERSION, '')
self.hidden = user_metadata.get(Metadata.HIDDEN, False)
self.description = user_metadata.get(Metadata.DESCRIPTION, False)
self.display_name = user_metadata.get(Metadata.NAME, '') # type: ignore[misc]
self._user_metadata = user_metadata
self._eula_link = user_metadata.get(Metadata.EULA_LINK, Metadata.EULA_URL)
self._marketplaces = user_metadata.get('marketplaces', ['xsoar'])
logging.info(f"Finished loading {self._pack_name} pack user metadata")
task_status = True
except Exception:
logging.exception(f"Failed in loading {self._pack_name} user metadata.")
finally:
return task_status
def _collect_pack_tags(self, user_metadata, landing_page_sections, trending_packs):
tags = set(input_to_list(input_data=user_metadata.get('tags')))
tags |= self._get_tags_from_landing_page(landing_page_sections)
tags |= {PackTags.TIM} if self._is_feed else set()
tags |= {PackTags.USE_CASE} if self._use_cases else set()
tags |= {PackTags.TRANSFORMER} if self._contains_transformer else set()
tags |= {PackTags.FILTER} if self._contains_filter else set()
tags |= {PackTags.COLLECTION} if self._is_siem else set()
if self._create_date:
days_since_creation = (datetime.utcnow() - datetime.strptime(self._create_date, Metadata.DATE_FORMAT)).days
if days_since_creation <= 30:
tags |= {PackTags.NEW}
else:
tags -= {PackTags.NEW}
if trending_packs:
if self._pack_name in trending_packs:
tags |= {PackTags.TRENDING}
else:
tags -= {PackTags.TRENDING}
return tags
    def _enhance_pack_attributes(self, index_folder_path, dependencies_metadata_dict,
                                 statistics_handler=None, format_dependencies_only=False):
        """ Enhances the pack object with attributes for the metadata file.

        Args:
            index_folder_path (str): downloaded index folder directory path.
            dependencies_metadata_dict (dict): mapping of pack dependencies metadata, for first level dependencies.
            statistics_handler: marketplace statistics handler; download/trending statistics are
                collected only when it is provided and the pack is public.
            format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
                dependencies or not.

        Returns:
            None: all collected values are stored on instance attributes for later metadata parsing.
        """
        landing_page_sections = mp_statistics.StatisticsHandler.get_landing_page_sections()
        trending_packs = None
        # Default image-dependency ordering; replaced by download-count ordering below when stats exist.
        pack_dependencies_by_download_count = self._displayed_images_dependent_on_packs

        if not format_dependencies_only:
            # ===== Pack Regular Attributes =====
            self._support_type = self.user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
            self._support_details = self._create_support_section(
                support_type=self._support_type, support_url=self.user_metadata.get(Metadata.URL),
                support_email=self.user_metadata.get(Metadata.EMAIL)
            )
            self._author = self._get_author(
                support_type=self._support_type, author=self.user_metadata.get(Metadata.AUTHOR, ''))
            self._certification = self._get_certification(
                support_type=self._support_type, certification=self.user_metadata.get(Metadata.CERTIFICATION)
            )
            self._legacy = self.user_metadata.get(Metadata.LEGACY, True)
            self._create_date = self._get_pack_creation_date(index_folder_path)
            self._update_date = self._get_pack_update_date(index_folder_path)
            self._use_cases = input_to_list(input_data=self.user_metadata.get(Metadata.USE_CASES), capitalize_input=True)
            self._categories = input_to_list(input_data=self.user_metadata.get(Metadata.CATEGORIES), capitalize_input=True)
            self._keywords = input_to_list(self.user_metadata.get(Metadata.KEY_WORDS))

        # Dependencies are parsed unconditionally — this runs even in dependencies-only mode.
        self._parsed_dependencies = self._parse_pack_dependencies(self.user_metadata.get(Metadata.DEPENDENCIES, {}),
                                                                  dependencies_metadata_dict)

        # ===== Pack Private Attributes =====
        if not format_dependencies_only:
            self._is_private_pack = Metadata.PARTNER_ID in self.user_metadata
            self._is_premium = self._is_private_pack
            self._preview_only = get_valid_bool(self.user_metadata.get(Metadata.PREVIEW_ONLY, False))
            self._price = convert_price(pack_id=self._pack_name, price_value_input=self.user_metadata.get('price'))
            if self._is_private_pack:
                self._vendor_id = self.user_metadata.get(Metadata.VENDOR_ID, "")
                self._partner_id = self.user_metadata.get(Metadata.PARTNER_ID, "")
                self._partner_name = self.user_metadata.get(Metadata.PARTNER_NAME, "")
                self._content_commit_hash = self.user_metadata.get(Metadata.CONTENT_COMMIT_HASH, "")
                # Currently all content packs are legacy.
                # Since premium packs cannot be legacy, we directly set this attribute to false.
                self._legacy = False

        # ===== Pack Statistics Attributes =====
        if not self._is_private_pack and statistics_handler:  # Public Content case
            self._pack_statistics_handler = mp_statistics.PackStatisticsHandler(
                self._pack_name, statistics_handler.packs_statistics_df, statistics_handler.packs_download_count_desc,
                self._displayed_images_dependent_on_packs
            )
            self._downloads_count = self._pack_statistics_handler.download_count
            trending_packs = statistics_handler.trending_packs
            pack_dependencies_by_download_count = self._pack_statistics_handler.displayed_dependencies_sorted

        # Tags depend on statistics (trending), so they are computed after the block above.
        self._tags = self._collect_pack_tags(self.user_metadata, landing_page_sections, trending_packs)
        self._search_rank = mp_statistics.PackStatisticsHandler.calculate_search_rank(
            tags=self._tags, certification=self._certification, content_items=self._content_items
        )
        self._related_integration_images = self._get_all_pack_images(
            self._displayed_integration_images, self._displayed_images_dependent_on_packs, dependencies_metadata_dict,
            pack_dependencies_by_download_count
        )
    def format_metadata(self, index_folder_path, packs_dependencies_mapping, build_number, commit_hash,
                        statistics_handler, packs_dict=None, marketplace='xsoar',
                        format_dependencies_only=False):
        """ Re-formats metadata according to marketplace metadata format defined in issue #19786 and writes back
        the result.

        Args:
            index_folder_path (str): downloaded index folder directory path.
            packs_dependencies_mapping (dict): all packs dependencies lookup mapping.
            build_number (str): circleCI build number.
            commit_hash (str): current commit hash.
            statistics_handler (StatisticsHandler): The marketplace statistics handler.
            packs_dict (dict): dict of all packs relevant for current marketplace, as {pack_id: pack_object}.
            marketplace (str): Marketplace of current upload.
            format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
                dependencies or not.

        Returns:
            bool: True is returned in case metadata file was parsed successfully, otherwise False.
            bool: True is returned in case pack is missing dependencies.
        """
        task_status = False
        packs_dict = packs_dict if packs_dict else {}
        is_missing_dependencies = False

        try:
            # Merge calculated + hardcoded dependencies onto the instance before parsing metadata.
            self.set_pack_dependencies(packs_dependencies_mapping, packs_dict, marketplace=marketplace)

            logging.info(f"Loading pack dependencies metadata for {self._pack_name} pack")
            dependencies_metadata_dict, is_missing_dependencies = self._load_pack_dependencies_metadata(
                index_folder_path, packs_dict)

            self._enhance_pack_attributes(index_folder_path, dependencies_metadata_dict,
                                          statistics_handler, format_dependencies_only)
            formatted_metadata = self._parse_pack_metadata(build_number, commit_hash)
            metadata_path = os.path.join(self._pack_path, Pack.METADATA)  # deployed metadata path after parsing
            json_write(metadata_path, formatted_metadata)  # writing back parsed metadata
            logging.success(f"Finished formatting {self._pack_name} packs's {Pack.METADATA} {metadata_path} file.")
            task_status = True
        except Exception as e:
            logging.exception(f"Failed in formatting {self._pack_name} pack metadata. Additional Info: {str(e)}")
        finally:
            # return in finally converts any escaping exception into a (False, ...) result —
            # deliberate best-effort style used throughout this class.
            return task_status, is_missing_dependencies
@staticmethod
def pack_created_in_time_delta(pack_name, time_delta: timedelta, index_folder_path: str) -> bool:
"""
Checks if pack created before delta specified in the 'time_delta' argument and return boolean according
to the result
Args:
pack_name: the pack name.
time_delta: time_delta to check if pack was created before.
index_folder_path: downloaded index folder directory path.
Returns:
True if pack was created before the time_delta from now, and False otherwise.
"""
pack_creation_time_str = Pack._calculate_pack_creation_date(pack_name, index_folder_path)
return datetime.utcnow() - datetime.strptime(pack_creation_time_str, Metadata.DATE_FORMAT) < time_delta
    def _get_pack_creation_date(self, index_folder_path):
        """Return this pack's creation date string (delegates to the shared static helper)."""
        return self._calculate_pack_creation_date(self._pack_name, index_folder_path)
@staticmethod
def _calculate_pack_creation_date(pack_name, index_folder_path):
""" Gets the pack created date.
Args:
index_folder_path (str): downloaded index folder directory path.
Returns:
datetime: Pack created date.
"""
created_time = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
metadata = load_json(os.path.join(index_folder_path, pack_name, Pack.METADATA))
if metadata:
if metadata.get(Metadata.CREATED):
created_time = metadata.get(Metadata.CREATED, '')
else:
raise Exception(f'The metadata file of the {pack_name} pack does not contain "{Metadata.CREATED}" time')
return created_time
def _get_pack_update_date(self, index_folder_path):
""" Gets the pack update date.
Args:
index_folder_path (str): downloaded index folder directory path.
Returns:
datetime: Pack update date.
"""
latest_changelog_released_date = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
changelog = load_json(os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON))
if changelog and not self.is_modified:
packs_latest_release_notes = max(Version(ver) for ver in changelog)
latest_changelog_version = changelog.get(str(packs_latest_release_notes), {})
latest_changelog_released_date = latest_changelog_version.get('released')
return latest_changelog_released_date
    def set_pack_dependencies(self, packs_dependencies_mapping, packs_dict, marketplace='xsoar'):
        """
        Retrieve all pack's dependencies by merging the calculated dependencies from pack_dependencies.json file, given
        as input priorly, and the hard coded dependencies featured in the pack_metadata.json file.
        This is done for both first level dependencies and the all levels dependencies.

        Args:
            packs_dependencies_mapping: the calculated dependencies from pack_dependencies.json file
            packs_dict (dict): Dict of packs relevant for current marketplace as {pack_name: pack_object}
            marketplace: the current marketplace this upload is for

        Raises:
            Exception: when a core pack gained a new mandatory dependency that is not a core pack
                and was not declared in the user metadata.
        """
        pack_dependencies_mapping = packs_dependencies_mapping.get(self._pack_name, {})
        first_level_dependencies = pack_dependencies_mapping.get(Metadata.DEPENDENCIES, {})
        all_levels_dependencies = pack_dependencies_mapping.get(Metadata.ALL_LEVELS_DEPENDENCIES, [])
        displayed_images_dependent_on_packs = pack_dependencies_mapping.get(Metadata.DISPLAYED_IMAGES, [])

        # filter out packs that are not a part of the marketplace this upload is for
        first_level_dependencies = {k: v for k, v in first_level_dependencies.items() if k in packs_dict}
        all_levels_dependencies = [k for k in all_levels_dependencies if k in packs_dict]
        displayed_images_dependent_on_packs = [k for k in displayed_images_dependent_on_packs if k in packs_dict]

        # Initialize user-metadata keys only if the user did not supply them.
        if Metadata.DISPLAYED_IMAGES not in self._user_metadata:
            self._user_metadata[Metadata.DISPLAYED_IMAGES] = displayed_images_dependent_on_packs

        if Metadata.DEPENDENCIES not in self._user_metadata:
            self._user_metadata[Metadata.DEPENDENCIES] = {}

        if self._pack_name != GCPConfig.BASE_PACK:
            # add base as a mandatory pack dependency, by design for all packs
            first_level_dependencies.update(BASE_PACK_DEPENDENCY_DICT)

        # update the calculated dependencies with the hardcoded dependencies
        # (hardcoded entries win — this order is load-bearing)
        first_level_dependencies.update(self.user_metadata[Metadata.DEPENDENCIES])

        # If it is a core pack, check that no new mandatory packs (that are not core packs) were added
        # They can be overridden in the user metadata to be not mandatory so we need to check there as well
        core_packs = GCPConfig.get_core_packs(marketplace)
        if self._pack_name in core_packs:
            mandatory_dependencies = [k for k, v in first_level_dependencies.items()
                                      if v.get(Metadata.MANDATORY, False) is True
                                      and k not in core_packs
                                      and k not in self._user_metadata[Metadata.DEPENDENCIES].keys()]
            if mandatory_dependencies:
                raise Exception(f'New mandatory dependencies {mandatory_dependencies} were '
                                f'found in the core pack {self._pack_name}')

        self._user_metadata[Metadata.DEPENDENCIES] = first_level_dependencies
        self._first_level_dependencies = first_level_dependencies
        self._all_levels_dependencies = all_levels_dependencies
        self._displayed_images_dependent_on_packs = displayed_images_dependent_on_packs
def prepare_for_index_upload(self):
""" Removes and leaves only necessary files in pack folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
files_to_leave = [Pack.METADATA, Pack.CHANGELOG_JSON, Pack.README]
try:
for file_or_folder in os.listdir(self._pack_path):
files_or_folder_path = os.path.join(self._pack_path, file_or_folder)
if file_or_folder in files_to_leave:
continue
if os.path.isdir(files_or_folder_path):
shutil.rmtree(files_or_folder_path)
else:
os.remove(files_or_folder_path)
task_status = True
except Exception:
logging.exception(f"Failed in preparing index for upload in {self._pack_name} pack.")
finally:
return task_status
@staticmethod
def _get_spitted_yml_image_data(root, target_folder_files):
""" Retrieves pack integration image and integration display name and returns binding image data.
Args:
root (str): full path to the target folder to search integration image.
target_folder_files (list): list of files inside the targeted folder.
Returns:
dict: path to integration image and display name of the integration.
"""
image_data = {}
for pack_file in target_folder_files:
if pack_file.startswith('.'):
continue
if pack_file.endswith('_image.png'):
image_data['repo_image_path'] = os.path.join(root, pack_file)
elif pack_file.endswith('.yml'):
with open(os.path.join(root, pack_file), 'r') as integration_file:
integration_yml = yaml.safe_load(integration_file)
image_data['display_name'] = integration_yml.get('display', '')
return image_data
    def _get_image_data_from_yml(self, pack_file_path):
        """ Creates temporary image file and retrieves integration display name.

        Decodes the base64 image embedded in a unified integration yml into a temporary
        png inside the pack folder, and registers that file for later cleanup.

        Args:
            pack_file_path (str): full path to the target yml_path integration yml to search integration image.

        Returns:
            dict: path to temporary integration image, display name of the integration and the basename of
                the integration in content_pack.zip; empty dict when the yml has no embedded image.
        """
        image_data = {}

        if pack_file_path.endswith('.yml'):
            with open(pack_file_path, 'r') as integration_file:
                integration_yml = yaml.safe_load(integration_file)

            image_data['display_name'] = integration_yml.get('display', '')
            # create temporary file of base64 decoded data
            integration_name = integration_yml.get('name', '')
            # 'image' is a data URI ("data:image/png;base64,<payload>") — take the payload part.
            base64_image = integration_yml['image'].split(',')[1] if integration_yml.get('image') else None

            if not base64_image:
                logging.warning(f"{integration_name} integration image was not found in {self._pack_name} pack")
                return {}

            temp_image_name = f'{integration_name.replace(" ", "")}_image.png'
            temp_image_path = os.path.join(self._pack_path, temp_image_name)

            with open(temp_image_path, 'wb') as image_file:
                image_file.write(base64.b64decode(base64_image))

            self._remove_files_list.append(temp_image_name)  # add temporary file to tracking list
            image_data['image_path'] = temp_image_path
            image_data['integration_path_basename'] = os.path.basename(pack_file_path)

            logging.info(f"Created temporary integration {image_data['display_name']} image for {self._pack_name} pack")

        return image_data
def _search_for_images(self, target_folder):
""" Searches for png files in targeted folder.
Args:
target_folder (str): full path to directory to search.
Returns:
list: list of dictionaries that include image path and display name of integration, example:
[{'image_path': image_path, 'display_name': integration_display_name},...]
"""
target_folder_path = os.path.join(self._pack_path, target_folder)
images_list = []
if os.path.exists(target_folder_path):
for pack_item in os.scandir(target_folder_path):
image_data = self._get_image_data_from_yml(pack_item.path)
if image_data and image_data not in images_list:
images_list.append(image_data)
return images_list
def check_if_exists_in_index(self, index_folder_path):
""" Checks if pack is sub-folder of downloaded index.
Args:
index_folder_path (str): index folder full path.
Returns:
bool: whether the operation succeeded.
bool: whether pack exists in index folder.
"""
task_status, exists_in_index = False, False
try:
if not os.path.exists(index_folder_path):
logging.error(f"{GCPConfig.INDEX_NAME} does not exists.")
return task_status, exists_in_index
exists_in_index = os.path.exists(os.path.join(index_folder_path, self._pack_name))
task_status = True
except Exception:
logging.exception(f"Failed searching {self._pack_name} pack in {GCPConfig.INDEX_NAME}")
finally:
return task_status, exists_in_index
@staticmethod
def remove_contrib_suffix_from_name(display_name: str) -> str:
""" Removes the contribution details suffix from the integration's display name
Args:
display_name (str): The integration display name.
Returns:
str: The display name without the contrib details suffix
"""
contribution_suffixes = ('(Partner Contribution)', '(Developer Contribution)', '(Community Contribution)')
for suffix in contribution_suffixes:
index = display_name.find(suffix)
if index != -1:
display_name = display_name[:index].rstrip(' ')
break
return display_name
@staticmethod
def need_to_upload_integration_image(image_data: dict, integration_dirs: list, unified_integrations: list):
""" Checks whether needs to upload the integration image or not.
We upload in one of the two cases:
1. The integration_path_basename is one of the integration dirs detected
2. The integration_path_basename is one of the added/modified unified integrations
Args:
image_data (dict): path to temporary integration image, display name of the integrations and the basename of
the integration in content_pack.zip.
integration_dirs (list): The list of integrations to search in for images
unified_integrations (list): The list of unified integrations to upload their image
Returns:
bool: True if we need to upload the image or not
"""
integration_path_basename = image_data['integration_path_basename']
return any([
re.findall(BucketUploadFlow.INTEGRATION_DIR_REGEX, integration_path_basename)[0] in integration_dirs,
integration_path_basename in unified_integrations
])
def upload_integration_images(self, storage_bucket, storage_base_path, diff_files_list=None, detect_changes=False):
    """ Uploads pack integrations images to gcs.

    The returned result of integration section are defined in issue #19786.

    Args:
        storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where image will be uploaded.
        storage_base_path (str): The target destination of the upload in the target bucket.
        detect_changes (bool): Whether to detect changes or upload all images in any case.
        diff_files_list (list): The list of all modified/added files found in the diff.

    Returns:
        bool: whether the operation succeeded. Note: despite earlier docs mentioning a list,
            only the bool is returned; the collected image metadata is stored on
            ``self._displayed_integration_images`` instead.
    """
    task_status = True
    integration_images = []
    integration_dirs = []
    unified_integrations = []
    try:
        if detect_changes:
            # detect added/modified integration images
            for file in diff_files_list:
                if self.is_integration_image(file.a_path):
                    # integration dir name will show up in the unified integration file path in content_packs.zip
                    integration_dirs.append(os.path.basename(os.path.dirname(file.a_path)))
                elif self.is_unified_integration(file.a_path):
                    # if the file found in the diff is a unified integration we upload its image
                    unified_integrations.append(os.path.basename(file.a_path))
        pack_local_images = self._search_for_images(target_folder=PackFolders.INTEGRATIONS.value)
        if not pack_local_images:
            # No images in the pack at all - nothing to upload, treat as success.
            return True  # return empty list if no images were found
        pack_storage_root_path = os.path.join(storage_base_path, self._pack_name)
        for image_data in pack_local_images:
            image_path = image_data.get('image_path')
            if not image_path:
                raise Exception(f"{self._pack_name} pack integration image was not found")
            image_name = os.path.basename(image_path)
            image_storage_path = os.path.join(pack_storage_root_path, image_name)
            pack_image_blob = storage_bucket.blob(image_storage_path)
            if not detect_changes or \
                    self.need_to_upload_integration_image(image_data, integration_dirs, unified_integrations):
                # upload the image if needed
                logging.info(f"Uploading image: {image_name} of integration: {image_data.get('display_name')} "
                             f"from pack: {self._pack_name}")
                with open(image_path, "rb") as image_file:
                    pack_image_blob.upload_from_file(image_file)
                self._uploaded_integration_images.append(image_name)
            # Even when the image itself was not re-uploaded, its gcs path is still recorded below
            # so metadata always lists every integration image.
            if GCPConfig.USE_GCS_RELATIVE_PATH:
                image_gcs_path = urllib.parse.quote(
                    os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, image_name))
            else:
                image_gcs_path = pack_image_blob.public_url
            integration_name = image_data.get('display_name', '')
            # Contributed packs carry a "(... Contribution)" suffix that is stripped for display.
            if self.support_type != Metadata.XSOAR_SUPPORT:
                integration_name = self.remove_contrib_suffix_from_name(integration_name)
            integration_images.append({
                'name': integration_name,
                'imagePath': image_gcs_path
            })
        if self._uploaded_integration_images:
            logging.info(f"Uploaded {len(self._uploaded_integration_images)} images for {self._pack_name} pack.")
    except Exception as e:
        task_status = False
        logging.exception(f"Failed to upload {self._pack_name} pack integration images. Additional Info: {str(e)}")
    finally:
        # NOTE(review): the 'return' in 'finally' also overrides the early 'return True' above
        # only in terms of control flow bookkeeping - results are published via the attribute.
        self._displayed_integration_images = integration_images
        return task_status
def copy_integration_images(self, production_bucket, build_bucket, images_data, storage_base_path,
                            build_bucket_base_path):
    """ Copies all pack's integration images from the build bucket to the production bucket.

    Args:
        production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
        build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
        images_data (dict): The images data structure from Prepare Content step
        storage_base_path (str): The target destination of the upload in the target bucket.
        build_bucket_base_path (str): The path of the build bucket in gcp.

    Returns:
        bool: Whether the operation succeeded.
    """
    task_status = True
    num_copied_images = 0
    err_msg = f"Failed copying {self._pack_name} pack integrations images."
    # Only images recorded for this pack by the Prepare Content step are copied.
    pc_uploaded_integration_images = images_data.get(self._pack_name, {}).get(BucketUploadFlow.INTEGRATIONS, [])
    for image_name in pc_uploaded_integration_images:
        build_bucket_image_path = os.path.join(build_bucket_base_path, self._pack_name, image_name)
        build_bucket_image_blob = build_bucket.blob(build_bucket_image_path)
        if not build_bucket_image_blob.exists():
            # A missing blob flags failure but the loop continues so all missing images get reported.
            logging.error(f"Found changed/added integration image {image_name} in content repo but "
                          f"{build_bucket_image_path} does not exist in build bucket")
            task_status = False
        else:
            logging.info(f"Copying {self._pack_name} pack integration image: {image_name}")
            try:
                copied_blob = build_bucket.copy_blob(
                    blob=build_bucket_image_blob, destination_bucket=production_bucket,
                    new_name=os.path.join(storage_base_path, self._pack_name, image_name)
                )
                if not copied_blob.exists():
                    logging.error(f"Copy {self._pack_name} integration image: {build_bucket_image_blob.name} "
                                  f"blob to {copied_blob.name} blob failed.")
                    task_status = False
                else:
                    num_copied_images += 1
            except Exception as e:
                # Unlike a missing blob, an unexpected GCS error aborts the whole copy immediately.
                logging.exception(f"{err_msg}. Additional Info: {str(e)}")
                return False
    if not task_status:
        logging.error(err_msg)
    else:
        if num_copied_images == 0:
            logging.info(f"No added/modified integration images were detected in {self._pack_name} pack.")
        else:
            logging.success(f"Copied {num_copied_images} images for {self._pack_name} pack.")
    return task_status
def upload_author_image(self, storage_bucket, storage_base_path, diff_files_list=None, detect_changes=False):
    """ Uploads pack author image to gcs.

    Searches for `Author_image.png` and uploads author image to gcs. In case no such image was found,
    default Base pack image path is used and its gcp path is returned (for xsoar-supported packs only).

    Args:
        storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where author image will be uploaded.
        storage_base_path (str): the path under the bucket to upload to.
        diff_files_list (list): The list of all modified/added files found in the diff
        detect_changes (bool): Whether to detect changes or upload the author image in any case.

    Returns:
        bool: whether the operation succeeded. The computed gcp path (possibly empty) is stored on
            ``self._author_image`` rather than returned.
    """
    task_status = True
    author_image_storage_path = ""
    try:
        author_image_path = os.path.join(self._pack_path, Pack.AUTHOR_IMAGE_NAME)  # disable-secrets-detection
        if os.path.exists(author_image_path):
            image_to_upload_storage_path = os.path.join(storage_base_path, self._pack_name,
                                                        Pack.AUTHOR_IMAGE_NAME)  # disable-secrets-detection
            pack_author_image_blob = storage_bucket.blob(image_to_upload_storage_path)
            # Upload only when change detection is off or the diff actually touched the author image.
            if not detect_changes or any(self.is_author_image(file.a_path) for file in diff_files_list):
                # upload the image if needed
                with open(author_image_path, "rb") as author_image_file:
                    pack_author_image_blob.upload_from_file(author_image_file)
                self._uploaded_author_image = True
                logging.success(f"Uploaded successfully {self._pack_name} pack author image")
            # The storage path is recorded even when no new upload happened.
            if GCPConfig.USE_GCS_RELATIVE_PATH:
                author_image_storage_path = urllib.parse.quote(
                    os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, Pack.AUTHOR_IMAGE_NAME))
            else:
                author_image_storage_path = pack_author_image_blob.public_url
        elif self.support_type == Metadata.XSOAR_SUPPORT:  # use default Base pack image for xsoar supported packs
            author_image_storage_path = os.path.join(GCPConfig.IMAGES_BASE_PATH, GCPConfig.BASE_PACK,
                                                     Pack.AUTHOR_IMAGE_NAME)  # disable-secrets-detection

            if not GCPConfig.USE_GCS_RELATIVE_PATH:
                # disable-secrets-detection-start
                author_image_storage_path = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name,
                                                         author_image_storage_path)
                # disable-secrets-detection-end
            logging.info((f"Skipping uploading of {self._pack_name} pack author image "
                          f"and use default {GCPConfig.BASE_PACK} pack image"))
        else:
            # Non-xsoar packs without an Author_image.png simply get no author image.
            logging.info(f"Skipping uploading of {self._pack_name} pack author image. "
                         f"The pack is defined as {self.support_type} support type")

    except Exception:
        logging.exception(f"Failed uploading {self._pack_name} pack author image.")
        task_status = False
        author_image_storage_path = ""
    finally:
        # The result path is published via the instance attribute, not the return value.
        self._author_image = author_image_storage_path
        return task_status
def copy_author_image(self, production_bucket, build_bucket, images_data, storage_base_path, build_bucket_base_path):
    """ Copies pack's author image from the build bucket to the production bucket.

    Searches for `Author_image.png`; in case no such image was uploaded in the build, nothing is
    copied and the operation is considered successful.

    Args:
        production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
        build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
        images_data (dict): The images data structure from Prepare Content step
        storage_base_path (str): The target destination of the upload in the target bucket.
        build_bucket_base_path (str): The path of the build bucket in gcp.

    Returns:
        bool: Whether the operation succeeded.
    """
    # The Prepare Content step records (per pack) whether an author image was uploaded.
    if images_data.get(self._pack_name, {}).get(BucketUploadFlow.AUTHOR, False):
        build_author_image_path = os.path.join(build_bucket_base_path, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
        build_author_image_blob = build_bucket.blob(build_author_image_path)

        if build_author_image_blob.exists():
            try:
                copied_blob = build_bucket.copy_blob(
                    blob=build_author_image_blob, destination_bucket=production_bucket,
                    new_name=os.path.join(storage_base_path, self._pack_name,
                                          Pack.AUTHOR_IMAGE_NAME))
                if not copied_blob.exists():
                    logging.error(f"Failed copying {self._pack_name} pack author image.")
                    return False
                else:
                    logging.success(f"Copied successfully {self._pack_name} pack author image.")
                    return True

            except Exception as e:
                logging.exception(f"Failed copying {Pack.AUTHOR_IMAGE_NAME} for {self._pack_name} pack. "
                                  f"Additional Info: {str(e)}")
                return False

        else:
            # The build recorded an upload but the blob is gone - inconsistent state, fail.
            logging.error(f"Found changed/added author image in content repo for {self._pack_name} pack but "
                          f"image does not exist in build bucket in path {build_author_image_path}.")
            return False

    else:
        # Nothing was uploaded for this pack during the build - nothing to copy.
        logging.info(f"No added/modified author image was detected in {self._pack_name} pack.")
        return True
def upload_images(self, index_folder_path, storage_bucket, storage_base_path, diff_files_list):
    """
    Upload the images related to the pack.

    The image is uploaded in the case it was modified, OR if this is the first time the current pack is being
    uploaded to this current marketplace (#46785).

    Args:
        index_folder_path (str): the path to the local index folder
        storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where author image will be uploaded.
        storage_base_path (str): the path under the bucket to upload to.
        diff_files_list (list): The list of all modified/added files found in the diff

    Returns:
        True if the images were successfully uploaded, false otherwise.
    """
    # Rely on the git diff only when the pack already exists in the index (or is hidden);
    # a pack absent from the index is new to this marketplace and gets all images uploaded.
    detect_changes = os.path.exists(os.path.join(index_folder_path, self.name, Pack.METADATA)) or self.hidden
    # Don't check if the image was modified if this is the first time it is uploaded to this marketplace, meaning it
    # doesn't exist in the index (and it isn't deprecated)
    if not detect_changes:
        logging.info(f'Uploading images of pack {self.name} which did not exist in this marketplace before')
    task_status = self.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, detect_changes)
    if not task_status:
        # On failure the pack status is recorded and the extracted pack folder is removed.
        self._status = PackStatus.FAILED_IMAGES_UPLOAD.name
        self.cleanup()
        return False

    task_status = self.upload_author_image(storage_bucket, storage_base_path, diff_files_list, detect_changes)
    if not task_status:
        self._status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
        self.cleanup()
        return False

    return True
def cleanup(self):
    """Finalization step: delete the extracted pack folder if it is still on disk."""
    if not os.path.exists(self._pack_path):
        return
    shutil.rmtree(self._pack_path)
    logging.info(f"Cleanup {self._pack_name} pack from: {self._pack_path}")
def is_changelog_exists(self):
    """ Indicates whether the local changelog of a given pack exists or not.

    Returns:
        bool: The answer
    """
    changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
    return os.path.isfile(changelog_path)
def is_failed_to_upload(self, failed_packs_dict):
    """
    Checks if the pack failed to upload in the Prepare Content step in the Create Instances job.

    Args:
        failed_packs_dict (dict): The failed packs file

    Returns:
        bool: Whether the pack is listed as failed.
        str: The pack's failing status (empty string when not failed).
    """
    _missing = object()
    failure_entry = failed_packs_dict.get(self._pack_name, _missing)
    if failure_entry is _missing:
        return False, str()
    return True, failure_entry.get('status')
def is_integration_image(self, file_path: str):
    """ Indicates whether a file_path is an integration image or not.

    Args:
        file_path (str): The file path

    Returns:
        bool: True if the file is an integration image or False otherwise
    """
    if not file_path.startswith(os.path.join(PACKS_FOLDER, self._pack_name)):
        return False
    if not file_path.endswith('.png'):
        return False
    # Image files carry 'image' in their (lower-cased) basename, but the author image is excluded.
    if 'image' not in os.path.basename(file_path.lower()):
        return False
    return os.path.basename(file_path) != Pack.AUTHOR_IMAGE_NAME
def is_author_image(self, file_path: str):
    """ Indicates whether a file_path is an author image or not.

    The check is an exact string match against '<packs folder>/<pack name>/<author image name>'.

    Args:
        file_path (str): The file path

    Returns:
        bool: True if the file is an author image or False otherwise
    """
    return file_path == os.path.join(PACKS_FOLDER, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
def is_unified_integration(self, file_path: str):
    """ Indicates whether a file_path is a unified integration yml file or not.

    Args:
        file_path (str): The file path

    Returns:
        bool: True if the file is a unified integration or False otherwise
    """
    integrations_root = os.path.join(PACKS_FOLDER, self._pack_name, PackFolders.INTEGRATIONS.value)
    if not file_path.startswith(integrations_root):
        return False
    # A unified integration yml sits directly inside the Integrations folder (no per-integration dir).
    if os.path.basename(os.path.dirname(file_path)) != PackFolders.INTEGRATIONS.value:
        return False
    base_name = os.path.basename(file_path)
    return base_name.startswith('integration') and base_name.endswith('.yml')
def add_bc_entries_if_needed(self, release_notes_dir: str, changelog: Dict[str, Any]) -> None:
    """
    Receives changelog, checks if there exists a BC version in each changelog entry (as changelog entry might be
    zipped into few RN versions, check if at least one of the versions is BC).

    Whether an RN is BC is determined by:
    1) Check if RN has corresponding config file, e.g 1_0_1.md has corresponding 1_0_1.json file.
    2) If it does, check if the `breakingChanges` field is true.

    If such version exists, sets a true value in the entry's 'breakingChanges' field.
    If the JSON file also has `breakingChangesNotes` configured, adds a `breakingChangesNotes` field to the
    changelog entry.

    This function iterates every entry in changelog because it takes into consideration four scenarios:
    a) Entry without breaking changes, changes to entry with breaking changes (because at least one of the
       versions in the entry was marked as breaking changes).
    b) Entry without breaking changes, does not change.
    c) Entry with breaking changes, changes to entry without breaking changes (because all the BC versions
       corresponding to the changelog entry were re-marked as not BC).
    d) Entry with breaking changes, does not change.

    Args:
        release_notes_dir (str): RN dir path.
        changelog (Dict[str, Any]): Changelog data represented as a dict.

    Returns:
        (None): Modifies changelog in place, adding/removing 'breakingChanges' and `breakingChangesNotes`
        fields on every changelog entry, according to the logic described above.
    """
    if not os.path.exists(release_notes_dir):
        return
    bc_version_to_text: Dict[str, Optional[str]] = self._breaking_changes_versions_to_text(release_notes_dir)
    loose_versions: List[Version] = [Version(bc_ver) for bc_ver in bc_version_to_text]
    predecessor_version: Version = Version('0.0.0')
    # Walk entries in ascending version order so each entry covers the half-open range
    # (previous entry version, this entry version].
    for changelog_entry in sorted(changelog.keys(), key=Version):
        rn_loose_version: Version = Version(changelog_entry)
        if bc_versions := self._changelog_entry_bc_versions(predecessor_version, rn_loose_version, loose_versions,
                                                           bc_version_to_text):
            logging.info(f'Changelog entry {changelog_entry} contains BC versions')
            changelog[changelog_entry]['breakingChanges'] = True
            if bc_text := self._calculate_bc_text(release_notes_dir, bc_versions):
                changelog[changelog_entry]['breakingChangesNotes'] = bc_text
            else:
                # No aggregated text - drop stale notes so the server falls back to the full RN.
                changelog[changelog_entry].pop('breakingChangesNotes', None)
        else:
            # Scenario (c): no BC versions left in this entry - remove a stale flag.
            changelog[changelog_entry].pop('breakingChanges', None)
        predecessor_version = rn_loose_version
def _calculate_bc_text(self, release_notes_dir: str, bc_version_to_text: Dict[str, Optional[str]]) -> Optional[str]:
"""
Receives BC versions to text dict for current changelog entry. Calculates text for BC entry.
Args:
release_notes_dir (str): RN dir path.
bc_version_to_text (Dict[str, Optional[str]): {bc version, bc_text}
Returns:
(Optional[str]): Text for entry if such was added.
If none is returned, server will list the full RN as the BC notes instead.
"""
# Handle cases of one BC version in entry.
if len(bc_version_to_text) == 1:
return list(bc_version_to_text.values())[0]
# Handle cases of two or more BC versions in entry.
text_of_bc_versions, bc_without_text = self._split_bc_versions_with_and_without_text(bc_version_to_text)
if len(text_of_bc_versions) == 0:
# Case 1: Not even one BC version contains breaking text.
return None
elif len(text_of_bc_versions) < len(bc_version_to_text):
# Case 2: Only part of BC versions contains breaking text.
return self._handle_many_bc_versions_some_with_text(release_notes_dir, text_of_bc_versions, bc_without_text)
else:
# Case 3: All BC versions contains text.
# Important: Currently, implementation of aggregating BCs was decided to concat between them
# In the future this might be needed to re-thought.
return '\n'.join(bc_version_to_text.values()) # type: ignore[arg-type]
def _handle_many_bc_versions_some_with_text(self, release_notes_dir: str, text_of_bc_versions: List[str],
bc_versions_without_text: List[str], ) -> str:
"""
Calculates text for changelog entry where some BC versions contain text and some don't.
Important: Currently, implementation of aggregating BCs was decided to concat between them (and if BC version
does not have a BC text - concat the whole RN). In the future this might be needed to re-thought.
Args:
release_notes_dir (str): RN dir path.
text_of_bc_versions ([List[str]): List of text of BC versions with text.
bc_versions_without_text ([List[str]): List of BC versions without text.
Returns:
(str): Text for BC entry.
"""
bc_with_text_str = '\n'.join(text_of_bc_versions)
rn_file_names_without_text = [f'''{bc_version.replace('.', '_')}.md''' for
bc_version in bc_versions_without_text]
other_rn_text: str = self._get_release_notes_concat_str(release_notes_dir, rn_file_names_without_text)
if not other_rn_text:
logging.error('No RN text, although text was expected to be found for versions'
f' {rn_file_names_without_text}.')
return f'{bc_with_text_str}{other_rn_text}'
@staticmethod
def _get_release_notes_concat_str(release_notes_dir: str, rn_file_names: List[str]) -> str:
"""
Concat all RN data found in given `rn_file_names`.
Args:
release_notes_dir (str): RN dir path.
rn_file_names (List[str]): List of all RN files to concat their data.
Returns:
(str): Concat RN data
"""
concat_str: str = ''
for rn_file_name in rn_file_names:
rn_file_path = os.path.join(release_notes_dir, rn_file_name)
with open(rn_file_path, 'r') as f:
# Will make the concat string start with new line on purpose.
concat_str = f'{concat_str}\n{f.read()}'
return concat_str
@staticmethod
def _split_bc_versions_with_and_without_text(bc_versions: Dict[str, Optional[str]]) -> Tuple[List[str], List[str]]:
"""
Splits BCs to tuple of BCs text of BCs containing text, and BCs versions that do not contain BC text.
Args:
bc_versions (Dict[str, Optional[str]): BC versions mapped to text if exists.
Returns:
(Tuple[List[str], List[str]]): (text of bc versions with text, bc_versions_without_text).
"""
text_of_bc_versions_with_tests: List[str] = []
bc_versions_without_text: List[str] = []
for bc_version, bc_text in bc_versions.items():
if bc_text:
text_of_bc_versions_with_tests.append(bc_text)
else:
bc_versions_without_text.append(bc_version)
return text_of_bc_versions_with_tests, bc_versions_without_text
@staticmethod
def _breaking_changes_versions_to_text(release_notes_dir: str) -> Dict[str, Optional[str]]:
    """
    Collects every BC version in the given RN dir and maps it to its text, if any.

    Text for a BC version is resolved as follows:
    - If the RN's config file has a `breakingChangesNotes` entry, that value is used.
    - Else, the mapping holds None and the whole RN text is used downstream.

    Args:
        release_notes_dir (str): RN dir path.

    Returns:
        (Dict[str, Optional[str]]): {dotted_version, text}.
    """
    result: Dict[str, Optional[str]] = dict()
    # Only .json config files in the RN dir can mark a version as breaking.
    for config_file_name in filter_dir_files_by_extension(release_notes_dir, '.json'):
        config_data: Dict = load_json(os.path.join(release_notes_dir, config_file_name))
        if not config_data.get('breakingChanges'):
            continue
        # Convert e.g. '1_0_1.json' to '1.0.1' for easier version arithmetic later on.
        dotted_version: str = underscore_file_name_to_dotted_version(config_file_name)
        result[dotted_version] = config_data.get('breakingChangesNotes')
    return result
@staticmethod
def _changelog_entry_bc_versions(predecessor_version: Version, rn_version: Version,
                                 breaking_changes_versions: List[Version],
                                 bc_version_to_text: Dict[str, Optional[str]]) -> Dict[str, Optional[str]]:
    """
    Gets all BC versions of a changelog entry: every BC s.t. predecessor_version < BC version <= rn_version.

    Args:
        predecessor_version (Version): Predecessor version in numeric version order.
        rn_version (Version): RN version of current processed changelog entry.
        breaking_changes_versions (List[Version]): List of BC versions.
        bc_version_to_text (Dict[str, Optional[str]): List of all BC to text in the given RN dir.

    Returns:
        Dict[str, Optional[str]]: Subset of `bc_version_to_text` restricted to the half-open range
        (predecessor_version, rn_version].
    """
    entry_bc_versions: Dict[str, Optional[str]] = {}
    for bc_ver in breaking_changes_versions:
        if predecessor_version < bc_ver <= rn_version:
            entry_bc_versions[str(bc_ver)] = bc_version_to_text.get(str(bc_ver))
    return entry_bc_versions
# HELPER FUNCTIONS
def get_upload_data(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict, dict]:
    """ Loads the packs_results.json file to get the successful and failed packs together with uploaded images dicts.

    Args:
        packs_results_file_path (str): The path to the file
        stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
         BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE

    Returns:
        dict: The successful packs dict
        dict: The failed packs dict
        dict: The successful private packs dict
        dict: The images data dict
    """
    # A missing results file yields four empty sections.
    if not os.path.exists(packs_results_file_path):
        return {}, {}, {}, {}
    stage_data: dict = load_json(packs_results_file_path).get(stage, {})
    return (
        stage_data.get(BucketUploadFlow.SUCCESSFUL_PACKS, {}),
        stage_data.get(BucketUploadFlow.FAILED_PACKS, {}),
        stage_data.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {}),
        stage_data.get(BucketUploadFlow.IMAGES, {}),
    )
def store_successful_and_failed_packs_in_ci_artifacts(packs_results_file_path: str, stage: str, successful_packs: list,
                                                      failed_packs: list, updated_private_packs: list,
                                                      images_data: Optional[dict] = None):
    """ Write the successful and failed packs to the correct section in the packs_results.json file.

    Args:
        packs_results_file_path (str): The path to the pack_results.json file
        stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
         BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
        successful_packs (list): The list of all successful packs
        failed_packs (list): The list of all failed packs
        updated_private_packs (list) : The list of all private packs that were updated
        images_data (Optional[dict]): A dict containing all images that were uploaded for each pack
    """
    packs_results = load_json(packs_results_file_path)
    # Any previous data recorded for this stage is overwritten wholesale.
    packs_results[stage] = dict()

    if failed_packs:
        failed_packs_dict = {
            BucketUploadFlow.FAILED_PACKS: {
                pack.name: {
                    BucketUploadFlow.STATUS: pack.status,
                    # Either the aggregated-versions string or the literal string "False".
                    BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
                    else "False"
                } for pack in failed_packs
            }
        }
        packs_results[stage].update(failed_packs_dict)
        logging.debug(f"Failed packs {failed_packs_dict}")

    if successful_packs:
        successful_packs_dict = {
            BucketUploadFlow.SUCCESSFUL_PACKS: {
                pack.name: {
                    BucketUploadFlow.STATUS: pack.status,
                    BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
                    else "False",
                    BucketUploadFlow.LATEST_VERSION: pack.latest_version
                } for pack in successful_packs
            }
        }
        packs_results[stage].update(successful_packs_dict)
        logging.debug(f"Successful packs {successful_packs_dict}")

    if updated_private_packs:
        successful_private_packs_dict: dict = {
            BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS: {pack_name: {} for pack_name in updated_private_packs}
        }
        packs_results[stage].update(successful_private_packs_dict)
        logging.debug(f"Successful private packs {successful_private_packs_dict}")

    if images_data:
        packs_results[stage].update({BucketUploadFlow.IMAGES: images_data})
        logging.debug(f"Images data {images_data}")

    # Write back only when there is something to persist.
    if packs_results:
        json_write(packs_results_file_path, packs_results)
def load_json(file_path: str) -> dict:
    """ Reads and loads a json file.

    Args:
        file_path (str): full path to json file.

    Returns:
        dict: loaded json content; an empty dict when the path is falsy, missing, or holds invalid JSON.
    """
    # Falsy or non-existent paths short-circuit to an empty result.
    if not (file_path and os.path.exists(file_path)):
        return {}
    try:
        with open(file_path, 'r') as json_file:
            return json.load(json_file)
    except json.decoder.JSONDecodeError:
        return {}
def json_write(file_path: str, data: Union[list, dict]):
    """ Writes the given data to a json file, pretty-printed with a 4-space indent.

    Args:
        file_path: The file path
        data: The data to write
    """
    with open(file_path, "w") as output_file:
        json.dump(data, output_file, indent=4)
def init_storage_client(service_account=None):
    """Initialize google cloud storage client.

    In case of local dev usage the client will be initialized with user default credentials.
    Otherwise, client will be initialized from service account json that is stored in CircleCI.

    Args:
        service_account (str): full path to service account json.

    Return:
        storage.Client: initialized google cloud storage client.
    """
    if service_account:
        storage_client = storage.Client.from_service_account_json(service_account)
        logging.info("Created gcp service account")

        return storage_client
    else:
        # in case of local dev use, ignored the warning of non use of service account.
        warnings.filterwarnings("ignore", message=google.auth._default._CLOUD_SDK_CREDENTIALS_WARNING)
        # Falls back to Application Default Credentials (gcloud user credentials, env var, etc.).
        credentials, project = google.auth.default()
        storage_client = storage.Client(credentials=credentials, project=project)
        logging.info("Created gcp private account")

        return storage_client
def input_to_list(input_data, capitalize_input=False):
    """ Helper function for handling input list or str from the user.

    Args:
        input_data (list or str): input from the user to handle.
        capitalize_input (bool): whether to capitalize the input list data or not.

    Returns:
        list: the original list, or a list produced by splitting the string on commas.
    """
    if not input_data:
        data_list = []
    elif isinstance(input_data, list):
        data_list = input_data
    else:
        data_list = [part for part in input_data.split(',') if part]
    if not capitalize_input:
        return data_list
    # Title-case only all-lowercase words; acronyms such as XSOAR stay untouched.
    return [" ".join(word.title() if word.islower() else word for word in item.split()) for item in data_list]
def get_valid_bool(bool_input):
    """ Converts and returns a valid bool.

    String inputs (e.g. 'yes', 'false') are parsed via strtobool; anything else is coerced with bool().

    Returns:
        bool: converted bool input.
    """
    if isinstance(bool_input, str):
        return bool(strtobool(bool_input))
    return bool(bool_input)
def convert_price(pack_id, price_value_input=None):
    """ Converts a price input to an integer. When no price is provided, returns zero.

    Args:
        pack_id (str): pack unique identifier (used only for error reporting).
        price_value_input (str): price string to convert.

    Returns:
        int: the pack price as an int; 0 when missing or invalid.
    """
    if not price_value_input:
        # No price supplied - the pack is free.
        return 0
    try:
        return int(price_value_input)
    except Exception:
        logging.exception(f"{pack_id} pack price is not valid. The price was set to 0.")
        return 0
def get_updated_server_version(current_string_version, compared_content_item, pack_name):
    """ Compares two semantic server versions and returns the LOWER (minimal) of them.

    NOTE(review): earlier docs here said "higher"/"latest", but the code below clearly keeps the
    minimum - it is used to compute the minimal required 'fromversion' across content items.

    Args:
        current_string_version (str): current minimal string version.
        compared_content_item (dict): compared content item entity.
        pack_name (str): the pack name (id).

    Returns:
        str: the lower of the two compared versions; on any comparison error, the current version.
    """
    lower_version_result = current_string_version

    try:
        # Items without any fromversion default to a very high version so they never lower the result.
        compared_string_version = compared_content_item.get('fromversion') or compared_content_item.get(
            'fromVersion') or "99.99.99"
        current_version, compared_version = Version(current_string_version), Version(compared_string_version)

        if current_version > compared_version:
            lower_version_result = compared_string_version
    except Exception:
        content_item_name = compared_content_item.get('name') or compared_content_item.get(
            'display') or compared_content_item.get('id') or compared_content_item.get('details', '')
        logging.exception(f"{pack_name} failed in version comparison of content item {content_item_name}.")
    finally:
        # NOTE(review): 'return' inside 'finally' would also swallow exceptions raised above; harmless
        # here because the except clause already handles them, but avoid copying this pattern.
        return lower_version_result
def get_content_git_client(content_repo_path: str):
    """ Initializes the content repo git client.

    Args:
        content_repo_path (str): content repo full path

    Returns:
        git.repo.base.Repo: content repo object.
    """
    return git.Repo(content_repo_path)
def get_recent_commits_data(content_repo: Any, index_folder_path: str, is_bucket_upload_flow: bool,
                            is_private_build: bool = False, circle_branch: str = "master"):
    """ Returns recent commits hashes (of head and remote master).

    Args:
        content_repo (git.repo.base.Repo): content repo object.
        index_folder_path (str): the path to the local index folder
        is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
        is_private_build (bool): indicates whether its a run of private build or not
        circle_branch (str): CircleCi branch of current build

    Returns:
        str: last commit hash of head.
        str: previous commit to diff against, depending on the flow the script is running (see
            get_previous_commit for the exact selection logic).
    """
    return content_repo.head.commit.hexsha, get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow,
                                                                is_private_build, circle_branch)
def get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow, is_private_build, circle_branch):
    """ Picks the commit to diff against, depending on the running flow.

    In the bucket upload workflow the commit recorded in the index (the last commit uploaded to the
    production bucket) is used. Otherwise we are in a commit workflow and the diff is taken from the
    head of origin/master.

    Args:
        content_repo (git.repo.base.Repo): content repo object.
        index_folder_path (str): the path to the local index folder
        is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
        is_private_build (bool): indicates whether its a run of private build or not
        circle_branch (str): CircleCi branch of current build

    Returns:
        str: previous commit depending on the flow the script is running
    """
    if is_bucket_upload_flow:
        # Diff against the commit recorded in index.json - the last successful production upload.
        return get_last_upload_commit_hash(content_repo, index_folder_path)

    if is_private_build:
        previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
        logging.info(f"Using origin/master HEAD~1 commit hash {previous_master_head_commit} to diff with.")
        return previous_master_head_commit

    if circle_branch == 'master':
        # On master the current commit IS origin/master HEAD, so diff with HEAD~1.
        head_str = "HEAD~1"
        previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
    else:
        # On a regular branch the diff is done against origin/master HEAD.
        head_str = "HEAD"
        previous_master_head_commit = content_repo.commit('origin/master').hexsha
    logging.info(f"Using origin/master {head_str} commit hash {previous_master_head_commit} to diff with.")
    return previous_master_head_commit
def get_last_upload_commit_hash(content_repo, index_folder_path):
    """Return the last origin/master commit hash that was uploaded to the bucket.

    Reads the ``commit`` field from the downloaded index.json and validates
    that the commit actually exists in the local content repo. Exits the
    process on any inconsistency.

    Args:
        content_repo (git.repo.base.Repo): content repo object.
        index_folder_path: The path to the index folder

    Returns:
        The commit hash
    """
    inner_index_json_path = os.path.join(index_folder_path, f'{GCPConfig.INDEX_NAME}.json')

    # Guard: the index file must have been downloaded beforehand.
    if not os.path.exists(inner_index_json_path):
        logging.critical(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
        sys.exit(1)

    inner_index_json_file = load_json(inner_index_json_path)
    if 'commit' not in inner_index_json_file:
        logging.critical(f"No commit field in {GCPConfig.INDEX_NAME}.json, content: {str(inner_index_json_file)}")
        sys.exit(1)

    last_upload_commit_hash = inner_index_json_file['commit']
    logging.info(f"Retrieved the last commit that was uploaded to production: {last_upload_commit_hash}")

    # Verify the recorded commit exists in the repo before using it as a diff base.
    try:
        last_upload_commit = content_repo.commit(last_upload_commit_hash).hexsha
    except Exception as e:
        logging.critical(f'Commit {last_upload_commit_hash} in {GCPConfig.INDEX_NAME}.json does not exist in content '
                         f'repo. Additional info:\n {e}')
        sys.exit(1)

    logging.info(f"Using commit hash {last_upload_commit} from index.json to diff with.")
    return last_upload_commit
def is_ignored_pack_file(modified_file_path_parts):
    """Indicate whether a pack file needs to be ignored or not.

    Args:
        modified_file_path_parts: The modified file parts, e.g. if file path is "a/b/c" then the
            parts list is ["a", "b", "c"]

    Returns:
        (bool): True if the file should be ignored, False otherwise
    """
    # Top-level pack files that are always ignored.
    if any(suffix in modified_file_path_parts for suffix in PackIgnored.ROOT_FILES):
        return True

    # Files nested under specific pack folders: an empty suffix list means the
    # whole folder is ignored, otherwise only matching file names are.
    for pack_folder, file_suffixes in PackIgnored.NESTED_FILES.items():
        if pack_folder not in modified_file_path_parts:
            continue
        if not file_suffixes:  # Ignore all pack folder files
            return True
        if any(suffix in modified_file_path_parts[-1] for suffix in file_suffixes):
            return True

    # Second-level directories under the ignored nested dirs (e.g. test_data).
    file_path = os.sep.join(modified_file_path_parts)
    for pack_folder in PackIgnored.NESTED_DIRS:
        if pack_folder not in modified_file_path_parts:
            continue
        pack_folder_path = os.sep.join(modified_file_path_parts[:modified_file_path_parts.index(pack_folder) + 1])
        for folder_path in glob.glob(os.path.join(pack_folder_path, '*/*')):
            if os.path.isdir(folder_path) and file_path.startswith(folder_path):
                return True

    return False
def filter_dir_files_by_extension(release_notes_dir: str, extension: str) -> List[str]:
    """
    Receives path to RN dir, filters only files in RN dir corresponding to the extension.
    Needed because RN directory will be extended to contain JSON files for configurations,
    see 'release_notes_bc_calculator.py'

    Args:
        release_notes_dir (str): Path to RN dir
        extension (str): Extension to filter by.

    Returns:
        (List[str]): List of all of the files in directory corresponding to the extension.
    """
    dir_entries = os.listdir(release_notes_dir)
    return list(filter(lambda entry: entry.endswith(extension), dir_entries))
def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
    """
    Check if the given version is a key of an aggregated changelog block, as in its value in the changelog
    doesn't contains other release notes that have been aggregated in previous uploads.

    If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
    release notes directory, and false otherwise (meaning there are versions in the release notes directory that are
    missing in the changelog, therefore they have been aggregated) and this function asserts that.

    Note: The comparison is done against the release notes directory to avoid cases where there are missing versions in
    the changelog due to inconsistent versions numbering, such as major version bumps. (For example, if the versions
    1.2.7 and 1.3.0 are two consecutive keys in the changelog, we need to determine if 1.3.0 has aggregated the versions
    1.2.8-1.3.0, OR 1.3.0 is the consecutive version right after 1.2.7 but is a major bump. in order to check that, we
    check it against the files in the release notes directory.)

    Args:
        release_notes_dir: the path to the release notes dir.
        version (str): the wanted version.
        changelog (dict): the changelog from the production bucket.

    Returns:
        True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
    """
    if not changelog.get(version):
        return False

    target_version = Version(version)
    # Sentinel floor so max() below is well defined even with no lower versions.
    floor_version = Version('1.0.0')

    rn_dir_versions = [
        Version(underscore_file_name_to_dotted_version(filename))
        for filename in filter_dir_files_by_extension(release_notes_dir, '.md')
    ]
    # Highest version strictly below the target, in the RN dir and in the changelog.
    highest_lower_in_dir = max([v for v in rn_dir_versions if v < target_version], default=floor_version)
    highest_lower_in_changelog = max(
        [Version(key) for key in changelog if Version(key) < target_version], default=floor_version)

    # Equal adjacent-previous versions mean nothing was aggregated into this block.
    return highest_lower_in_dir == highest_lower_in_changelog
def underscore_file_name_to_dotted_version(file_name: str) -> str:
    """
    Receives file name with expected format of x_x_x<extension>, and transforms it to dotted string.

    Examples
        - underscore_file_name_to_dotted_version(1_2_3.md) --> 1.2.3
        - underscore_file_name_to_dotted_version(1_4_2.json) --> 1.4.2

    Args:
        file_name (str): File name.

    Returns:
        (str): Dotted version of file name
    """
    base_name, _ = os.path.splitext(file_name)
    return '.'.join(base_name.split('_'))
def get_last_commit_from_index(service_account):
    """Download index.json from the GCP production bucket and extract the last upload commit.

    Args:
        service_account: service account to connect to GCP

    Returns: last upload commit.
    """
    storage_client = init_storage_client(service_account)
    production_bucket = storage_client.bucket(GCPConfig.PRODUCTION_BUCKET)
    # index.json lives under the fixed content/packs/ prefix in the bucket.
    index_storage_path = os.path.join('content/packs/', f"{GCPConfig.INDEX_NAME}.json")
    index_json = json.loads(production_bucket.blob(index_storage_path).download_as_string())
    return index_json.get('commit')
|
# -*- coding: utf-8 -*-
"""
Handler for the git pull command.
Copyright:
2020 by Clemens Rabe <clemens.rabe@clemensrabe.de>
All rights reserved.
This file is part of gitcache (https://github.com/seeraven/gitcache)
and is released under the "BSD 3-Clause License". Please see the ``LICENSE`` file
that is included as part of this package.
"""
# -----------------------------------------------------------------------------
# Module Import
# -----------------------------------------------------------------------------
import logging
from .helpers import get_current_ref, get_mirror_url
from ..command_execution import pretty_call_command_retry
from ..config import Config
from ..database import Database
from ..git_mirror import GitMirror
# -----------------------------------------------------------------------------
# Logger
# -----------------------------------------------------------------------------
LOG = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Function Definitions
# -----------------------------------------------------------------------------
# pylint: disable=too-many-locals
def git_pull(git_options):
    """Handle a git pull command.

    When pulling from 'origin' and a mirror exists for the remote URL, the
    mirror is refreshed first (including LFS data for the current ref) and
    the retry settings from the mirror's own configuration are used.

    Args:
        git_options (obj): The GitOptions object.

    Return:
        Returns 0 on success, otherwise the return code of the last failed
        command.
    """
    action = "Update"
    config = Config()
    remote = 'origin'
    refs = []

    # 'git pull [<repository> [<refspec>...]]'
    if git_options.command_args:
        remote = git_options.command_args[0]
        refs = git_options.command_args[1:]

    mirror_url = get_mirror_url(git_options)
    if mirror_url and remote == 'origin':
        database = Database()
        mirror = GitMirror(url=mirror_url, database=database)
        mirror.update()
        database.increment_counter(mirror.path, "updates")

        # The mirror.update() updates the LFS data of the default ref of
        # the mirror repository, which should be 'master' or 'main'. If we
        # are currently on a different branch, we want to update that branch
        # as well.
        if not refs:
            refs = [get_current_ref(git_options)]
        default_ref = mirror.get_default_ref()
        for ref in refs:
            if ref and ref != default_ref:
                mirror.fetch_lfs(ref)

        config = mirror.config
        action = f"Update from mirror {mirror.path}"

    return_code, _, _ = pretty_call_command_retry(
        action,
        '',
        git_options.get_real_git_all_args(),
        num_retries=config.get("Update", "Retries"),
        command_timeout=config.get("Update", "CommandTimeout"),
        output_timeout=config.get("Update", "OutputTimeout"))

    return return_code
# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
|
#!/usr/bin/env python3
from json.decoder import JSONDecodeError
import os
import re
import sys
import argparse
import json
import zipfile
import threading
import subprocess
import itertools
import configparser
import time
import uuid
from collections import OrderedDict
import paramiko
import pandas as pd
from common import *
sys.path.append(os.path.dirname(__file__) + '/../../../python')
import spdk.rpc as rpc # noqa
import spdk.rpc.client as rpc_client # noqa
class Server:
    """Base class for a host participating in an NVMe-oF performance test.

    Holds the shared connection/tuning configuration and implements the
    system-tuning steps (services, sysctl, tuned, CPU governor, IRQ affinity)
    plus the matching restore_* methods that put the system back afterwards.
    Subclasses provide real command execution via exec_cmd().
    """

    def __init__(self, name, general_config, server_config):
        # Identity and credentials common to all servers in the test run.
        self.name = name
        self.username = general_config["username"]
        self.password = general_config["password"]
        self.transport = general_config["transport"].lower()
        self.nic_ips = server_config["nic_ips"]
        self.mode = server_config["mode"]

        # Default location of the Mellanox IRQ-affinity helper scripts;
        # can be overridden per server via "irq_scripts_dir".
        self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
        if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
            self.irq_scripts_dir = server_config["irq_scripts_dir"]

        self.local_nic_info = []
        self._nics_json_obj = {}
        # The *_restore_* members remember pre-test state so the restore_*
        # methods can reinstate it after the benchmark finishes.
        self.svc_restore_dict = {}
        self.sysctl_restore_dict = {}
        self.tuned_restore_dict = {}
        self.governor_restore = ""
        self.tuned_profile = ""

        self.enable_adq = False
        self.adq_priority = None
        if "adq_enable" in server_config and server_config["adq_enable"]:
            self.enable_adq = server_config["adq_enable"]
            self.adq_priority = 1

        if "tuned_profile" in server_config:
            self.tuned_profile = server_config["tuned_profile"]

        # The name is embedded in result file names, so keep it alphanumeric.
        if not re.match("^[A-Za-z0-9]*$", name):
            self.log_print("Please use a name which contains only letters or numbers")
            sys.exit(1)

    def log_print(self, msg):
        """Print msg prefixed with this server's name, flushing immediately."""
        print("[%s] %s" % (self.name, msg), flush=True)

    @staticmethod
    def get_uncommented_lines(lines):
        """Return only non-empty lines that do not start with '#'."""
        return [line for line in lines if line and not line.startswith('#')]

    def get_nic_name_by_ip(self, ip):
        """Return the interface name that owns the given IP address.

        Caches the parsed `ip -j address show` output on first use.
        NOTE(review): membership test is a substring check (`ip in addr["local"]`),
        so an IP that is a prefix of another could match the wrong NIC — confirm
        the configured addresses cannot collide this way.
        """
        if not self._nics_json_obj:
            nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
            self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
        for nic in self._nics_json_obj:
            for addr in nic["addr_info"]:
                if ip in addr["local"]:
                    return nic["ifname"]

    def set_local_nic_info_helper(self):
        """Return raw hardware JSON used for NIC discovery; overridden by subclasses."""
        pass

    def set_local_nic_info(self, pci_info):
        """Populate self.local_nic_info with network-class nodes from lshw-style JSON."""
        def extract_network_elements(json_obj):
            # Recursively walk nested lists/dicts and collect nodes whose
            # "class" field contains "network".
            nic_list = []
            if isinstance(json_obj, list):
                for x in json_obj:
                    nic_list.extend(extract_network_elements(x))
            elif isinstance(json_obj, dict):
                if "children" in json_obj:
                    nic_list.extend(extract_network_elements(json_obj["children"]))
                if "class" in json_obj.keys() and "network" in json_obj["class"]:
                    nic_list.append(json_obj)
            return nic_list

        self.local_nic_info = extract_network_elements(pci_info)

    # pylint: disable=R0201
    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
        """Execute cmd. Base implementation is a no-op returning ""; subclasses
        run the command locally or over SSH."""
        return ""

    def configure_system(self):
        """Apply all system tuning steps prior to running the benchmark."""
        self.configure_services()
        self.configure_sysctl()
        self.configure_tuned()
        self.configure_cpu_governor()
        self.configure_irq_affinity()

    def configure_adq(self):
        """Load modules and configure NIC settings for ADQ (SPDK mode only)."""
        if self.mode == "kernel":
            self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
            return
        self.adq_load_modules()
        self.adq_configure_nic()

    def adq_load_modules(self):
        """modprobe the kernel modules that ADQ traffic classes depend on."""
        self.log_print("Modprobing ADQ-related Linux modules...")
        adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
        for module in adq_module_deps:
            try:
                self.exec_cmd(["sudo", "modprobe", module])
                self.log_print("%s loaded!" % module)
            except CalledProcessError as e:
                # Best-effort: log the failure and keep trying remaining modules.
                self.log_print("ERROR: failed to load module %s" % module)
                self.log_print("%s resulted in error: %s" % (e.cmd, e.output))

    def adq_configure_tc(self):
        """Create ADQ traffic classes and flower filters on each test NIC.

        NOTE(review): reads self.num_cores and self.spdk_dir, which are defined
        by subclasses, not by Server.__init__ — confirm only subclasses call this.
        """
        self.log_print("Configuring ADQ Traffic classes and filters...")

        if self.mode == "kernel":
            self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
            return

        num_queues_tc0 = 2  # 2 is minimum number of queues for TC0
        num_queues_tc1 = self.num_cores
        # Target matches on destination port, initiators on source port.
        port_param = "dst_port" if isinstance(self, Target) else "src_port"
        port = "4420"
        xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")

        for nic_ip in self.nic_ips:
            nic_name = self.get_nic_name_by_ip(nic_ip)

            tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
                                "root", "mqprio", "num_tc", "2", "map", "0", "1",
                                "queues", "%s@0" % num_queues_tc0,
                                "%s@%s" % (num_queues_tc1, num_queues_tc0),
                                "hw", "1", "mode", "channel"]
            self.log_print(" ".join(tc_qdisc_map_cmd))
            self.exec_cmd(tc_qdisc_map_cmd)

            time.sleep(5)
            tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
            self.log_print(" ".join(tc_qdisc_ingress_cmd))
            self.exec_cmd(tc_qdisc_ingress_cmd)

            tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
                             "protocol", "ip", "ingress", "prio", "1", "flower",
                             "dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
                             "skip_sw", "hw_tc", "1"]
            self.log_print(" ".join(tc_filter_cmd))
            self.exec_cmd(tc_filter_cmd)

            # show tc configuration
            self.log_print("Show tc configuration for %s NIC..." % nic_name)
            tc_disk_out = self.exec_cmd(["sudo", "tc", "qdisc", "show", "dev", nic_name])
            tc_filter_out = self.exec_cmd(["sudo", "tc", "filter", "show", "dev", nic_name, "ingress"])
            self.log_print("%s" % tc_disk_out)
            self.log_print("%s" % tc_filter_out)

            # Ethtool coalesce settings must be applied after configuring traffic classes
            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
            self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])

            self.log_print("Running set_xps_rxqs script for %s NIC..." % nic_name)
            xps_cmd = ["sudo", xps_script_path, nic_name]
            self.log_print(xps_cmd)
            self.exec_cmd(xps_cmd)

    def adq_configure_nic(self):
        """Reload the ice driver and enable ADQ-related NIC features via ethtool."""
        self.log_print("Configuring NIC port settings for ADQ testing...")

        # Reload the driver first, to make sure any previous settings are re-set.
        try:
            self.exec_cmd(["sudo", "rmmod", "ice"])
            self.exec_cmd(["sudo", "modprobe", "ice"])
        except CalledProcessError as e:
            self.log_print("ERROR: failed to reload ice module!")
            self.log_print("%s resulted in error: %s" % (e.cmd, e.output))

        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
        for nic in nic_names:
            self.log_print(nic)
            try:
                self.exec_cmd(["sudo", "ethtool", "-K", nic,
                               "hw-tc-offload", "on"])  # Enable hardware TC offload
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
                               "channel-inline-flow-director", "on"])  # Enable Intel Flow Director
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"])  # Disable LLDP
                # As temporary workaround for ADQ, channel packet inspection optimization is turned on during connection establishment.
                # Then turned off before fio ramp_up expires in ethtool_after_fio_ramp().
                self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
                               "channel-pkt-inspect-optimize", "on"])
            except CalledProcessError as e:
                self.log_print("ERROR: failed to configure NIC port using ethtool!")
                self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
                self.log_print("Please update your NIC driver and firmware versions and try again.")
            self.log_print(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
            self.log_print(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))

    def configure_services(self):
        """Stop services that would disturb the benchmark, remembering prior state."""
        self.log_print("Configuring active services...")
        svc_config = configparser.ConfigParser(strict=False)

        # Below list is valid only for RHEL / Fedora systems and might not
        # contain valid names for other distributions.
        svc_target_state = {
            "firewalld": "inactive",
            "irqbalance": "inactive",
            "lldpad.service": "inactive",
            "lldpad.socket": "inactive"
        }

        for service in svc_target_state:
            out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
            # Wrap systemctl's key=value output in a section header so it can
            # be parsed with configparser.
            out = "\n".join(["[%s]" % service, out])
            svc_config.read_string(out)

            if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
                continue

            service_state = svc_config[service]["ActiveState"]
            self.log_print("Current state of %s service is %s" % (service, service_state))
            self.svc_restore_dict.update({service: service_state})
            if service_state != "inactive":
                self.log_print("Disabling %s. It will be restored after the test has finished." % service)
                self.exec_cmd(["sudo", "systemctl", "stop", service])

    def configure_sysctl(self):
        """Apply network/VM sysctl tuning, saving current values for restore."""
        self.log_print("Tuning sysctl settings...")

        # busy_read polling is only beneficial for SPDK mode with ADQ enabled.
        busy_read = 0
        if self.enable_adq and self.mode == "spdk":
            busy_read = 1

        sysctl_opts = {
            "net.core.busy_poll": 0,
            "net.core.busy_read": busy_read,
            "net.core.somaxconn": 4096,
            "net.core.netdev_max_backlog": 8192,
            "net.ipv4.tcp_max_syn_backlog": 16384,
            "net.core.rmem_max": 268435456,
            "net.core.wmem_max": 268435456,
            "net.ipv4.tcp_mem": "268435456 268435456 268435456",
            "net.ipv4.tcp_rmem": "8192 1048576 33554432",
            "net.ipv4.tcp_wmem": "8192 1048576 33554432",
            "net.ipv4.route.flush": 1,
            "vm.overcommit_memory": 1,
        }

        for opt, value in sysctl_opts.items():
            self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
            self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())

    def configure_tuned(self):
        """Activate the configured tuned-adm profile, saving the previous one."""
        if not self.tuned_profile:
            self.log_print("WARNING: Tuned profile not set in configuration file. Skipping configuration.")
            return

        self.log_print("Configuring tuned-adm profile to %s." % self.tuned_profile)
        service = "tuned"
        tuned_config = configparser.ConfigParser(strict=False)

        out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
        out = "\n".join(["[%s]" % service, out])
        tuned_config.read_string(out)
        tuned_state = tuned_config[service]["ActiveState"]
        self.svc_restore_dict.update({service: tuned_state})

        if tuned_state != "inactive":
            profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
            profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()

            self.tuned_restore_dict = {
                "profile": profile,
                "mode": profile_mode
            }

        self.exec_cmd(["sudo", "systemctl", "start", service])
        self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
        self.log_print("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))

    def configure_cpu_governor(self):
        """Switch the CPU frequency governor to 'performance', saving the old one."""
        self.log_print("Setting CPU governor to performance...")

        # This assumes that there is the same CPU scaling governor on each CPU
        self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
        self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])

    def configure_irq_affinity(self):
        """Run set_irq_affinity.sh for each test NIC."""
        self.log_print("Setting NIC irq affinity for NICs...")

        irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
        nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
        for nic in nic_names:
            irq_cmd = ["sudo", irq_script_path, nic]
            self.log_print(irq_cmd)
            self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)

    def restore_services(self):
        """Restart or stop services to match their recorded pre-test state."""
        self.log_print("Restoring services...")
        for service, state in self.svc_restore_dict.items():
            cmd = "stop" if state == "inactive" else "start"
            self.exec_cmd(["sudo", "systemctl", cmd, service])

    def restore_sysctl(self):
        """Write back the sysctl values recorded before tuning."""
        self.log_print("Restoring sysctl settings...")
        for opt, value in self.sysctl_restore_dict.items():
            self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())

    def restore_tuned(self):
        """Revert tuned-adm to the profile (or auto mode) active before the test."""
        self.log_print("Restoring tuned-adm settings...")

        if not self.tuned_restore_dict:
            return

        if self.tuned_restore_dict["mode"] == "auto":
            self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
            self.log_print("Reverted tuned-adm to auto_profile.")
        else:
            self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
            self.log_print("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])

    def restore_governor(self):
        """Revert the CPU frequency governor to its recorded pre-test value."""
        self.log_print("Restoring CPU governor setting...")
        if self.governor_restore:
            self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
            self.log_print("Reverted CPU governor to %s." % self.governor_restore)
class Target(Server):
def __init__(self, name, general_config, target_config):
super().__init__(name, general_config, target_config)
# Defaults
self.enable_sar = False
self.sar_delay = 0
self.sar_interval = 0
self.sar_count = 0
self.enable_pcm = False
self.pcm_dir = ""
self.pcm_delay = 0
self.pcm_interval = 0
self.pcm_count = 0
self.enable_bandwidth = 0
self.bandwidth_count = 0
self.enable_dpdk_memory = False
self.dpdk_wait_time = 0
self.enable_zcopy = False
self.scheduler_name = "static"
self.null_block = 0
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
self.subsystem_info_list = []
if "null_block_devices" in target_config:
self.null_block = target_config["null_block_devices"]
if "sar_settings" in target_config:
self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = target_config["sar_settings"]
if "pcm_settings" in target_config:
self.enable_pcm = True
self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = target_config["pcm_settings"]
if "enable_bandwidth" in target_config:
self.enable_bandwidth, self.bandwidth_count = target_config["enable_bandwidth"]
if "enable_dpdk_memory" in target_config:
self.enable_dpdk_memory, self.dpdk_wait_time = target_config["enable_dpdk_memory"]
if "scheduler_settings" in target_config:
self.scheduler_name = target_config["scheduler_settings"]
if "zcopy_settings" in target_config:
self.enable_zcopy = target_config["zcopy_settings"]
if "results_dir" in target_config:
self.results_dir = target_config["results_dir"]
self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
self.set_local_nic_info(self.set_local_nic_info_helper())
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
stderr_opt = None
if stderr_redirect:
stderr_opt = subprocess.STDOUT
if change_dir:
old_cwd = os.getcwd()
os.chdir(change_dir)
self.log_print("Changing directory to %s" % change_dir)
out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
if change_dir:
os.chdir(old_cwd)
self.log_print("Changing directory to %s" % old_cwd)
return out
def zip_spdk_sources(self, spdk_dir, dest_file):
self.log_print("Zipping SPDK source directory")
fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
for root, _directories, files in os.walk(spdk_dir, followlinks=True):
for file in files:
fh.write(os.path.relpath(os.path.join(root, file)))
fh.close()
self.log_print("Done zipping")
@staticmethod
def read_json_stats(file):
with open(file, "r") as json_data:
data = json.load(json_data)
job_pos = 0 # job_post = 0 because using aggregated results
# Check if latency is in nano or microseconds to choose correct dict key
def get_lat_unit(key_prefix, dict_section):
# key prefix - lat, clat or slat.
# dict section - portion of json containing latency bucket in question
# Return dict key to access the bucket and unit as string
for k, _ in dict_section.items():
if k.startswith(key_prefix):
return k, k.split("_")[1]
def get_clat_percentiles(clat_dict_leaf):
if "percentile" in clat_dict_leaf:
p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
else:
# Latest fio versions do not provide "percentile" results if no
# measurements were done, so just return zeroes
return [0, 0, 0, 0]
read_iops = float(data["jobs"][job_pos]["read"]["iops"])
read_bw = float(data["jobs"][job_pos]["read"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["read"][clat_key])
if "ns" in lat_unit:
read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
if "ns" in clat_unit:
read_p99_lat = read_p99_lat / 1000
read_p99_9_lat = read_p99_9_lat / 1000
read_p99_99_lat = read_p99_99_lat / 1000
read_p99_999_lat = read_p99_999_lat / 1000
write_iops = float(data["jobs"][job_pos]["write"]["iops"])
write_bw = float(data["jobs"][job_pos]["write"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["write"][clat_key])
if "ns" in lat_unit:
write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
if "ns" in clat_unit:
write_p99_lat = write_p99_lat / 1000
write_p99_9_lat = write_p99_9_lat / 1000
write_p99_99_lat = write_p99_99_lat / 1000
write_p99_999_lat = write_p99_999_lat / 1000
return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
def parse_results(self, results_dir, csv_file):
files = os.listdir(results_dir)
fio_files = filter(lambda x: ".fio" in x, files)
json_files = [x for x in files if ".json" in x]
headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
"read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
"write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
"write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
"p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
header_line = ",".join(["Name", *headers])
aggr_header_line = ",".join(["Name", *aggr_headers])
# Create empty results file
with open(os.path.join(results_dir, csv_file), "w") as fh:
fh.write(aggr_header_line + "\n")
rows = set()
for fio_config in fio_files:
self.log_print("Getting FIO stats for %s" % fio_config)
job_name, _ = os.path.splitext(fio_config)
# Look in the filename for rwmixread value. Function arguments do
# not have that information.
# TODO: Improve this function by directly using workload params instead
# of regexing through filenames.
if "read" in job_name:
rw_mixread = 1
elif "write" in job_name:
rw_mixread = 0
else:
rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
# If "_CPU" exists in name - ignore it
# Initiators for the same job could have different num_cores parameter
job_name = re.sub(r"_\d+CPU", "", job_name)
job_result_files = [x for x in json_files if x.startswith(job_name)]
self.log_print("Matching result files for current fio config:")
for j in job_result_files:
self.log_print("\t %s" % j)
# There may have been more than 1 initiator used in test, need to check that
# Result files are created so that string after last "_" separator is server name
inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
inits_avg_results = []
for i in inits_names:
self.log_print("\tGetting stats for initiator %s" % i)
# There may have been more than 1 test run for this job, calculate average results for initiator
i_results = [x for x in job_result_files if i in x]
i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
separate_stats = []
for r in i_results:
try:
stats = self.read_json_stats(os.path.join(results_dir, r))
separate_stats.append(stats)
self.log_print(stats)
except JSONDecodeError:
self.log_print("ERROR: Failed to parse %s results! Results might be incomplete!" % r)
init_results = [sum(x) for x in zip(*separate_stats)]
init_results = [x / len(separate_stats) for x in init_results]
inits_avg_results.append(init_results)
self.log_print("\tAverage results for initiator %s" % i)
self.log_print(init_results)
with open(os.path.join(results_dir, i_results_filename), "w") as fh:
fh.write(header_line + "\n")
fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
# Sum results of all initiators running this FIO job.
# Latency results are an average of latencies from accros all initiators.
inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
for key in inits_avg_results:
if "lat" in key:
inits_avg_results[key] /= len(inits_names)
# Aggregate separate read/write values into common labels
# Take rw_mixread into consideration for mixed read/write workloads.
aggregate_results = OrderedDict()
for h in aggr_headers:
read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
if "lat" in h:
_ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
else:
_ = read_stat + write_stat
aggregate_results[h] = "{0:.3f}".format(_)
rows.add(",".join([job_name, *aggregate_results.values()]))
# Save results to file
for row in rows:
with open(os.path.join(results_dir, csv_file), "a") as fh:
fh.write(row + "\n")
self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
def measure_sar(self, results_dir, sar_file_name):
self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
cpu_number = os.cpu_count()
sar_idle_sum = 0
time.sleep(self.sar_delay)
out = self.exec_cmd(["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count])
with open(os.path.join(results_dir, sar_file_name), "w") as fh:
for line in out.split("\n"):
if "Average" in line:
if "CPU" in line:
self.log_print("Summary CPU utilization from SAR:")
self.log_print(line)
elif "all" in line:
self.log_print(line)
else:
sar_idle_sum += float(line.split()[7])
fh.write(out)
sar_cpu_usage = cpu_number * 100 - sar_idle_sum
with open(os.path.join(results_dir, sar_file_name), "a") as f:
f.write("Total CPU used: " + str(sar_cpu_usage))
def ethtool_after_fio_ramp(self, fio_ramp_time):
time.sleep(fio_ramp_time//2)
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
self.log_print(nic)
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-pkt-inspect-optimize", "off"]) # Disable channel packet inspection optimization
def measure_pcm_memory(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm-memory.x" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
pcm_memory = subprocess.Popen(cmd)
time.sleep(self.pcm_count)
pcm_memory.terminate()
def measure_pcm(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count, "-csv=%s/%s" % (results_dir, pcm_file_name)]
subprocess.run(cmd)
df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
skt_pcm_file_name = "_".join(["skt", pcm_file_name])
skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
def measure_pcm_power(self, results_dir, pcm_power_file_name):
    """After pcm_delay, capture pcm-power.x output into results_dir."""
    time.sleep(self.pcm_delay)
    output = self.exec_cmd(["%s/pcm-power.x" % self.pcm_dir,
                            "%s" % self.pcm_interval, "-i=%s" % self.pcm_count])
    out_path = os.path.join(results_dir, pcm_power_file_name)
    with open(out_path, "w") as out_file:
        out_file.write(output)
def measure_network_bandwidth(self, results_dir, bandwidth_file_name):
    """Record network bandwidth with bwm-ng (1s samples) into a CSV file."""
    self.log_print("INFO: starting network bandwidth measure")
    target_file = "%s/%s" % (results_dir, bandwidth_file_name)
    self.exec_cmd(["bwm-ng", "-o", "csv", "-F", target_file,
                   "-a", "1", "-t", "1000", "-c", str(self.bandwidth_count)])
def measure_dpdk_memory(self, results_dir):
    """Trigger a DPDK memory statistics dump via SPDK RPC and move the
    resulting /tmp/spdk_mem_dump.txt into results_dir.

    Waits self.dpdk_wait_time first so the workload is in steady state.
    """
    self.log_print("INFO: waiting to generate DPDK memory usage")
    time.sleep(self.dpdk_wait_time)
    self.log_print("INFO: generating DPDK memory usage")
    # BUG FIX: the original line only referenced the RPC function object
    # without calling it, so no dump was ever produced. Invoke it against
    # the target's JSON-RPC client (set up in tgt_start()).
    rpc.env.env_dpdk_get_mem_stats(self.client)
    os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))
def sys_config(self):
    """Log a snapshot of the local system configuration (kernel, cmdline,
    sysctl, cpupower, zcopy and scheduler settings) for the test report."""
    self.log_print("====Kernel release:====")
    self.log_print(os.uname().release)
    self.log_print("====Kernel command line:====")
    with open('/proc/cmdline') as f:
        cmdline = f.readlines()
    self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
    self.log_print("====sysctl conf:====")
    with open('/etc/sysctl.conf') as f:
        sysctl = f.readlines()
    self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
    self.log_print("====Cpu power info:====")
    self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
    self.log_print("====zcopy settings:====")
    self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
    self.log_print("====Scheduler settings:====")
    self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
class Initiator(Server):
    """A load-generator host, driven over SSH, that connects to the NVMe-oF
    target and runs fio workloads against the exported subsystems."""

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        # Required fields
        self.ip = initiator_config["ip"]
        self.target_nic_ips = initiator_config["target_nic_ips"]

        # Defaults
        self.cpus_allowed = None
        self.cpus_allowed_policy = "shared"
        self.spdk_dir = "/tmp/spdk"
        self.fio_bin = "/usr/src/fio/fio"
        self.nvmecli_bin = "nvme"
        self.cpu_frequency = None
        self.subsystem_info_list = []

        if "spdk_dir" in initiator_config:
            self.spdk_dir = initiator_config["spdk_dir"]
        if "fio_bin" in initiator_config:
            self.fio_bin = initiator_config["fio_bin"]
        if "nvmecli_bin" in initiator_config:
            self.nvmecli_bin = initiator_config["nvmecli_bin"]
        if "cpus_allowed" in initiator_config:
            self.cpus_allowed = initiator_config["cpus_allowed"]
        if "cpus_allowed_policy" in initiator_config:
            self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
        if "cpu_frequency" in initiator_config:
            self.cpu_frequency = initiator_config["cpu_frequency"]
        if os.getenv('SPDK_WORKSPACE'):
            self.spdk_dir = os.getenv('SPDK_WORKSPACE')

        self.ssh_connection = paramiko.SSHClient()
        self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
        self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
        self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
        self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))

        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
            self.copy_spdk("/tmp/spdk.zip")
        self.set_local_nic_info(self.set_local_nic_info_helper())
        self.set_cpu_frequency()
        self.configure_system()
        if self.enable_adq:
            self.configure_adq()
        self.sys_config()

    def set_local_nic_info_helper(self):
        """Return the remote host's hardware info as parsed "lshw -json"."""
        return json.loads(self.exec_cmd(["lshw", "-json"]))

    def stop(self):
        """Close the SSH connection to the initiator."""
        self.ssh_connection.close()

    def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
        """Execute cmd (a list of argv elements) on the remote host via SSH.

        stderr_redirect merges stderr into stdout (via a pty); change_dir
        runs the command from that directory. Returns stdout text; raises
        CalledProcessError on a non-zero remote exit status.
        """
        if change_dir:
            cmd = ["cd", change_dir, ";", *cmd]

        # In case one of the command elements contains whitespace and is not
        # already quoted (e.g. when calling sysctl), quote it again to prevent
        # expansion when sending to the remote system.
        for i, c in enumerate(cmd):
            if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
                cmd[i] = '"%s"' % c
        cmd = " ".join(cmd)

        # Redirect stderr to stdout using the get_pty option if needed.
        _, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
        out = stdout.read().decode(encoding="utf-8")

        # Check the return code
        rc = stdout.channel.recv_exit_status()
        if rc:
            raise CalledProcessError(int(rc), cmd, out)

        return out

    def put_file(self, local, remote_dest):
        """Upload a local file to the initiator via SFTP."""
        ftp = self.ssh_connection.open_sftp()
        ftp.put(local, remote_dest)
        ftp.close()

    def get_file(self, remote, local_dest):
        """Download a file from the initiator via SFTP."""
        ftp = self.ssh_connection.open_sftp()
        ftp.get(remote, local_dest)
        ftp.close()

    def copy_spdk(self, local_spdk_zip):
        """Copy the zipped SPDK sources to the initiator and unpack them."""
        self.log_print("Copying SPDK sources to initiator %s" % self.name)
        self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
        self.log_print("Copied sources zip from target")
        self.exec_cmd(["unzip", "-qo", "/tmp/spdk_drop.zip", "-d", self.spdk_dir])
        self.log_print("Sources unpacked")

    def copy_result_files(self, dest_dir):
        """Copy all fio result files from the initiator into dest_dir."""
        self.log_print("Copying results")

        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)

        # Get list of result files from initiator and copy them back to target
        file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")

        for file in file_list:
            self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
                          os.path.join(dest_dir, file))
        self.log_print("Done copying results")

    def discover_subsystems(self, address_list, subsys_no):
        """Probe ports 4420..4420+subsys_no-1 on every target address with
        "nvme discover" and store matching (trsvcid, nqn, traddr) triples,
        sorted by NQN, in self.subsystem_info_list."""
        num_nvmes = range(0, subsys_no)
        nvme_discover_output = ""
        # Loop variable renamed so it no longer shadows the subsys_no parameter.
        for ip, nvme_no in itertools.product(address_list, num_nvmes):
            self.log_print("Trying to discover: %s:%s" % (ip, 4420 + nvme_no))
            nvme_discover_cmd = ["sudo",
                                 "%s" % self.nvmecli_bin,
                                 "discover", "-t", "%s" % self.transport,
                                 "-s", "%s" % (4420 + nvme_no),
                                 "-a", "%s" % ip]

            try:
                stdout = self.exec_cmd(nvme_discover_cmd)
                if stdout:
                    nvme_discover_output = nvme_discover_output + stdout
            except CalledProcessError:
                # Do nothing. In case of discovering remote subsystems of kernel target
                # we expect "nvme discover" to fail a bunch of times because we basically
                # scan ports.
                pass

        subsystems = re.findall(r'trsvcid:\s(\d+)\s+'  # get svcid number
                                r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+'  # get NQN id
                                r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',  # get IP address
                                nvme_discover_output)  # from nvme discovery output
        subsystems = filter(lambda x: x[-1] in address_list, subsystems)
        subsystems = list(set(subsystems))
        subsystems.sort(key=lambda x: x[1])
        self.log_print("Found matching subsystems on target side:")
        for s in subsystems:
            self.log_print(s)
        self.subsystem_info_list = subsystems

    def gen_fio_filename_conf(self, *args, **kwargs):
        # Logic implemented in SPDKInitiator and KernelInitiator classes
        pass

    def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10, rate_iops=0):
        """Render a complete fio job file on the initiator and return its
        remote path ("<spdk_dir>/nvmf_perf/<name>.fio")."""
        fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999
norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
rate_iops={rate_iops}
"""
        if "spdk" in self.mode:
            bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
            self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])
            ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
            spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
        else:
            ioengine = self.ioengine
            spdk_conf = ""
            # List the kernel-attached NVMe-oF devices to size the workload.
            out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
                                 "|", "awk", "'{print $1}'"])
            subsystems = [x for x in out.split("\n") if "nvme" in x]

        if self.cpus_allowed is not None:
            self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
            cpus_num = 0
            cpus = self.cpus_allowed.split(",")
            for cpu in cpus:
                if "-" in cpu:
                    a, b = cpu.split("-")
                    a = int(a)
                    b = int(b)
                    # BUG FIX: "a-b" is an inclusive CPU range ("0-3" = 4 CPUs).
                    # len(range(a, b)) under-counted by one; now consistent with
                    # SPDKTarget.get_num_cores().
                    cpus_num += len(range(a, b + 1))
                else:
                    cpus_num += 1
            self.num_cores = cpus_num
            threads = range(0, self.num_cores)
        elif hasattr(self, 'num_cores'):
            self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
            threads = range(0, int(self.num_cores))
        else:
            self.num_cores = len(subsystems)
            threads = range(0, len(subsystems))

        if "spdk" in self.mode:
            filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
        else:
            filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)

        fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
                                              rw=rw, rwmixread=rwmixread, block_size=block_size,
                                              ramp_time=ramp_time, run_time=run_time, rate_iops=rate_iops)

        # TODO: hipri disabled for now, as it causes fio errors:
        # io_u error on file /dev/nvme2n1: Operation not supported
        # See comment in KernelInitiator class, kernel_init_connect() function
        if hasattr(self, "ioengine") and "io_uring" in self.ioengine:
            fio_config = fio_config + """
fixedbufs=1
registerfiles=1
#hipri=1
"""
        if num_jobs:
            fio_config = fio_config + "numjobs=%s \n" % num_jobs
        if self.cpus_allowed is not None:
            fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
            fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
        fio_config = fio_config + filename_section

        fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
        if hasattr(self, "num_cores"):
            fio_config_filename += "_%sCPU" % self.num_cores
        fio_config_filename += ".fio"

        self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
        self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
        self.log_print("Created FIO Config:")
        self.log_print(fio_config)

        return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)

    def set_cpu_frequency(self):
        """Pin the CPU frequency with cpupower when cpu_frequency is set."""
        if self.cpu_frequency is not None:
            try:
                self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
                self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
                self.log_print(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
            except Exception:
                self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
                sys.exit()
        else:
            self.log_print("WARNING: you have disabled intel_pstate and using default cpu governance.")

    def run_fio(self, fio_config_file, run_num=None):
        """Run fio on the initiator with the given remote config file.

        With run_num, repeat that many times, each run writing a numbered
        JSON result file; otherwise do a single run.
        """
        job_name, _ = os.path.splitext(fio_config_file)
        self.log_print("Starting FIO run for job: %s" % job_name)
        self.log_print("Using FIO: %s" % self.fio_bin)

        if run_num:
            for i in range(1, run_num + 1):
                output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
                try:
                    output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
                                            "--output=%s" % output_filename, "--eta=never"], True)
                    self.log_print(output)
                except subprocess.CalledProcessError as e:
                    self.log_print("ERROR: Fio process failed!")
                    self.log_print(e.stdout)
        else:
            output_filename = job_name + "_" + self.name + ".json"
            # BUG FIX: was '"--output" % output_filename', which raised
            # TypeError (no conversion specifier in the format string).
            output = self.exec_cmd(["sudo", self.fio_bin,
                                    fio_config_file, "--output-format=json",
                                    "--output=%s" % output_filename], True)
            self.log_print(output)
        self.log_print("FIO run finished. Results in: %s" % output_filename)

    def sys_config(self):
        """Log the remote initiator's kernel/sysctl/cpupower configuration."""
        self.log_print("====Kernel release:====")
        self.log_print(self.exec_cmd(["uname", "-r"]))
        self.log_print("====Kernel command line:====")
        cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
        self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
        self.log_print("====sysctl conf:====")
        sysctl = self.exec_cmd(["cat", "/etc/sysctl.conf"])
        self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
        self.log_print("====Cpu power info:====")
        self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
class KernelTarget(Target):
    # NVMe-oF target backed by the Linux kernel nvmet subsystem, driven
    # through nvmetcli with a generated "kernel.conf" file.

    def __init__(self, name, general_config, target_config):
        super().__init__(name, general_config, target_config)
        # Defaults
        self.nvmet_bin = "nvmetcli"

        if "nvmet_bin" in target_config:
            self.nvmet_bin = target_config["nvmet_bin"]

    def stop(self):
        # Tear down the whole kernel nvmet configuration.
        nvmet_command(self.nvmet_bin, "clear")

    def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
        # Build the nvmetcli JSON config: one subsystem and one port per
        # disk, with disks distributed evenly across the NIC addresses, and
        # write it to "kernel.conf" in the current directory.

        nvmet_cfg = {
            "ports": [],
            "hosts": [],
            "subsystems": [],
        }

        # Split disks between NIC IP's
        disks_per_ip = int(len(nvme_list) / len(address_list))
        disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]

        # Add remaining drives
        for i, disk in enumerate(nvme_list[disks_per_ip * len(address_list):]):
            disk_chunks[i].append(disk)

        subsys_no = 1
        port_no = 0
        for ip, chunk in zip(address_list, disk_chunks):
            for disk in chunk:
                nqn = "nqn.2018-09.io.spdk:cnode%s" % subsys_no
                nvmet_cfg["subsystems"].append({
                    "allowed_hosts": [],
                    "attr": {
                        "allow_any_host": "1",
                        "serial": "SPDK00%s" % subsys_no,
                        "version": "1.3"
                    },
                    "namespaces": [
                        {
                            "device": {
                                "path": disk,
                                "uuid": "%s" % uuid.uuid4()
                            },
                            "enable": 1,
                            "nsid": subsys_no
                        }
                    ],
                    "nqn": nqn
                })

                nvmet_cfg["ports"].append({
                    "addr": {
                        "adrfam": "ipv4",
                        "traddr": ip,
                        # One listener port per disk: 4420, 4421, ...
                        "trsvcid": "%s" % (4420 + port_no),
                        "trtype": "%s" % self.transport
                    },
                    "portid": subsys_no,
                    "referrals": [],
                    "subsystems": [nqn]
                })
                subsys_no += 1
                port_no += 1
                # NOTE(review): this stores the port *counter* (1, 2, ...)
                # rather than the trsvcid (4420 + n) used above — confirm
                # consumers rely on "nvme discover" output, not this list.
                self.subsystem_info_list.append([port_no, nqn, ip])

        with open("kernel.conf", "w") as fh:
            fh.write(json.dumps(nvmet_cfg, indent=2))

    def tgt_start(self):
        # Generate the config for either null-block or real NVMe devices,
        # then load it into the kernel target via nvmetcli.
        self.log_print("Configuring kernel NVMeOF Target")

        if self.null_block:
            print("Configuring with null block device.")
            null_blk_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
            self.kernel_tgt_gen_subsystem_conf(null_blk_list, self.nic_ips)
            self.subsys_no = len(null_blk_list)
        else:
            print("Configuring with NVMe drives.")
            nvme_list = get_nvme_devices()
            self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
            self.subsys_no = len(nvme_list)

        nvmet_command(self.nvmet_bin, "clear")
        nvmet_command(self.nvmet_bin, "restore kernel.conf")

        if self.enable_adq:
            self.adq_configure_tc()

        self.log_print("Done configuring kernel NVMeOF Target")
class SPDKTarget(Target):
    # NVMe-oF target implemented by the SPDK nvmf_tgt application; all
    # configuration happens via SPDK JSON-RPC once the process is running.

    def __init__(self, name, general_config, target_config):
        super().__init__(name, general_config, target_config)

        # Required fields
        self.core_mask = target_config["core_mask"]
        self.num_cores = self.get_num_cores(self.core_mask)

        # Defaults
        self.dif_insert_strip = False
        self.null_block_dif_type = 0
        self.num_shared_buffers = 4096
        self.max_queue_depth = 128
        self.bpf_proc = None
        self.bpf_scripts = []
        self.enable_idxd = False

        if "num_shared_buffers" in target_config:
            self.num_shared_buffers = target_config["num_shared_buffers"]
        if "max_queue_depth" in target_config:
            self.max_queue_depth = target_config["max_queue_depth"]
        if "null_block_dif_type" in target_config:
            self.null_block_dif_type = target_config["null_block_dif_type"]
        if "dif_insert_strip" in target_config:
            self.dif_insert_strip = target_config["dif_insert_strip"]
        if "bpf_scripts" in target_config:
            self.bpf_scripts = target_config["bpf_scripts"]
        if "idxd_settings" in target_config:
            self.enable_idxd = target_config["idxd_settings"]

        self.log_print("====IDXD settings:====")
        self.log_print("IDXD enabled: %s" % (self.enable_idxd))

    @staticmethod
    def get_num_cores(core_mask):
        # Count CPUs in a core mask given either as hex ("0xF0") or as a
        # bracketed list/range string ("[0,2-4]"); "a-b" ranges are inclusive.
        if "0x" in core_mask:
            return bin(int(core_mask, 16)).count("1")
        else:
            num_cores = 0
            core_mask = core_mask.replace("[", "")
            core_mask = core_mask.replace("]", "")
            for i in core_mask.split(","):
                if "-" in i:
                    x, y = i.split("-")
                    num_cores += len(range(int(x), int(y))) + 1  # inclusive range
                else:
                    num_cores += 1
            return num_cores

    def spdk_tgt_configure(self):
        # Create the transport layer and expose bdevs (null-block or NVMe)
        # as NVMe-oF subsystems.
        self.log_print("Configuring SPDK NVMeOF target via RPC")

        if self.enable_adq:
            self.adq_configure_tc()

        # Create transport layer
        rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
                                       num_shared_buffers=self.num_shared_buffers,
                                       max_queue_depth=self.max_queue_depth,
                                       dif_insert_or_strip=self.dif_insert_strip,
                                       sock_priority=self.adq_priority)
        self.log_print("SPDK NVMeOF transport layer:")
        rpc_client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))

        if self.null_block:
            self.spdk_tgt_add_nullblock(self.null_block)
            self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
        else:
            self.spdk_tgt_add_nvme_conf()
            self.spdk_tgt_add_subsystem_conf(self.nic_ips)

        self.log_print("Done configuring SPDK NVMeOF Target")

    def spdk_tgt_add_nullblock(self, null_block_count):
        # Create null_block_count null bdevs named NvmeXn1.
        md_size = 0
        block_size = 4096
        # A non-zero DIF type needs metadata space appended to each block.
        if self.null_block_dif_type != 0:
            md_size = 128

        self.log_print("Adding null block bdevices to config via RPC")
        for i in range(null_block_count):
            self.log_print("Setting bdev protection to :%s" % self.null_block_dif_type)
            rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
                                      dif_type=self.null_block_dif_type, md_size=md_size)
        self.log_print("SPDK Bdevs configuration:")
        rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

    def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
        # Attach local PCIe NVMe controllers (optionally limited to
        # req_num_disks) as bdevs named NvmeX.
        self.log_print("Adding NVMe bdevs to config via RPC")

        bdfs = get_nvme_devices_bdf()
        bdfs = [b.replace(":", ".") for b in bdfs]

        if req_num_disks:
            if req_num_disks > len(bdfs):
                self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
                sys.exit(1)
            else:
                bdfs = bdfs[0:req_num_disks]

        for i, bdf in enumerate(bdfs):
            rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)

        self.log_print("SPDK Bdevs configuration:")
        rpc_client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))

    def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
        # Create one subsystem per bdev, attach its namespace and add a
        # listener; bdevs are spread evenly across the provided NIC IPs.
        self.log_print("Adding subsystems to config")
        port = "4420"
        if not req_num_disks:
            req_num_disks = get_nvme_devices_count()

        # Distribute bdevs between provided NICs
        num_disks = range(0, req_num_disks)
        if len(num_disks) == 1:
            disks_per_ip = 1
        else:
            disks_per_ip = int(len(num_disks) / len(ips))
        disk_chunks = [[*num_disks[i * disks_per_ip:disks_per_ip + disks_per_ip * i]] for i in range(0, len(ips))]

        # Add remaining drives
        for i, disk in enumerate(num_disks[disks_per_ip * len(ips):]):
            disk_chunks[i].append(disk)

        # Create subsystems, add bdevs to namespaces, add listeners
        for ip, chunk in zip(ips, disk_chunks):
            for c in chunk:
                nqn = "nqn.2018-09.io.spdk:cnode%s" % c
                serial = "SPDK00%s" % c
                bdev_name = "Nvme%sn1" % c
                rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
                                               allow_any_host=True, max_namespaces=8)
                rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)

                rpc.nvmf.nvmf_subsystem_add_listener(self.client,
                                                     nqn=nqn,
                                                     trtype=self.transport,
                                                     traddr=ip,
                                                     trsvcid=port,
                                                     adrfam="ipv4")
                self.subsystem_info_list.append([port, nqn, ip])
        self.log_print("SPDK NVMeOF subsystem configuration:")
        rpc_client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))

    def bpf_start(self):
        # Launch bpftrace.sh against the running nvmf_tgt process; output
        # goes to <results_dir>/bpf_traces.txt via the BPF_OUTFILE env var.
        self.log_print("Starting BPF Trace scripts: %s" % self.bpf_scripts)
        bpf_script = os.path.join(self.spdk_dir, "scripts/bpftrace.sh")
        bpf_traces = [os.path.join(self.spdk_dir, "scripts/bpf", trace) for trace in self.bpf_scripts]
        results_path = os.path.join(self.results_dir, "bpf_traces.txt")

        # self.pid is the path of the file holding nvmf_tgt's PID (see tgt_start).
        with open(self.pid, "r") as fh:
            nvmf_pid = str(fh.readline())

        cmd = [bpf_script, nvmf_pid, *bpf_traces]
        self.log_print(cmd)
        self.bpf_proc = subprocess.Popen(cmd, env={"BPF_OUTFILE": results_path})

    def tgt_start(self):
        # Start nvmf_tgt, wait for its RPC socket, apply pre-init options
        # (zcopy/ADQ/IDXD/scheduler), then finish init and configure subsystems.
        if self.null_block:
            self.subsys_no = 1
        else:
            self.subsys_no = get_nvme_devices_count()
        self.log_print("Starting SPDK NVMeOF Target process")
        nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
        proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
        self.pid = os.path.join(self.spdk_dir, "nvmf.pid")

        with open(self.pid, "w") as fh:
            fh.write(str(proc.pid))
        self.nvmf_proc = proc
        # NOTE(review): self.pid holds the PID *file path*, so this logs the
        # path rather than the process id — confirm intent.
        self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
        self.log_print("Waiting for spdk to initialize...")
        while True:
            if os.path.exists("/var/tmp/spdk.sock"):
                break
            time.sleep(1)
        self.client = rpc_client.JSONRPCClient("/var/tmp/spdk.sock")

        if self.enable_zcopy:
            rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
                                           enable_zerocopy_send_server=True)
            self.log_print("Target socket options:")
            rpc_client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))

        if self.enable_adq:
            rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
            rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
                                           nvme_adminq_poll_period_us=100000, retry_count=4)

        if self.enable_idxd:
            rpc.idxd.idxd_scan_accel_engine(self.client, config_kernel_mode=None)
            self.log_print("Target IDXD accel engine enabled")

        rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name)
        rpc.framework_start_init(self.client)

        if self.bpf_scripts:
            self.bpf_start()

        self.spdk_tgt_configure()

    def stop(self):
        # Stop the BPF tracer first, then terminate nvmf_tgt (kill as fallback).
        if self.bpf_proc:
            self.log_print("Stopping BPF Trace script")
            self.bpf_proc.terminate()
            self.bpf_proc.wait()

        if hasattr(self, "nvmf_proc"):
            try:
                self.nvmf_proc.terminate()
                self.nvmf_proc.wait()
            except Exception as e:
                self.log_print(e)
                self.nvmf_proc.kill()
                self.nvmf_proc.communicate()
class KernelInitiator(Initiator):
    """Initiator using the Linux kernel NVMe-oF host stack: connects with
    nvme-cli and runs fio against the resulting /dev/nvmeXnY devices."""

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        # Defaults
        self.extra_params = ""
        self.ioengine = "libaio"

        if "extra_params" in initiator_config:
            self.extra_params = initiator_config["extra_params"]

        if "kernel_engine" in initiator_config:
            self.ioengine = initiator_config["kernel_engine"]
            if "io_uring" in self.ioengine:
                # io_uring needs poll queues on the connect command line.
                self.extra_params = "--nr-poll-queues=8"

    def get_connected_nvme_list(self):
        """Return device names (nvmeXnY) of connected SPDK/Linux NVMe-oF disks."""
        json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
        nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
                     if "SPDK" in x["ModelNumber"] or "Linux" in x["ModelNumber"]]
        return nvme_list

    def kernel_init_connect(self):
        """Connect to every discovered subsystem with nvme-cli and, for
        io_uring, tune block-layer sysfs knobs on the new devices."""
        self.log_print("Below connection attempts may result in error messages, this is expected!")
        for subsystem in self.subsystem_info_list:
            # BUG FIX: subsystem is a list; %-formatting requires a tuple —
            # the original '% subsystem' raised TypeError.
            self.log_print("Trying to connect %s %s %s" % tuple(subsystem))
            self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
                           "-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
            time.sleep(2)

        if "io_uring" in self.ioengine:
            self.log_print("Setting block layer settings for io_uring.")

            # TODO: io_poll=1 and io_poll_delay=-1 params not set here, because
            # apparently it's not possible for connected subsystems.
            # Results in "error: Invalid argument"
            block_sysfs_settings = {
                "iostats": "0",
                "rq_affinity": "0",
                "nomerges": "2"
            }

            for disk in self.get_connected_nvme_list():
                sysfs = os.path.join("/sys/block", disk, "queue")
                for k, v in block_sysfs_settings.items():
                    sysfs_opt_path = os.path.join(sysfs, k)
                    try:
                        self.exec_cmd(["sudo", "bash", "-c", "echo %s > %s" % (v, sysfs_opt_path)], stderr_redirect=True)
                    except subprocess.CalledProcessError as e:
                        self.log_print("Warning: command %s failed due to error %s. %s was not set!" % (e.cmd, e.output, v))
                    finally:
                        # Always log the effective value, even if the write failed.
                        _ = self.exec_cmd(["sudo", "cat", "%s" % (sysfs_opt_path)])
                        self.log_print("%s=%s" % (sysfs_opt_path, _))

    def kernel_init_disconnect(self):
        """Disconnect from every subsystem connected by kernel_init_connect()."""
        for subsystem in self.subsystem_info_list:
            self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
            time.sleep(1)

    def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
        """Distribute the connected /dev/nvme* devices across fio job
        sections, scaling each section's iodepth so the total outstanding
        I/O stays constant. Returns the [filenameN] sections as text."""
        nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]

        filename_section = ""
        nvme_per_split = int(len(nvme_list) / len(threads))
        remainder = len(nvme_list) % len(threads)
        iterator = iter(nvme_list)
        result = []
        for i in range(len(threads)):
            result.append([])
            for _ in range(nvme_per_split):
                result[i].append(next(iterator))
            if remainder:
                # Spread leftover devices one-per-section from the front.
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            job_section_qd = round((io_depth * len(r)) / num_jobs)
            if job_section_qd == 0:
                job_section_qd = 1
            iodepth = "iodepth=%s" % job_section_qd

            filename_section = "\n".join([filename_section, header, disks, iodepth])

        return filename_section
class SPDKInitiator(Initiator):
    # Initiator that runs fio through the SPDK bdev fio plugin instead of
    # the kernel NVMe-oF host stack.

    def __init__(self, name, general_config, initiator_config):
        super().__init__(name, general_config, initiator_config)

        if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
            self.install_spdk()

        # Required fields
        self.num_cores = initiator_config["num_cores"]

        # Optional fields
        self.enable_data_digest = False
        if "enable_data_digest" in initiator_config:
            self.enable_data_digest = initiator_config["enable_data_digest"]

    def install_spdk(self):
        # Build SPDK (with the fio plugin) on the initiator and run setup.sh.
        self.log_print("Using fio binary %s" % self.fio_bin)
        self.exec_cmd(["git", "-C", self.spdk_dir, "submodule", "update", "--init"])
        self.exec_cmd(["git", "-C", self.spdk_dir, "clean", "-ffdx"])
        self.exec_cmd(["cd", self.spdk_dir, "&&", "./configure", "--with-rdma", "--with-fio=%s" % os.path.dirname(self.fio_bin)])
        self.exec_cmd(["make", "-C", self.spdk_dir, "clean"])
        self.exec_cmd(["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"])

        self.log_print("SPDK built")
        self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])

    def gen_spdk_bdev_conf(self, remote_subsystem_list):
        # Build the JSON bdev config attaching one NVMe-oF controller per
        # remote (port, nqn, address) triple; returns it as a JSON string.
        bdev_cfg_section = {
            "subsystems": [
                {
                    "subsystem": "bdev",
                    "config": []
                }
            ]
        }

        for i, subsys in enumerate(remote_subsystem_list):
            sub_port, sub_nqn, sub_addr = map(lambda x: str(x), subsys)
            nvme_ctrl = {
                "method": "bdev_nvme_attach_controller",
                "params": {
                    "name": "Nvme{}".format(i),
                    "trtype": self.transport,
                    "traddr": sub_addr,
                    "trsvcid": sub_port,
                    "subnqn": sub_nqn,
                    "adrfam": "IPv4"
                }
            }

            if self.enable_adq:
                nvme_ctrl["params"].update({"priority": "1"})

            if self.enable_data_digest:
                nvme_ctrl["params"].update({"ddgst": self.enable_data_digest})

            bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)

        return json.dumps(bdev_cfg_section, indent=2)

    def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
        # Distribute the NvmeXn1 bdevs across fio job sections, scaling each
        # section's iodepth so total outstanding I/O stays constant.
        filename_section = ""
        if len(threads) >= len(subsystems):
            # Never create more job sections than there are bdevs.
            threads = range(0, len(subsystems))
        filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
        nvme_per_split = int(len(subsystems) / len(threads))
        remainder = len(subsystems) % len(threads)
        iterator = iter(filenames)
        result = []
        for i in range(len(threads)):
            result.append([])
            for _ in range(nvme_per_split):
                result[i].append(next(iterator))
            if remainder:
                result[i].append(next(iterator))
                remainder -= 1
        for i, r in enumerate(result):
            header = "[filename%s]" % i
            disks = "\n".join(["filename=%s" % x for x in r])
            job_section_qd = round((io_depth * len(r)) / num_jobs)
            if job_section_qd == 0:
                job_section_qd = 1
            iodepth = "iodepth=%s" % job_section_qd

            filename_section = "\n".join([filename_section, header, disks, iodepth])

        return filename_section
if __name__ == "__main__":
    # Orchestrates an NVMe-oF benchmark run: parse config, build target and
    # initiator objects, run the fio workload matrix with side measurements,
    # then restore system settings and collect results.
    script_full_dir = os.path.dirname(os.path.realpath(__file__))
    default_config_file_path = os.path.relpath(os.path.join(script_full_dir, "config.json"))

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--config', type=str, default=default_config_file_path,
                        help='Configuration file.')
    parser.add_argument('-r', '--results', type=str, default='/tmp/results',
                        help='Results directory.')
    parser.add_argument('-s', '--csv-filename', type=str, default='nvmf_results.csv',
                        help='CSV results filename.')
    args = parser.parse_args()

    print("Using config file: %s" % args.config)
    with open(args.config, "r") as config:
        data = json.load(config)

    initiators = []
    fio_cases = []

    general_config = data["general"]
    target_config = data["target"]
    initiator_configs = [data[x] for x in data.keys() if "initiator" in x]

    # Instantiate target/initiator objects and read the fio workload matrix.
    for k, v in data.items():
        if "target" in k:
            v.update({"results_dir": args.results})
            if data[k]["mode"] == "spdk":
                target_obj = SPDKTarget(k, data["general"], v)
            elif data[k]["mode"] == "kernel":
                target_obj = KernelTarget(k, data["general"], v)
        elif "initiator" in k:
            if data[k]["mode"] == "spdk":
                init_obj = SPDKInitiator(k, data["general"], v)
            elif data[k]["mode"] == "kernel":
                init_obj = KernelInitiator(k, data["general"], v)
            initiators.append(init_obj)
        elif "fio" in k:
            fio_workloads = itertools.product(data[k]["bs"],
                                              data[k]["qd"],
                                              data[k]["rw"])

            fio_run_time = data[k]["run_time"]
            fio_ramp_time = data[k]["ramp_time"]
            fio_rw_mix_read = data[k]["rwmixread"]
            fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
            fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None

            fio_rate_iops = 0
            if "rate_iops" in data[k]:
                fio_rate_iops = data[k]["rate_iops"]
        else:
            continue

    try:
        os.mkdir(args.results)
    except FileExistsError:
        pass

    # TODO: This try block is definitely too large. Need to break this up into
    # separate logical blocks to reduce size.
    try:
        target_obj.tgt_start()

        for i in initiators:
            i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
            if i.enable_adq:
                i.adq_configure_tc()

        # Poor mans threading
        # Run FIO tests
        for block_size, io_depth, rw in fio_workloads:
            threads = []
            configs = []
            for i in initiators:
                if i.mode == "kernel":
                    i.kernel_init_connect()

                cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
                                       fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
                configs.append(cfg)

            # One fio thread per initiator, plus optional measurement threads.
            for i, cfg in zip(initiators, configs):
                t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
                threads.append(t)
            if target_obj.enable_sar:
                sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
                sar_file_name = ".".join([sar_file_name, "txt"])
                t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_name))
                threads.append(t)

            if target_obj.enable_pcm:
                pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]

                pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
                pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
                pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))

                threads.append(pcm_cpu_t)
                threads.append(pcm_mem_t)
                threads.append(pcm_pow_t)

            if target_obj.enable_bandwidth:
                bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
                bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
                t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
                threads.append(t)

            if target_obj.enable_dpdk_memory:
                # BUG FIX: Thread args must be a tuple; "(args.results)" is
                # just a parenthesized string and would be star-unpacked
                # character by character at call time.
                t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results,))
                threads.append(t)

            if target_obj.enable_adq:
                ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
                threads.append(ethtool_thread)

            for t in threads:
                t.start()
            for t in threads:
                t.join()

            for i in initiators:
                if i.mode == "kernel":
                    i.kernel_init_disconnect()
                i.copy_result_files(args.results)

        # Restore system settings touched during the run.
        target_obj.restore_governor()
        target_obj.restore_tuned()
        target_obj.restore_services()
        target_obj.restore_sysctl()
        for i in initiators:
            i.restore_governor()
            i.restore_tuned()
            i.restore_services()
            i.restore_sysctl()

        target_obj.parse_results(args.results, args.csv_filename)
    finally:
        for i in initiators:
            try:
                i.stop()
            except Exception as err:
                # Best effort: report the failure but keep stopping the
                # remaining hosts (the original silently swallowed it).
                print("Failed to stop initiator %s: %s" % (i.name, err))
        target_obj.stop()
|
from flask import Flask, request
import sqlalchemy
import sqlalchemy.orm
# Flask application plus the SQLAlchemy engine/declarative base shared by
# the example route below.
app = Flask(__name__)
engine = sqlalchemy.create_engine(...)  # NOTE(review): "..." is a literal Ellipsis placeholder, not a real DB URL
Base = sqlalchemy.orm.declarative_base()
class User(Base):
    """Minimal ORM model mapping the "users" table (id + username)."""
    __tablename__ = "users"
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    username = sqlalchemy.Column(sqlalchemy.String)
@app.route("/users/<username>")
def show_user(username):
    """Contrast unsafe and safe ways of querying by username.

    NOTE(review): the "BAD" statements below are intentionally vulnerable
    SQL-injection examples (this reads like a static-analysis test case);
    they must not be copied into production code.
    """
    session = sqlalchemy.orm.Session(engine)

    # BAD, normal SQL injection
    stmt = sqlalchemy.text("SELECT * FROM users WHERE username = '{}'".format(username))
    results = session.execute(stmt).fetchall()

    # BAD, allows SQL injection
    username_formatted_for_sql = sqlalchemy.text("'{}'".format(username))
    stmt = sqlalchemy.select(User).where(User.username == username_formatted_for_sql)
    results = session.execute(stmt).scalars().all()

    # GOOD, does not allow for SQL injection
    stmt = sqlalchemy.select(User).where(User.username == username)
    results = session.execute(stmt).scalars().all()

    ...
|
from threading import Thread
class Async_function(Thread):
    """Run a one-argument callable on a background thread and keep its
    return value for later retrieval.

    Usage:
        t = Async_function(func, arg)
        t.start()
        t.join()
        result = t.return_val()
    """

    def __init__(self, func, args=0):
        # Idiom fix: use super().__init__() instead of the legacy
        # Thread.__init__(self) call.
        super().__init__()
        self.func = func
        self.args = args
        # Holds func's return value once run() has completed (0 before that).
        self.retr = 0

    def run(self):
        """Thread entry point: invoke func with the stored argument."""
        self.retr = self.func(self.args)

    def return_val(self):
        """Return the value produced by func (0 if the thread has not run)."""
        return self.retr
|
'''
Created on 24.10.2019
@author: JM
'''
from PyTrinamic.ic.TMC2041.TMC2041_register import TMC2041_register
from PyTrinamic.ic.TMC2041.TMC2041_register_variant import TMC2041_register_variant
from PyTrinamic.ic.TMC2041.TMC2041_fields import TMC2041_fields
from PyTrinamic.helpers import TMC_helpers
class TMC2041():
    """
    Class for the TMC2041 IC

    Register/field/variant maps come from the PyTrinamic definitions; the
    actual bus transport (read/write of registers) must be supplied by a
    subclass or wrapper that overrides writeRegister/readRegister.
    """
    def __init__(self, channel):
        # Opaque channel handle forwarded to every register access.
        self.__channel = channel
        self.registers = TMC2041_register
        self.fields = TMC2041_fields
        self.variants = TMC2041_register_variant
        # The TMC2041 is a dual driver: two motors per chip.
        self.MOTORS = 2

    def showChipInfo(self):
        # Human-readable chip summary; print only, no bus traffic.
        print("TMC2041 chip info: The TMC2041 is a compact, dual stepper motor driver IC with serial interfaces for configuration and diagnostics. Voltage supply: 4.75 - 26V")

    def writeRegister(self, registerAddress, value, channel):
        # Abstract: the concrete transport must override this.
        raise NotImplementedError

    def readRegister(self, registerAddress, channel):
        # Abstract: the concrete transport must override this.
        raise NotImplementedError

    def writeRegisterField(self, field, value):
        # Read-modify-write of a single bit field. `field` appears to be a
        # (register address, mask, shift) tuple -- confirm in TMC2041_fields.
        return self.writeRegister(field[0], TMC_helpers.field_set(self.readRegister(field[0], self.__channel), field[1], field[2], value), self.__channel)

    def readRegisterField(self, field):
        # Extract one bit field from the register at field[0].
        return TMC_helpers.field_get(self.readRegister(field[0], self.__channel), field[1], field[2])

    def moveBy(self, motor, distance, velocity):
        """Relative move: target = XACTUAL + distance; returns the target position."""
        if not(0 <= motor < self.MOTORS):
            raise ValueError
        # NOTE(review): readRegister() as declared above takes no `signed`
        # kwarg, and moveTo() is not defined in this class -- both must be
        # provided by the concrete implementation; confirm against the
        # subclass actually used.
        position = self.readRegister(self.registers.XACTUAL, self.__channel, signed=True)
        self.moveTo(motor, position + distance, velocity)
        return position + distance

    def get_pin_state(self):
        # Not implemented for this IC; placeholder for API parity.
        pass
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/lib/zksk/nizk.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.lib.petlib import bn_pb2 as proto_dot_lib_dot_petlib_dot_bn__pb2
from syft.proto.lib.python import tuple_pb2 as proto_dot_lib_dot_python_dot_tuple__pb2
# NOTE: generated module -- regenerate from proto/lib/zksk/nizk.proto rather
# than editing the descriptors below by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
    name="proto/lib/zksk/nizk.proto",
    package="syft.lib.zksk",
    syntax="proto3",
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n\x19proto/lib/zksk/nizk.proto\x12\rsyft.lib.zksk\x1a\x19proto/lib/petlib/bn.proto\x1a\x1cproto/lib/python/tuple.proto"l\n\x04NIZK\x12&\n\tchallenge\x18\x01 \x01(\x0b\x32\x13.syft.lib.petlib.Bn\x12)\n\tresponses\x18\x02 \x01(\x0b\x32\x16.syft.lib.python.Tuple\x12\x11\n\tstmt_hash\x18\x03 \x01(\x0c\x62\x06proto3',
    dependencies=[
        proto_dot_lib_dot_petlib_dot_bn__pb2.DESCRIPTOR,
        proto_dot_lib_dot_python_dot_tuple__pb2.DESCRIPTOR,
    ],
)
# Descriptor for the NIZK message: challenge (petlib Bn), responses (python
# Tuple), stmt_hash (bytes).
_NIZK = _descriptor.Descriptor(
    name="NIZK",
    full_name="syft.lib.zksk.NIZK",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="challenge",
            full_name="syft.lib.zksk.NIZK.challenge",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="responses",
            full_name="syft.lib.zksk.NIZK.responses",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="stmt_hash",
            full_name="syft.lib.zksk.NIZK.stmt_hash",
            index=2,
            number=3,
            type=12,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"",
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=101,
    serialized_end=209,
)
# Resolve message-typed fields to descriptors from the imported dependencies.
_NIZK.fields_by_name[
    "challenge"
].message_type = proto_dot_lib_dot_petlib_dot_bn__pb2._BN
_NIZK.fields_by_name[
    "responses"
].message_type = proto_dot_lib_dot_python_dot_tuple__pb2._TUPLE
DESCRIPTOR.message_types_by_name["NIZK"] = _NIZK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class generated from the descriptor above.
NIZK = _reflection.GeneratedProtocolMessageType(
    "NIZK",
    (_message.Message,),
    {
        "DESCRIPTOR": _NIZK,
        "__module__": "proto.lib.zksk.nizk_pb2"
        # @@protoc_insertion_point(class_scope:syft.lib.zksk.NIZK)
    },
)
_sym_db.RegisterMessage(NIZK)
# @@protoc_insertion_point(module_scope)
|
from lng_abstract_tag import AbstractTag
class lng_element_tag(AbstractTag):
    """A single element_tag record; thin wrapper over AbstractTag.

    If the caller passes an explicit ``fields`` mapping it is forwarded as
    the field set; otherwise the whole keyword dict is used.
    """

    def __init__(self, app, **args):
        # Fixed: ``dict.has_key`` was removed in Python 3; the ``in``
        # operator is the portable spelling and behaves identically here.
        if "fields" in args:
            AbstractTag.__init__(self, app, "lng_element_tag", args["fields"])
        else:
            AbstractTag.__init__(self, app, "lng_element_tag", args)
from dbobj.dbobj import dbTable
class lng_element_tags(dbTable):
    """
    Collection class for all element_tag data.

    Rows from the ``lng_element_tag`` table are materialized as
    ``lng_element_tag`` instances (passed to dbTable as the row class).
    """
    def __init__(self, app):
        dbTable.__init__(self, app, "lng_element_tag", lng_element_tag)
__copyright__="""
/***************************************************************************
copyright : (C) 2000 by Boudewijn Rempt
see copyright notice for license
email : boud@rempt.xs4all.nl
Revision : $Revision: 1.2 $
Last edited : $Date: 2002/11/16 13:43:59 $
***************************************************************************/
"""
|
# -*- coding: utf-8 -*-
import os.path
# Project root: two directory levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Database connection parameters, defined per environment.
mysql_name = 'z_db'
mysql_user = 'root'
# NOTE(review): credentials are committed in source; move them to the
# environment or an untracked local settings file.
mysql_pass = '888888'
mysql_host = ''    # master host; empty string = localhost
mysql_host_s = ''  # slave host; empty string = localhost
mysql_port = ''    # empty string = MySQL default port
# NOTE(review): DEBUG/TEMPLATE_DEBUG must be False in production.
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
ADMINS = ()
MANAGERS = ADMINS
DATABASES = {
    # Master connection: used for writes (see DATABASE_ROUTERS below).
    'default': {
        'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': mysql_name,      # Or path to database file if using sqlite3.
        'USER': mysql_user,      # Not used with sqlite3.
        'PASSWORD': mysql_pass,  # Not used with sqlite3.
        'HOST': mysql_host,      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': mysql_port,      # Set to empty string for default. Not used with sqlite3.
    },
    # Slave connection: same schema on a different host; presumably a read
    # replica selected by MasterSlaveRouter -- confirm in that router.
    'slave': {
        'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': mysql_name,      # Or path to database file if using sqlite3.
        'USER': mysql_user,      # Not used with sqlite3.
        'PASSWORD': mysql_pass,  # Not used with sqlite3.
        'HOST': mysql_host_s,    # Set to empty string for localhost. Not used with sqlite3.
        'PORT': mysql_port,      # Set to empty string for default. Not used with sqlite3.
    }
}
DATABASE_ROUTERS = ['zjblog.masterslaverouter.MasterSlaveRouter']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Shanghai'
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'  # sets the language of the Django UI
#SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
#MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
#MEDIA_URL = ''
#STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
    #'/var/www/static/',
)
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
# Make this unique, and don't share it with anybody.
# NOTE(review): the SECRET_KEY is committed in source; rotate it and load it
# from the environment instead.
SECRET_KEY = 'app!kn5fj5zlat%3gs)cu73be#mc3y+%$me5$dw6s2$(%i*tps'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    #'django.middleware.csrf.CsrfResponseMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    #'django.contrib.flatpages.middleware.flatpageFallbackMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # Fixed: BASE_DIR was quoted ('BASE_DIR'), so templates resolved against
    # a literal "BASE_DIR" directory relative to the CWD instead of the
    # project root computed at the top of this file.
    os.path.join(BASE_DIR, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    #'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.sitemaps',
    'django.contrib.staticfiles',
    'mptt',     # django-mptt
    'zjblog',   # the blog application itself
    'taggit',   # django-taggit
    'captcha',
    # Django admin (enabled).
    'django.contrib.admin',
)
|
# lint-amnesty, pylint: disable=missing-module-docstring
import csv
from logging import getLogger
from django import forms
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.auth import get_user_model
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from .models import ExternalId, ExternalIdType
User = get_user_model()
logger = getLogger(__name__)
class CsvImportForm(forms.Form):
    """Admin upload form: a CSV of user ids plus the ExternalIdType to create."""
    csv_file = forms.FileField(label='CSV File')
    id_type = forms.ModelChoiceField(
        label='External ID Type',
        queryset=ExternalIdType.objects.all()
    )
@admin.register(ExternalId)
class ExternalIdAdmin(admin.ModelAdmin):
    """Admin for ExternalId with a bulk "generate from CSV" action.

    The uploaded CSV must have a single column headed ``ID`` containing
    integer user ids; an ExternalId of the chosen type is created for each
    existing user, and a summary is shown and logged.
    """
    change_list_template = 'admin/external_user_ids/generate_external_user_ids.html'
    list_display = ('user', 'external_user_id', 'external_id_type')
    template = 'openedx/core/djangoapps/external_user_ids/templates/admin/generate_external_ids_template.html'

    def get_urls(self):
        """Prepend the bulk-generate endpoint to the stock admin urls."""
        urls = super().get_urls()
        custom_urls = [
            # NOTE(review): django.conf.urls.url is deprecated; migrate to
            # django.urls.re_path when the project's Django version allows.
            url(
                r'^bulk_generate_external_ids/$',
                self.admin_site.admin_view(self.generate_ids_form),
                name='bulk_generate_external_ids'
            ),
        ]
        return custom_urls + urls

    def _generate_results_msg(self, user_id_list, unknown_users, created_id_list, existing_id):
        """Summarize a bulk-generate run for logging and the admin banner."""
        # Implicit string concatenation instead of '+' between f-strings;
        # the resulting text is unchanged.
        return (
            f'Attempted to create for: {user_id_list}\n'
            f'Could not find: {unknown_users}\n'
            f'Created External IDs for: {created_id_list}\n'
            f'External IDs already exist for: {existing_id}\n'
        )

    def process_generate_ids_request(self, user_id_list, id_type, request, redirect_url):
        """Create missing ExternalIds for user_id_list, report, and redirect."""
        created_id_list = []
        existing_id = []
        user_list = User.objects.filter(
            id__in=user_id_list
        )
        for user in user_list:
            _external_id, created = ExternalId.objects.get_or_create(
                user=user,
                external_id_type=id_type,
            )
            if created:
                created_id_list.append(user.id)
            else:
                existing_id.append(user.id)
        # Ids neither created nor already present belong to unknown users.
        found_user_ids = created_id_list + existing_id
        unknown_users = list(set(user_id_list) - set(found_user_ids))
        result_msg = self._generate_results_msg(user_id_list, unknown_users, created_id_list, existing_id)
        logger.info(result_msg)
        self.message_user(
            request,
            result_msg,
            level=messages.SUCCESS)
        return HttpResponseRedirect(redirect_url)

    def _render_form(self, request, form):
        """Render the CSV upload form page."""
        context = {
            'form': form
        }
        return render(
            request,
            'admin/external_user_ids/generate_external_ids_form.html',
            context
        )

    def generate_ids_form(self, request):
        """GET: show the upload form. POST: validate the CSV and generate ids."""
        if request.method == 'POST':
            redirect_url = reverse(
                'admin:external_user_ids_externalid_changelist',
                current_app=self.admin_site.name,
            )
            upload_file = request.FILES.get('csv_file')
            id_type = request.POST.get('id_type')
            if not upload_file or not id_type:
                self.message_user(request, 'CSV file and type are required.', level=messages.ERROR)
                return HttpResponseRedirect(redirect_url)
            try:
                id_type = ExternalIdType.objects.get(id=id_type)
            except ExternalIdType.DoesNotExist:
                self.message_user(request, 'ID Type selected does not exist', level=messages.ERROR)
                return HttpResponseRedirect(redirect_url)
            reader = csv.reader(upload_file.read().decode('utf-8').splitlines())
            headers = next(reader, None)
            # Guard against an empty file (headers is None) before calling
            # len(); previously an empty upload raised TypeError. Also fixed
            # the user-facing typo "To many" -> "Too many".
            if not headers or len(headers) != 1 or 'ID' not in headers:
                self.message_user(
                    request,
                    'File is incorrectly formatted. Too many columns or incorrectly named ID column',
                    level=messages.ERROR
                )
                return HttpResponseRedirect(redirect_url)
            try:
                # Skip blank rows; previously a trailing empty line raised
                # an unhandled IndexError.
                user_ids = [int(row[0]) for row in reader if row]
            except ValueError:
                self.message_user(
                    request,
                    'Data is incorrectly formatted. All ids must be integers',
                    level=messages.ERROR
                )
                return HttpResponseRedirect(redirect_url)
            return self.process_generate_ids_request(user_ids, id_type, request, redirect_url)
        form = CsvImportForm()
        return self._render_form(request, form)
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
from tornado import gen
# import sys
# import os
# sys.path.append(os.path.abspath('/data/stock/libs'))
import libs.stock_web_dic as stock_web_dic
import web.base as webBase
import logging
import re
# Render the data-editor page for one configured table.
class GetEditorHtmlHandler(webBase.BaseHandler):
    """Serves data_editor.html for the table named in the query string."""
    @gen.coroutine
    def get(self):
        # ``table_name`` selects one of the preconfigured StockWeb specs.
        name = self.get_argument("table_name", default=None, strip=False)
        stockWeb = stock_web_dic.STOCK_WEB_DATA_MAP[name]
        # self.uri_ = ("self.request.url:", self.request.uri)
        # print self.uri_
        self.render("data_editor.html", stockWeb=stockWeb, leftMenu=webBase.GetLeftMenu(self.request.uri))
# Build a SQL fragment of "`key` = 'value'" pairs joined by join_string.
def genSql(primary_key, param_map, join_string):
    """Return " `k` = 'v' " terms for every key in *primary_key*, joined by
    *join_string* (e.g. "and" for WHERE clauses, "," for SET lists).

    NOTE(review): values are interpolated directly into the SQL text, so
    this is only safe for trusted input; see the handlers below.
    """
    pairs = [" `%s` = '%s' " % (key, param_map[key]) for key in primary_key]
    return join_string.join(pairs)
# Persist create/edit/remove requests posted from the data-editor page.
class SaveEditorHandler(webBase.BaseHandler):
    """Handles editor-style POSTs (create/edit/remove) for a configured table.

    NOTE(review): all three branches build SQL by interpolating submitted
    values directly into the statement text -- vulnerable to SQL injection.
    Prefer parameterized queries (self.db.execute(sql, params)).
    """
    @gen.coroutine
    def post(self):
        action = self.get_argument("action", default=None, strip=False)
        logging.info(action)
        table_name = self.get_argument("table_name", default=None, strip=False)
        stockWeb = stock_web_dic.STOCK_WEB_DATA_MAP[table_name]
        # Scratch map: column name -> submitted value.
        param_map = {}
        # Request arguments arrive keyed like data[<rowid>][<column>].
        for item, val in self.request.arguments.items():
            # Regex-extract the column name, e.g. "code" from data[1112][code].
            item_key = re.search(r"\]\[(.*?)\]", item)
            if item_key:
                tmp_1 = item_key.group()
                if tmp_1:
                    tmp_1 = tmp_1.replace("][", "").replace("]", "")
                    param_map[tmp_1] = val[0].decode("utf-8")
        #logging.info(param_map)
        if action == "create":
            logging.info("###########################create")
            # Build the backtick-quoted column list and matching value list.
            tmp_columns = "`, `".join(stockWeb.columns)
            tmp_values = []
            for tmp_key in stockWeb.columns:
                tmp_values.append(param_map[tmp_key])
            # Compose and run the INSERT statement.
            tmp_values2 = "', '".join(tmp_values)
            insert_sql = " INSERT INTO %s (`%s`) VALUES('%s'); " % (stockWeb.table_name, tmp_columns, tmp_values2)
            logging.info(insert_sql)
            try:
                self.db.execute(insert_sql)
            except Exception as e:
                # Report DB errors back to the editor as {"error": ...}.
                err = {"error": str(e)}
                logging.info(err)
                self.write(err)
                return
        elif action == "edit":
            logging.info("###########################edit")
            # Build the SET list and the primary-key WHERE clause.
            tmp_update = genSql(stockWeb.columns, param_map, ",")
            tmp_where = genSql(stockWeb.primary_key, param_map, "and")
            # Compose and run the UPDATE statement.
            update_sql = " UPDATE %s SET %s WHERE %s " % (stockWeb.table_name, tmp_update, tmp_where)
            logging.info(update_sql)
            try:
                self.db.execute(update_sql)
            except Exception as e:
                err = {"error": str(e)}
                logging.info(err)
                self.write(err)
                return
        elif action == "remove":
            logging.info("###########################remove")
            # Build the primary-key WHERE clause.
            tmp_where = genSql(stockWeb.primary_key, param_map, "and")
            # Compose and run the DELETE statement.
            delete_sql = " DELETE FROM %s WHERE %s " % (stockWeb.table_name, tmp_where)
            logging.info(delete_sql)
            try:
                self.db.execute(delete_sql)
            except Exception as e:
                err = {"error": str(e)}
                logging.info(err)
                self.write(err)
                return
        # Success: respond with an empty data array (editor protocol).
        self.write("{\"data\":[{}]}")
|
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
# Static city -> UTC-offset lookup used by ActionShowTimeZone.
# Fixed data: Surat and Mumbai are both IST (UTC+5:30), and New York's
# previous value "UTC-56:30" was not a valid offset (Eastern Standard Time
# is UTC-5:00). London is left as UTC+1:00 (BST) -- confirm whether DST or
# standard time is intended.
timezones = {
    "London": "UTC+1:00",
    "Surat": "UTC+5:30",
    "Mumbai": "UTC+5:30",
    "New York": "UTC-5:00",
}
class ActionShowTimeZone(Action):
    """Custom Rasa action: reply with the (hard-coded) time zone of the
    city held in the ``city`` slot."""

    def name(self) -> Text:
        return "action_show_time_zone"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        city = tracker.get_slot("city")
        timezone = timezones.get(city)
        # Build the reply first, then send it with a single utterance call.
        if timezone is None:
            reply = "Time zone of {} is not known to me,Sorry!.".format(city)
        else:
            reply = "Time zone of {} is {}.".format(city, timezone)
        dispatcher.utter_message(text=reply)
        return []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.