hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7008e5c02694d2642ce94ad63ea171b35f0c2ae5 | 1,549 | py | Python | src/pretix/helpers/jsonlogic_query.py | prereg/prereg | 5000c279a801fa2260009b15dd90e3bd4f447785 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/helpers/jsonlogic_query.py | prereg/prereg | 5000c279a801fa2260009b15dd90e3bd4f447785 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/helpers/jsonlogic_query.py | prereg/prereg | 5000c279a801fa2260009b15dd90e3bd4f447785 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import logging
from datetime import timedelta
from django.db.models import Func, Value
# Module-level logger for this helper module (standard per-module logger pattern).
logger = logging.getLogger(__name__)
class Equal(Func):
    """Infix SQL comparison rendered as ``lhs = rhs``.

    Uses Django's ``Func`` with an empty function name so only the two
    compiled expressions joined by the operator are emitted.
    """

    function = ''
    arity = 2
    arg_joiner = ' = '
class GreaterThan(Func):
    """Infix SQL comparison rendered as ``lhs > rhs``."""

    function = ''
    arity = 2
    arg_joiner = ' > '
class GreaterEqualThan(Func):
    """Infix SQL comparison rendered as ``lhs >= rhs``."""

    function = ''
    arity = 2
    arg_joiner = ' >= '
class LowerEqualThan(Func):
    """Infix SQL comparison rendered as ``lhs <= rhs``.

    Bug fix: the joiner was ``' < '`` — a strict less-than, byte-identical
    to ``LowerThan`` below — which contradicted this class's name and
    silently excluded rows where the two sides are equal. It now emits
    ``<=`` as the name promises.
    """

    arg_joiner = ' <= '
    arity = 2
    function = ''
class LowerThan(Func):
    """Infix SQL comparison rendered as ``lhs < rhs``."""

    function = ''
    arity = 2
    arg_joiner = ' < '
class InList(Func):
    """Render ``lhs IN (a, b, ...)`` for a static right-hand-side list.

    The second source expression must be a ``Value`` wrapping a ``list`` or
    ``tuple``; dynamic (expression-based) right-hand sides are rejected with
    ``TypeError``.
    """

    arity = 2

    def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
        connection.ops.check_expression_support(self)
        # This ignores the special case for databases which limit the number of
        # elements which can appear in an 'IN' clause, which hopefully is only Oracle.
        lhs, lhs_params = compiler.compile(self.source_expressions[0])
        rhs_expr = self.source_expressions[1]
        # Bug fix: the original guard joined the two checks with ``and``, so a
        # non-Value expression that happened to expose a list-valued ``.value``
        # slipped past it, and a non-Value without ``.value`` raised an
        # AttributeError instead of the intended TypeError. Require a Value
        # *and* a list/tuple payload; ``or`` short-circuits before ``.value``
        # is touched on a non-Value.
        if not isinstance(rhs_expr, Value) or not isinstance(rhs_expr.value, (list, tuple)):
            raise TypeError(f'Dynamic right-hand-side currently not implemented, found {type(rhs_expr)}')
        # One '%s' placeholder per element; the raw values become bind params.
        placeholders = ['%s' for _ in rhs_expr.value]
        rhs_params = list(rhs_expr.value)
        return '%s IN (%s)' % (lhs, ', '.join(placeholders)), lhs_params + rhs_params
def tolerance(b, tol=None, sign=1):
    """Shift *b* by ``sign * tol`` minutes.

    *tol* may be anything ``float()`` accepts (e.g. an int or a numeric
    string). When *tol* is falsy (``None``, ``0``, ``""``), *b* is returned
    unchanged.
    """
    if not tol:
        return b
    offset = timedelta(minutes=sign * float(tol))
    return b + offset
| 25.816667 | 129 | 0.656553 |
588026f20248c378c7a8eb883e0574d96ead26d2 | 904 | py | Python | 03_perfectly_spherical_houses_in_vacuum.py | KanegaeGabriel/advent-of-code-2015 | e6c7beb18fb708d3494125b189530cd0ed4c5d57 | [
"MIT"
] | 1 | 2020-10-22T18:07:44.000Z | 2020-10-22T18:07:44.000Z | 03_perfectly_spherical_houses_in_vacuum.py | KanegaeGabriel/advent-of-code-2015 | e6c7beb18fb708d3494125b189530cd0ed4c5d57 | [
"MIT"
] | null | null | null | 03_perfectly_spherical_houses_in_vacuum.py | KanegaeGabriel/advent-of-code-2015 | e6c7beb18fb708d3494125b189530cd0ed4c5d57 | [
"MIT"
] | null | null | null | #########################################################
# --- Day 3: Perfectly Spherical Houses in a Vacuum --- #
#########################################################
import AOCUtils
#########################################################
directions = AOCUtils.loadInput(3)

moves = {"^": (0, -1), "v": (0, 1), ">": (1, 0), "<": (-1, 0)}

# Part 1: a single Santa follows every instruction, recording each house visited.
pos_x, pos_y = 0, 0
houses = {(pos_x, pos_y)}
for step in directions:
    delta_x, delta_y = moves[step]
    pos_x, pos_y = pos_x + delta_x, pos_y + delta_y
    houses.add((pos_x, pos_y))

print("Part 1: {}".format(len(houses)))
# Part 2: Santa takes the even-indexed instructions, Robo-Santa the
# odd-indexed ones; both start at the origin and share the visited set.
houses = set()
for offset in (0, 1):
    pos_x, pos_y = 0, 0
    houses.add((pos_x, pos_y))
    for step in directions[offset::2]:
        delta_x, delta_y = moves[step]
        pos_x, pos_y = pos_x + delta_x, pos_y + delta_y
        houses.add((pos_x, pos_y))

print("Part 2: {}".format(len(houses)))
AOCUtils.printTimeTaken() | 23.179487 | 62 | 0.460177 |
fdee06f67f88086f3e205c6bc60f5ead704f97d6 | 13,103 | py | Python | Sketches/MPS/BugReports/FixTests/Axon/Axon/Introspector.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/MPS/BugReports/FixTests/Axon/Axon/Introspector.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/MPS/BugReports/FixTests/Axon/Axon/Introspector.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | #!/usr/bin/env python
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===============================================
Detecting the topology of a running Axon system
===============================================
The Introspector is a component that introspects the current local topology of
an Axon system - that is what components there are and how they are wired up.
It continually outputs any changes that occur to the topology.
Example Usage
-------------
Introspect and display whats going on inside the system::
MyComplexSystem().activate()
pipeline( Introspector(),
AxonVisualiserServer(noServer=True),
)
More detail
-----------
Once activated, this component introspects the current local topology of an Axon
system.
Local? This component examines its scheduler to find components and postmen.
It then examines them to determine their inboxes and outboxes and the linkages
between them. In effect, it determines the current topology of the system.
If this component is not active, then it will see no scheduler and will report
nothing.
What is output is how the topology changes. Immediately after activation, the
topology is assumed to be empty, so the first set of changes describes adding
nodes and linkages to the topology to build up the current state of it.
Subsequent output just describes the changes - adding or deleting linkages and
nodes as appropriate.
Nodes in the topology represent components and postboxes. A linkage between
a component node and a postbox node expresses the fact that that postbox belongs
to that component. A linkage between two postboxes represents a linkage in the
Axon system, from one component to another.
This topology change data is output as string containing one or more lines. It
is output through the "outbox" outbox. Each line may be one of the following:
* `"DEL ALL"`
- the first thing sent immediately after activation - to ensure that
the receiver of this data understand that we are starting from nothing
* `"ADD NODE <id> <name> randompos component"`
* `"ADD NODE <id> <name> randompos inbox"`
* `"ADD NODE <id> <name> randompos outbox"`
- an instruction to add a node to the topology, representing a component,
inbox or outbox. <id> is a unique identifier. <name> is a 'friendly'
textual label for the node.
* `"DEL NODE <id>"`
- an instruction to delete a node, specified by its unique id
* `"ADD LINK <id1> <id2>"`
- an instruction to add a link between the two identified nodes. The link is
deemed to be directional, from <id1> to <id2>
* `"DEL LINK <id1> <id2>"`
- an instruction to delete any link between the two identified nodes. Again,
the directionality is from <id1> to <id2>.
the <id> and <name> fields may be encapsulated in double quote marks ("). This
will definitely be so if they contain space characters.
If there are no topology changes then nothing is output.
This component ignores anything arriving at its "inbox" inbox.
If a shutdownMicroprocess message is received on the "control" inbox, it is sent
on to the "signal" outbox and the component will terminate.
How does it work?
-----------------
Every execution timeslice, Introspector queries its scheduler to obtain a list
of all components. It then queries the postoffice in each component to build a
picture of all linkages between components. It also builds a list of all inboxes
and outboxes on each component.
This is mapped to a list of nodes and linkages. Nodes being components and
postboxes; and linkages being what postboxes belong to what components, and what
postboxes are linked to what postboxes.
This is compared against the nodes and linkages from the previous cycle of
processing to determine what has changed. The changes are then output as a
sequence of "ADD NODE", "DEL NODE", "ADD LINK" and "DEL LINK" commands.
"""
import Axon.Component as Component
import Axon.Scheduler as Scheduler
import Axon.Ipc as Ipc
class Introspector(Component.component):
"""\
Introspector() -> new Introspector component.
Outputs topology (change) data describing what components there are, and
how they are wired inside the running Axon system.
"""
Inboxes = { "inbox" : "NOT USED",
"control" : "Shutdown signalling",
}
Outboxes = { "outbox" : "Topology (change) data describing the Axon system",
"signal" : "Shutdown signalling",
}
# passthrough==0 -> outbox > inbox
# passthrough==1 -> inbox > inbox
# passthrough==2 -> outbox > outbox
# srcBoxType/dstBoxType map a linkage's passthrough mode to the box type
# ("i" = inbox, "o" = outbox) at the source and sink ends respectively.
srcBoxType = { 0:"o", 1:"i", 2:"o" }
dstBoxType = { 0:"i", 1:"i", 2:"o" }
def main(self):
"""Main loop."""
# reset the receiving 'axon visualiser'
self.send("DEL ALL\n", "outbox")
yield 1
nodes = dict()
linkages = dict()
while 1:
# shutdown if requested
if self.dataReady("control"):
data = self.recv("control")
if isinstance(data, Ipc.shutdownMicroprocess):
self.send(data, "signal")
return
# Only do work when attached to a real scheduler; otherwise this cycle
# falls straight through to the trailing yield.
if isinstance(self.scheduler, Scheduler.scheduler):
oldNodes = nodes
oldLinkages = linkages
nodes = dict()
linkages = dict()
components, postboxes,linkages = self.introspect()
# now it is safe to yield if we wish to, since we've not snapshotted all system state we need
# now go through building the new set of nodes
# if we find one already in oldNodes, delete it from there,
# so that at the end oldNodes contains only 'differences'
# if not already there, then add it to the 'addmsgs' output
# if the node being added is a postbox, then also build the
# 'add link' message to join it to its parent component
addnodemsgs = ""
delnodemsgs = ""
addlinkmsgs = ""
dellinkmsgs = ""
# build topology nodes - one node per component, one per postbox on each component
for id in components.keys():
if id not in nodes: # incase component activated twice (twice in scheduler.threads)
name = components[id]
nodes[ id ] = name
if id in oldNodes:
del oldNodes[id]
else:
addnodemsgs += 'ADD NODE "'+str(id)+'" "'+str(name)+'" randompos component\n'
# build nodes for postboxes, and also link them to the components to which they belong
for id in postboxes:
if id not in nodes:
# NOTE(review): 'name' here is left over from the components loop above,
# so the stored value is stale; harmless, because only the KEYS of
# nodes/oldNodes are ever consulted later in this method.
nodes[ id ] = name
if id in oldNodes:
del oldNodes[id]
else:
(cid, io, name) = id
addnodemsgs += 'ADD NODE "'+str(id)+'" "'+str(name)+'" randompos '
if io=="i":
addnodemsgs += "inbox\n"
else:
addnodemsgs += "outbox\n"
addnodemsgs += 'ADD LINK "'+str(cid)+'" "'+str(id)+'"\n'
# now addmsgs contains msgs to create new nodes
# and oldNodes only contains nodes that no longer exist
for id in oldNodes.keys():
delnodemsgs += 'DEL NODE "'+str(id)+'"\n'
# now go through inter-postbox linkages and do the same as we did for nodes
# note, we check not only that the link exists, but that it still goes to the same thing!
# otherwise leave the old link to be destroyed, and add a new one
for (src,dst) in linkages.keys():
if (src, dst) in oldLinkages:
del oldLinkages[(src,dst)]
else:
addlinkmsgs += 'ADD LINK "'+str(src)+'" "'+str(dst)+'"\n'
# delete linkages that no longer exist
for (src,dst) in oldLinkages.keys():
dellinkmsgs += 'DEL LINK "'+str(src)+'" "'+str(dst)+'"\n'
# note: order of the final messages is important - delete old things
# before adding new
# and del links before nodes and add nodes before links
msg = dellinkmsgs + delnodemsgs + addnodemsgs + addlinkmsgs
if msg.strip() != "":
self.send(msg, "outbox")
yield 1
def introspect(self):
"""\
introspect() -> components, postboxes, linkages
Returns the current set of components, postboxes and interpostbox linkages.
- components -- a dictionary, containing components as keys
- postboxes -- a list of (component.id, type, "boxname") tuples, where type="i" (inbox) or "o" (outbox)
- linkages -- a dictionary containing (postbox,postbox) tuples as keys, where postbox is a tuple from the postboxes list
"""
# fetch components currently active with the scheduler
# (note that this is not necessarily all components - as they may have only just been
# activated, in which case they may not register yet)
threads = self.scheduler.listAllThreads()
# Map each component object to its (id, name) pair.
components = dict([ (p,(p.id,p.name)) for p in threads if isinstance(p, Component.component) ])
# go through all components' postoffices and find all linkages
linkages = {}
components_to_scan = list(components.keys()) # list
for postoffice in [ c.postoffice for c in components_to_scan ]:
for link in postoffice.linkages:
src = (link.source.id, Introspector.srcBoxType[link.passthrough], link.sourcebox)
dst = (link.sink.id , Introspector.dstBoxType[link.passthrough], link.sinkbox)
# value is a dummy flag; 'linkages' is used purely as a set of (src, dst) keys
linkages[(src,dst)] = 1
# some components may not have been detected from the scheduler
# but maybe linked to, so we need to detect them now
# 1) append to the list we're scanning now
# 2) add to the dictionary of components we're building
if link.source not in components:
components_to_scan.append(link.source)
components[link.source] = (link.source.id, link.source.name)
if link.sink not in components:
components_to_scan.append(link.sink)
components[link.sink] = (link.sink.id, link.sink.name)
# now we have a comprehensive list of all components (not just those the scheduler
# admits to!) we can now build the list of all postboxes
postboxes = []
for c in components.keys():
postboxes += [ (c.id, "i", boxname) for boxname in c.inboxes.keys() ]
postboxes += [ (c.id, "o", boxname) for boxname in c.outboxes.keys() ]
# strip the direct reference to component objects from the dictionary, leaving
# just a mapping from 'id' to 'name'
cdict = dict([ components[c] for c in components.keys() ])
return cdict, postboxes, linkages
# NOTE(review): presumably consumed by Kamaelia's component-discovery
# machinery -- verify against the Kamaelia packaging conventions.
__kamaelia_components__ = ( Introspector, )
# Manual smoke test: wire this Introspector's output into a console echoer
# and run the scheduler, so the topology messages are printed to stdout.
if __name__ == '__main__':
i = Introspector()
i.activate()
from Kamaelia.Util.ConsoleEcho import consoleEchoer
e = consoleEchoer()
e.activate()
i.link((i,"outbox"), (e, "inbox"))
print("You should see the Introspector find that it and a consoleEchoer component exist.")
print("We both have inbox, control, signal and outbox postboxes")
print("The Introspector's outbox is linked to the consoleEchoer's inbox")
print()
Scheduler.scheduler.run.runThreads(slowmo=0)
| 41.596825 | 131 | 0.600168 |
6220745990745355e096f0d3dd8aaf513161e450 | 2,514 | py | Python | test/test_texts_api.py | dialmycalls/python-sdk-v2 | ab6ac61d305ea1729b618bc2530d6101136aa6ea | [
"Apache-2.0"
] | 2 | 2020-07-29T08:51:36.000Z | 2021-01-21T11:18:24.000Z | test/test_texts_api.py | dialmycalls/python-sdk-v2 | ab6ac61d305ea1729b618bc2530d6101136aa6ea | [
"Apache-2.0"
] | null | null | null | test/test_texts_api.py | dialmycalls/python-sdk-v2 | ab6ac61d305ea1729b618bc2530d6101136aa6ea | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
DialMyCalls API
The DialMyCalls API
OpenAPI spec version: 2.0.1
Contact: support@dialmycalls.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import dialmycalls_client
from dialmycalls_client.rest import ApiException
from dialmycalls_client.apis.texts_api import TextsApi
class TestTextsApi(unittest.TestCase):
""" TextsApi unit test stubs """
def setUp(self):
self.api = dialmycalls_client.apis.texts_api.TextsApi()
def tearDown(self):
pass
def test_cancel_text_by_id(self):
"""
Test case for cancel_text_by_id
Cancel Text
"""
pass
def test_create_text(self):
"""
Test case for create_text
Create Text
"""
pass
def test_delete_incoming_text_by_id(self):
"""
Test case for delete_incoming_text_by_id
Delete Incoming Text
"""
pass
def test_get_incoming_text_by_id(self):
"""
Test case for get_incoming_text_by_id
Get Incoming Text
"""
pass
def test_get_incoming_texts(self):
"""
Test case for get_incoming_texts
List Incoming Texts
"""
pass
def test_get_short_codes(self):
"""
Test case for get_short_codes
List Shortcodes
"""
pass
def test_get_text_by_id(self):
"""
Test case for get_text_by_id
Get Text
"""
pass
def test_get_text_recipients_by_text_id(self):
"""
Test case for get_text_recipients_by_text_id
Get Text Recipients
"""
pass
def test_get_texts(self):
"""
Test case for get_texts
List Texts
"""
pass
# Allow running these stubs directly with the stdlib unittest runner.
if __name__ == '__main__':
unittest.main()
| 20.95 | 76 | 0.631663 |
030f068479e68b67e060c0a9002a7d3edf40d028 | 3,626 | py | Python | bindings/python/ensmallen/datasets/string/candidatusuhrbacteriabacteriumrifoxyb2full4511.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/candidatusuhrbacteriabacteriumrifoxyb2full4511.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/candidatusuhrbacteriabacteriumrifoxyb2full4511.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Candidatus Uhrbacteria bacterium RIFOXYB2_FULL_45_11.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CandidatusUhrbacteriaBacteriumRifoxyb2Full4511(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Retrieve the Candidatus Uhrbacteria bacterium RIFOXYB2_FULL_45_11
    graph from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
    preprocess: bool = True
        Whether to preprocess the graph for optimal loading time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download and preprocess files only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve. Available versions:
        "homology.v11.5", "physical.links.v11.5", "links.v11.5".
    additional_graph_kwargs: Dict
        Additional keyword arguments forwarded to the graph constructor.

    Returns
    -----------------------
    Instance of the Candidatus Uhrbacteria bacterium RIFOXYB2_FULL_45_11
    graph.

    References
    ---------------------
    Please cite: Szklarczyk et al., "STRING v11: protein--protein
    association networks with increased coverage, supporting functional
    discovery in genome-wide experimental datasets", Nucleic Acids
    Research 47(D1), D607--D613, 2019.
    """
    retrieval = AutomaticallyRetrievedGraph(
        graph_name="CandidatusUhrbacteriaBacteriumRifoxyb2Full4511",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retrieval()
| 34.533333 | 223 | 0.689741 |
bf8448e54c38d8540dd24df60f9b7b61bfa2116b | 8,776 | py | Python | backend/src/contaxy/schema/deployment.py | ml-tooling/contaxy | 3317a866c2ef641667a2d318885c8b0f5096b56a | [
"MIT"
] | 3 | 2021-10-17T23:25:05.000Z | 2022-02-03T21:40:59.000Z | backend/src/contaxy/schema/deployment.py | ml-tooling/contaxy | 3317a866c2ef641667a2d318885c8b0f5096b56a | [
"MIT"
] | 14 | 2021-11-09T15:24:29.000Z | 2022-03-11T13:26:04.000Z | backend/src/contaxy/schema/deployment.py | ml-tooling/contaxy | 3317a866c2ef641667a2d318885c8b0f5096b56a | [
"MIT"
] | 3 | 2022-01-27T08:31:57.000Z | 2022-02-11T13:38:00.000Z | from datetime import datetime
from enum import Enum
from typing import Dict, List, Optional
from fastapi import Path
from pydantic import BaseModel, Field
from contaxy.schema.shared import Resource, ResourceInput
# Reusable FastAPI ``Path`` parameter declarations for service and job IDs.
SERVICE_ID_PARAM = Path(
...,
title="Service ID",
description="A valid Service ID.",
# TODO: add length restriction
)
JOB_ID_PARAM = Path(
...,
title="Job ID",
description="A valid job ID.",
# TODO: add length restriction
)
class DeploymentType(str, Enum):
    """Enumeration of the deployment kinds used by the schema."""

    CORE_BACKEND = "core-backend"
    SERVICE = "service"
    JOB = "job"
    EXTENSION = "extension"
class DeploymentStatus(str, Enum):
    """Lifecycle states a deployment can be in."""

    # Deployment created, but not ready for usage (alternative naming: waiting).
    PENDING = "pending"
    # Deployment is running and usable.
    RUNNING = "running"
    # Deployment was stopped with a successful exit code (== 0).
    SUCCEEDED = "succeeded"
    # Deployment was stopped with a failure exit code (> 0).
    FAILED = "failed"
    # Deployment was deleted and is now terminating its pods.
    TERMINATING = "terminating"
    # Deployment state cannot be obtained.
    UNKNOWN = "unknown"

    # Candidate states considered but not modeled yet:
    # PAUSED ("paused", only on docker?), KILLED, STARTING, STOPPED, CREATED,
    # REBOOTING, TERMINATED (container can't be restarted), ERROR, SUSPENDED.
class DeploymentCompute(BaseModel):
    """Resource requests and limits for a deployment.

    ``min_*`` values are guaranteed reservations; ``max_*`` values are hard
    limits (0 means unlimited where noted). Memory and size values are in
    megabytes, lifetimes in seconds.

    Fix: corrected typos in the user-facing field descriptions ("atleast",
    "this deployments", "Even so") which surface verbatim in the generated
    OpenAPI documentation.
    """

    min_cpus: Optional[int] = Field(
        None,
        example=2,
        ge=0,
        description="Minimum number of CPU cores required by this deployment. The system will make sure that at least this amount is available to the deployment.",
    )
    max_cpus: Optional[int] = Field(
        None,
        example=4,
        ge=0,
        description="Maximum number of CPU cores. Even though the system will try to provide the specified amount, it's only guaranteed that the deployment cannot use more. 0 means unlimited.",
    )
    min_memory: Optional[int] = Field(
        None,
        example=4000,
        ge=5,  # 4 is the minimal RAM needed for containers
        description="Minimum amount of memory in Megabyte required by this deployment. The system will make sure that at least this amount is available to the deployment.",
    )
    max_memory: Optional[int] = Field(
        None,
        example=8000,
        ge=0,
        description="Maximum amount of memory in Megabyte. Even though the system will try to provide the specified amount, it's only guaranteed that the deployment cannot use more. 0 means unlimited.",
    )  # in MB
    min_gpus: Optional[int] = Field(
        None,
        example=1,
        ge=0,
        description="Minimum number of GPUs required by this deployment. The system will make sure that at least this amount is available to the deployment.",
    )
    max_gpus: Optional[int] = Field(
        None,
        example=2,
        ge=0,
        description="Maximum number of GPUs. Even though the system will try to provide the specified amount, it's only guaranteed that the deployment cannot use more.",
    )
    volume_path: Optional[str] = Field(
        None,
        example="/path/to/data",
        description="Container internal directory that should mount a volume for data persistence.",
    )
    # TODO: min_volume_size
    max_volume_size: Optional[int] = Field(
        None,
        example=32000,
        ge=1,
        description="Maximum volume size in Megabyte. This is only applied in combination with volume_path.",
    )
    # TODO: min_container_size
    max_container_size: Optional[int] = Field(
        None,
        example=32000,
        ge=1,
        description="Maximum container size in Megabyte. The deployment will be killed if it grows above this limit.",
    )
    # TODO: min_replicas
    max_replicas: Optional[int] = Field(
        1,
        example=2,
        ge=1,
        description="Maximum number of deployment instances. The system will make sure to optimize the deployment based on the available resources and requests. Use 1 if the deployment is not scalable.",
    )
    # TODO: use timedelta
    min_lifetime: Optional[int] = Field(
        None,
        example=86400,
        description="Minimum guaranteed lifetime in seconds. Once the lifetime is reached, the system is allowed to kill the deployment in case it requires additional resources.",
    )
class DeploymentBase(BaseModel):
    """Fields shared by every deployment variant (services and jobs).

    Fix: corrected the user-facing description typo
    "Parmeters (enviornment variables)" -> "Parameters (environment
    variables)", which surfaces verbatim in the generated OpenAPI docs.
    """

    container_image: str = Field(
        ...,
        example="hello-world:latest",
        max_length=2000,
        description="The container image used for this deployment.",
    )
    parameters: Optional[Dict[str, str]] = Field(
        None,
        example={"TEST_PARAM": "param-value"},
        description="Parameters (environment variables) for this deployment.",
    )
    compute: Optional[DeploymentCompute] = Field(
        None,
        description="Compute instructions and limitations for this deployment.",
    )
    command: Optional[str] = Field(
        None,
        description="Command to run within the deployment. This overwrites the existing entrypoint.",
    )
    requirements: Optional[List[str]] = Field(
        None,
        description="Additional requirements for deployment.",
    )
    endpoints: Optional[List[str]] = Field(
        None,
        example=["8080", "9001/webapp/ui", "9002b"],
        description="A list of HTTP endpoints that can be accessed. This should always have an internal port and can include additional instructions, such as the URL path.",
    )
    # TODO: v2
    # input_files: Optional[List[dict]] = Field(
    #     None,
    #     description="A list of files that should be added to the deployment.",
    # )
    # TODO: v2
    # command_args: Optional[List[str]] = Field(
    #     None,
    #     description="Arguments to use for the command of the deployment. This overwrites the existing arguments.",
    # )
class DeploymentInput(ResourceInput, DeploymentBase):
    """Input model combining ``ResourceInput`` with the shared deployment fields."""

    pass
class Deployment(Resource, DeploymentBase):
    """Deployment resource: the shared deployment fields plus runtime state."""

    started_at: Optional[datetime] = Field(
        None,
        description="Timestamp when the deployment was started.",
    )
    stopped_at: Optional[datetime] = Field(
        None,
        description="Timestamp when the container has stopped.",
    )
    extension_id: Optional[str] = Field(
        None,
        description="The extension ID in case the deployment is deployed via an extension.",
    )
    deployment_type: Optional[DeploymentType] = Field(
        None,
        description="The type of this deployment.",
    )
    status: Optional[DeploymentStatus] = Field(
        None,
        example=DeploymentStatus.RUNNING,
        description="The status of this deployment.",
    )
    internal_id: Optional[str] = Field(
        None,
        example="73d247087fea5bfb3a67e98da6a07f5bf4e2a90e5b52f3c12875a35600818376",
        description="The ID of the deployment on the orchestration platform.",
    )

    # TODO: All labels should be transformed into the metadata or additional_metadata
    # deployment_labels: Optional[Dict[str, str]] = Field(
    #     None,
    #     example={"foo.bar.label": "label-value"},
    #     description="The labels of the deployment on the orchestration platform.",
    # )
    # TODO: should be a debug information.
    # exit_code: Optional[int] = Field(
    #     None,
    #     example=0,
    #     description="The Exit code of the container, in case the container was stopped.",
    # )
class ServiceBase(BaseModel):
    """Service-specific endpoint metadata.

    Fix: corrected the user-facing description typo "prorvides" ->
    "provides" and the stray double period, both of which surface verbatim
    in the generated OpenAPI docs.
    """

    graphql_endpoint: Optional[str] = Field(
        None,
        example="8080/graphql",
        description="GraphQL endpoint.",
    )
    openapi_endpoint: Optional[str] = Field(
        None,
        example="8080/openapi.yaml",
        description="Endpoint that provides an OpenAPI schema definition.",
    )
    health_endpoint: Optional[str] = Field(
        None,
        example="8080/healthz",
        description="The endpoint instruction that can be used for checking the deployment health.",
    )
class ServiceInput(ServiceBase, DeploymentInput):
    """Input model for services: service endpoints plus the deployment input fields."""

    pass
class Service(ServiceBase, Deployment):
    """Service resource: a deployment enriched with the service endpoint fields."""

    pass
class JobBase(BaseModel):
    """Job-specific fields (currently none beyond the shared deployment fields)."""

    pass
class JobInput(JobBase, DeploymentInput):
    """Input model for jobs: job fields plus the deployment input fields."""

    pass
# TODO: v2
# output_files: Optional[List[dict]] = Field(
# None,
# description="A list of container internal files that should be uploaded to the storage once the job has succeeded.",
# )
class Job(JobBase, Deployment):
    """Job resource: a deployment enriched with the job-specific fields."""

    pass
| 34.14786 | 203 | 0.661577 |
affe7129150cd640414b66651c83c7741f24dc6f | 2,612 | py | Python | scripts/lemmatize_doc.py | blodstone/Topic_Summ | 43e578f6567fe4f1fa6d3d32372120e12fb21e12 | [
"MIT"
] | null | null | null | scripts/lemmatize_doc.py | blodstone/Topic_Summ | 43e578f6567fe4f1fa6d3d32372120e12fb21e12 | [
"MIT"
] | null | null | null | scripts/lemmatize_doc.py | blodstone/Topic_Summ | 43e578f6567fe4f1fa6d3d32372120e12fb21e12 | [
"MIT"
] | null | null | null | import re
import json
from stanfordcorenlp import StanfordCoreNLP
# Connects to a locally installed Stanford CoreNLP distribution.
# NOTE: this runs at import time as a module-level side effect.
nlp = StanfordCoreNLP(r'/home/acp16hh/Projects/Others/stanford-corenlp-full-2018-10-05')
# Output directory for the generated .lemma/.token split files.
finished_files_dir = "../data/smallbbc-split"
def parse_sentences(sentences):
    """Annotate *sentences* with CoreNLP and flatten the result.

    Each sentence is wrapped in ``<sos> ... <eos>`` markers, the marked
    sentences are joined with single spaces, and the whole string is sent
    through the module-level ``nlp`` client with the tokenize+lemma
    annotators. Returns a tuple ``(words, lemmas)`` of two parallel,
    lower-cased token lists.
    """
    marked = ' '.join('<sos> {} <eos>'.format(s) for s in sentences)
    annotation = json.loads(
        nlp.annotate(marked,
                     properties={
                         'annotators': 'tokenize,lemma',
                         'pipelineLanguage': 'en',
                         'outputFormat': 'json'}))
    words = [tok['word'].lower()
             for sent in annotation['sentences']
             for tok in sent['tokens']]
    lemmas = [tok['lemma'].lower()
              for sent in annotation['sentences']
              for tok in sent['tokens']]
    return (words, lemmas)
def save_to_json(file_path, save_path, is_save=True):
    """
    Lemmatize every document (line) in *file_path* and optionally save results.

    Each input line holds sentences delimited by '<sos> ... <eos>'. The
    sentences are split in two halves to keep each CoreNLP request small.

    Parameters
    ----------
    file_path : str
        Path of the source file, one document per line.
    save_path : str
        Suffix appended to ``finished_files_dir`` for the output files
        (``<save_path>.lemma`` and ``<save_path>.token`` are written).
    is_save : bool
        When False, skip writing output files and only return pairs.

    Returns
    -------
    list
        (token, lemma) pairs collected per line (deduplicated within a line).

    NOTE: despite the name, output is plain text (one document per line),
    not JSON.
    """
    all_lemmas = []
    all_tokens = []
    all_pairs = []
    # Use context managers so file handles are closed even on CoreNLP errors;
    # enumerate replaces the manual line counter.
    with open(file_path, 'r') as src_file:
        for line_i, line in enumerate(src_file, start=1):
            print(line_i)  # progress indicator
            sentences = re.findall("<sos> (.*?) <eos>", line)
            split_size = len(sentences) // 2
            first_tokens, first_lemmas = parse_sentences(sentences[:split_size])
            second_tokens, second_lemmas = parse_sentences(sentences[split_size:])
            tokens = first_tokens + second_tokens
            lemmas = first_lemmas + second_lemmas
            all_pairs.extend(set(zip(tokens, lemmas)))
            all_lemmas.append(' '.join(lemmas).strip())
            all_tokens.append(' '.join(tokens).strip())
    if is_save:
        with open(finished_files_dir + save_path + '.lemma', "w") as f_lemma:
            f_lemma.write('\n'.join(all_lemmas).strip())
        with open(finished_files_dir + save_path + '.token', "w") as f_token:
            f_token.write('\n'.join(all_tokens).strip())
    return all_pairs
#print('Processing train file:')
#all_pairs_src = save_to_json('../data/bbc-split/src.txt.train', "/src.train")
#all_pairs_tgt = save_to_json('../data/bbc-split/tgt.txt.train', "", False)
#all_pairs = '\n'.join(['{} {}'.format(pair[0], pair[1])
# for pair in set(all_pairs_src + all_pairs_tgt)])
#f_lemma = open(finished_files_dir + '/src.train.pair', "w")
#f_lemma.write(all_pairs)
# print('Processing validation file:')
# save_to_json('../data/bbc-split/src.txt.validation', "/src.validation")
print('Processing test file:')
save_to_json('../data/smallbbc-split/src.txt.test', "/src.test")
| 38.985075 | 88 | 0.616386 |
c5608bdc4b02c091f38a89d0266ff6a2fd80283f | 6,548 | py | Python | extraPackages/pyzmq-17.1.2/buildutils/bundle.py | dolboBobo/python3_ios | 877f8c2c5890f26292ddd14909bea62a04fe2889 | [
"BSD-3-Clause"
] | 130 | 2018-02-03T10:25:54.000Z | 2022-03-25T22:27:22.000Z | extraPackages/pyzmq-17.1.2/buildutils/bundle.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | [
"BSD-3-Clause"
] | 9 | 2018-12-14T07:31:42.000Z | 2020-12-09T20:29:28.000Z | extraPackages/pyzmq-17.1.2/buildutils/bundle.py | spacetime314/python3_ios | e149f1bc2e50046c8810f83dae7739a8dea939ee | [
"BSD-3-Clause"
] | 64 | 2018-04-25T08:51:57.000Z | 2022-01-29T14:13:57.000Z | """utilities for fetching build dependencies."""
#-----------------------------------------------------------------------------
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
#
# This bundling code is largely adapted from pyzmq-static's get.sh by
# Brandon Craig-Rhodes, which is itself BSD licensed.
#-----------------------------------------------------------------------------
import os
import shutil
import stat
import sys
import tarfile
import hashlib
from subprocess import Popen, PIPE
try:
# py2
from urllib2 import urlopen
except ImportError:
# py3
from urllib.request import urlopen
from .msg import fatal, debug, info, warn
pjoin = os.path.join
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
bundled_version = (4, 2, 5)
vs = '%i.%i.%i' % bundled_version
libzmq = "zeromq-%s.tar.gz" % vs
libzmq_url = "https://github.com/zeromq/libzmq/releases/download/v{vs}/{libzmq}".format(
vs=vs,
libzmq=libzmq,
)
libzmq_checksum = "sha256:cc9090ba35713d59bb2f7d7965f877036c49c5558ea0c290b0dcc6f2a17e489f"
HERE = os.path.dirname(__file__)
ROOT = os.path.dirname(HERE)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def untgz(archive):
    """Return *archive* with every '.tar.gz' substring removed."""
    return ''.join(archive.split('.tar.gz'))
def localpath(*args):
    """Build an absolute path from components relative to the pyzmq root dir."""
    return os.path.abspath(pjoin(ROOT, *args))
def checksum_file(scheme, path):
    """Return the hex digest of the file at *path* using hash *scheme* (e.g. 'sha256')."""
    digest = getattr(hashlib, scheme)()
    with open(path, 'rb') as infile:
        # Read in 64 KB chunks so large archives do not load into memory at once.
        for block in iter(lambda: infile.read(65535), b''):
            digest.update(block)
    return digest.hexdigest()
def fetch_archive(savedir, url, fname, checksum, force=False):
    """
    Download an archive from *url* into *savedir* and verify its checksum.

    Parameters
    ----------
    savedir : str
        Directory to save into (created if missing).
    url : str
        Download URL.
    fname : str
        File name to save as (relative to savedir).
    checksum : str
        "<scheme>:<hexdigest>" reference checksum, e.g. "sha256:cc90...".
    force : bool
        Re-download even if the file already exists.

    Returns
    -------
    str
        Path of the verified archive. Calls fatal() on checksum mismatch.
    """
    dest = pjoin(savedir, fname)
    scheme, digest_ref = checksum.split(':')
    if os.path.exists(dest) and not force:
        info("already have %s" % dest)
        # BUGFIX: previously checksummed and removed `fname` (a bare filename
        # resolved against the CWD) instead of the actual file at `dest`.
        digest = checksum_file(scheme, dest)
        if digest == digest_ref:
            return dest
        warn("but checksum %s != %s, redownloading." % (digest, digest_ref))
        os.remove(dest)
    info("fetching %s into %s" % (url, savedir))
    if not os.path.exists(savedir):
        os.makedirs(savedir)
    req = urlopen(url)
    # Context manager guarantees the file is flushed/closed before checksumming.
    with open(dest, 'wb') as f:
        f.write(req.read())
    digest = checksum_file(scheme, dest)
    if digest != digest_ref:
        fatal("%s %s mismatch:\nExpected: %s\nActual : %s" % (
            dest, scheme, digest_ref, digest))
    return dest
#-----------------------------------------------------------------------------
# libzmq
#-----------------------------------------------------------------------------
def fetch_libzmq(savedir):
    """Download and extract libzmq into ``<savedir>/zeromq`` (no-op if present)."""
    dest = pjoin(savedir, 'zeromq')
    if os.path.exists(dest):
        info("already have %s" % dest)
        return
    # Downloads the release tarball and verifies it against the pinned checksum.
    path = fetch_archive(savedir, libzmq_url, fname=libzmq, checksum=libzmq_checksum)
    tf = tarfile.open(path)
    # The tarball extracts to a 'zeromq-<version>' directory; capture that name
    # from the first archive member so it can be renamed afterwards.
    with_version = pjoin(savedir, tf.firstmember.path)
    tf.extractall(savedir)
    tf.close()
    # remove version suffix:
    shutil.move(with_version, dest)
def stage_platform_hpp(zmqroot):
    """Stage platform.hpp into libzmq sources.

    Tries ./configure first (except on Windows),
    then falls back on included platform.hpp previously generated.
    """
    platform_hpp = pjoin(zmqroot, 'src', 'platform.hpp')
    if os.path.exists(platform_hpp):
        info("already have platform.hpp")
        return
    if os.name == 'nt':
        # stage msvc platform header
        platform_dir = pjoin(zmqroot, 'builds', 'msvc')
    else:
        info("attempting ./configure to generate platform.hpp")
        # Run libzmq's own configure script; on success it writes platform.hpp.
        p = Popen('./configure', cwd=zmqroot, shell=True,
                  stdout=PIPE, stderr=PIPE,
                  )
        o, e = p.communicate()
        if p.returncode:
            # configure failed: fall back to a pre-generated header that
            # matches the current platform.
            warn("failed to configure libzmq:\n%s" % e)
            if sys.platform == 'darwin':
                platform_dir = pjoin(HERE, 'include_darwin')
            elif sys.platform.startswith('freebsd'):
                platform_dir = pjoin(HERE, 'include_freebsd')
            elif sys.platform.startswith('linux-armv'):
                platform_dir = pjoin(HERE, 'include_linux-armv')
            else:
                platform_dir = pjoin(HERE, 'include_linux')
        else:
            # configure succeeded and generated platform.hpp itself.
            return
    info("staging platform.hpp from: %s" % platform_dir)
    shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp)
def copy_and_patch_libzmq(ZMQ, libzmq):
    """Copy libzmq into source dir, and patch it if necessary.

    This command is necessary prior to running a bdist on Linux or OS X.
    """
    if sys.platform.startswith('win'):
        return
    # copy libzmq into zmq for bdist
    local = localpath('zmq', libzmq)
    if not ZMQ and not os.path.exists(local):
        fatal("Please specify zmq prefix via `setup.py configure --zmq=/path/to/zmq` "
              "or copy libzmq into zmq/ manually prior to running bdist.")
    try:
        # resolve real file through symlinks
        lib = os.path.realpath(pjoin(ZMQ, 'lib', libzmq))
        print("copying %s -> %s" % (lib, local))
        shutil.copy(lib, local)
    except Exception:
        # Copy failed (e.g. ZMQ was None) — acceptable only if a copy is
        # already in place from a previous run.
        if not os.path.exists(local):
            fatal("Could not copy libzmq into zmq/, which is necessary for bdist. "
                  "Please specify zmq prefix via `setup.py configure --zmq=/path/to/zmq` "
                  "or copy libzmq into zmq/ manually.")
    if sys.platform == 'darwin':
        # chmod u+w on the lib,
        # which can be user-read-only for some reason
        mode = os.stat(local).st_mode
        os.chmod(local, mode | stat.S_IWUSR)
        # patch install_name on darwin, instead of using rpath
        cmd = ['install_name_tool', '-id', '@loader_path/../%s' % libzmq, local]
        try:
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        except OSError:
            fatal("install_name_tool not found, cannot patch libzmq for bundling.")
        out, err = p.communicate()
        if p.returncode:
            fatal("Could not patch bundled libzmq install_name: %s" % err, p.returncode)
| 34.463158 | 91 | 0.567807 |
092d7b160711ea95b44c682ad660b8ddb0e66742 | 352 | py | Python | src/test_instrumenter/if_expr.py | joeldentici/python_stepper | ab32c62d0d0333ad901d7329fb198c7a23988007 | [
"MIT"
] | 1 | 2020-11-29T20:00:39.000Z | 2020-11-29T20:00:39.000Z | src/test_instrumenter/if_expr.py | joeldentici/python_stepper | ab32c62d0d0333ad901d7329fb198c7a23988007 | [
"MIT"
] | null | null | null | src/test_instrumenter/if_expr.py | joeldentici/python_stepper | ab32c62d0d0333ad901d7329fb198c7a23988007 | [
"MIT"
] | null | null | null | import instrumenter
import unittest
class TestIfExpr(unittest.TestCase):
    """Checks how the instrumenter rewrites Python conditional expressions."""

    def test_if_expr(self):
        # A bare conditional expression should be rewritten into a
        # stepper_lib.if_expr(test, then, else) call.
        src = """
5 if x > 7 else 10
""".strip()
        expected = """
stepper_lib.if_expr(x > 7, 5, 10)
""".strip()
        actual = instrumenter.instrument(src, "ifexpr").strip()
        self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main() | 17.6 | 57 | 0.676136 |
f2eae85acff1c5b206c994f03dafb875e63dd8c0 | 1,575 | py | Python | Atari/experiments/ide/enjoy_atari.py | risk-and-uncertainty/paper | 73c013478d82d13c0e51c4dcccff7f8d9b64cd4c | [
"MIT"
] | 19 | 2019-05-28T14:30:23.000Z | 2022-03-31T03:14:31.000Z | Atari/experiments/ide/enjoy_atari.py | risk-and-uncertainty/paper | 73c013478d82d13c0e51c4dcccff7f8d9b64cd4c | [
"MIT"
] | 4 | 2021-06-08T20:53:26.000Z | 2022-03-12T00:14:06.000Z | Atari/experiments/ide/enjoy_atari.py | risk-and-uncertainty/paper | 73c013478d82d13c0e51c4dcccff7f8d9b64cd4c | [
"MIT"
] | 3 | 2019-07-20T14:40:03.000Z | 2021-02-26T04:09:03.000Z |
import pickle
import torch
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
from rl_baselines.baselines import IDE
from rl_baselines.common.networks import CNNDeepmind_Threehead
from rl_baselines.envs.atari_wrappers import make_atari, wrap_deepmind
AGENT_PATH = '???'
env = make_atari("AlienNoFrameskip-v4",noop=True)
env = wrap_deepmind(env, episode_life=True)
agent = IDE(env,CNNDeepmind_Threehead,n_quantiles=200)
agent.load(AGENT_PATH)
obs = env.reset()
returns = 0
for i in range(10000):
net1,net2,uncertainty = agent.network(torch.FloatTensor(obs))
net1 = net1.view(agent.env.action_space.n,agent.n_quantiles)
net2 = net2.view(agent.env.action_space.n,agent.n_quantiles)
plt.cla()
pred1 = (np.array(net1[0,:].detach()) + np.array(net2[0,:].detach()))/2
pred2 = (np.array(net1[1,:].detach()) + np.array(net2[1,:].detach()))/2
plt.plot(np.array(net1[0,:].detach()), 'r', label="left")
plt.plot(np.array(net2[0,:].detach()), 'r', label="left")
plt.plot(np.array(net1[1,:].detach()), 'g', label="right")
plt.plot(np.array(net2[1,:].detach()), 'g', label="right")
plt.legend()
plt.draw()
plt.pause(0.01)
means = torch.mean((net1+net2)/2,dim=1).detach()
action = agent.predict(torch.FloatTensor(obs),directed_exploration=True)
obs, rew, done, info = env.step(action)
env.render()
time.sleep(0.05)
returns += rew
print(action, "means",means,"uncertainties",uncertainty)
if done:
obs = env.reset()
print(returns)
returns = 0 | 30.288462 | 76 | 0.674921 |
d5e502c0dcb8c7931a5bddf934e7c302c5bdee78 | 78 | py | Python | sslcommerz_sdk/contrib/django_app/__init__.py | monim67/sslcommerz-sdk | 77219fc90ab12222df2c03abc95c8d2b19768eeb | [
"MIT"
] | 6 | 2021-01-15T13:31:37.000Z | 2021-12-06T13:44:39.000Z | sslcommerz_sdk/contrib/django_app/__init__.py | monim67/sslcommerz-sdk | 77219fc90ab12222df2c03abc95c8d2b19768eeb | [
"MIT"
] | null | null | null | sslcommerz_sdk/contrib/django_app/__init__.py | monim67/sslcommerz-sdk | 77219fc90ab12222df2c03abc95c8d2b19768eeb | [
"MIT"
] | null | null | null | default_app_config = "sslcommerz_sdk.contrib.django_app.apps.DjangoAppConfig"
| 39 | 77 | 0.871795 |
6cfafae8c0a35b59994ed880c2509f1110382c28 | 2,273 | py | Python | samples/Reporting/Reports/create-adhoc-report.py | broadpay/cybersource-rest-samples-python | f7af6f58c70ea3bf725d34929b40ee4b5fd4d77c | [
"MIT"
] | 1 | 2021-12-23T16:53:13.000Z | 2021-12-23T16:53:13.000Z | samples/Reporting/Reports/create-adhoc-report.py | broadpay/cybersource-rest-samples-python | f7af6f58c70ea3bf725d34929b40ee4b5fd4d77c | [
"MIT"
] | null | null | null | samples/Reporting/Reports/create-adhoc-report.py | broadpay/cybersource-rest-samples-python | f7af6f58c70ea3bf725d34929b40ee4b5fd4d77c | [
"MIT"
] | null | null | null | from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
    """Recursively remove keys mapped to None from dict *d*, in place; return *d*."""
    # Snapshot the keys so entries can be deleted while walking the dict.
    for key in list(d):
        entry = d[key]
        if entry is None:
            del d[key]
        elif isinstance(entry, dict):
            del_none(entry)
    return d
def create_adhoc_report():
    """Build and submit a one-off (ad hoc) report request via the CyberSource Reporting API.

    Prints the HTTP status and response body; returns the deserialized
    response on success, or None if the call raised.
    """
    reportDefinitionName = "TransactionRequestClass"
    reportFields = []
    reportFields.append("Request.RequestID")
    reportFields.append("Request.TransactionDate")
    reportFields.append("Request.MerchantID")
    reportMimeType = "application/xml"
    reportName = "testrest_v2"
    timezone = "GMT"
    # Report window; offsets are part of the API's expected timestamp format.
    reportStartTime = "2021-03-01T17:30:00.000+05:30"
    reportEndTime = "2021-03-02T17:30:00.000+05:30"
    reportPreferencesSignedAmounts = True
    reportPreferencesFieldNameConvention = "SOAPI"
    reportPreferences = Reportingv3reportsReportPreferences(
        signed_amounts = reportPreferencesSignedAmounts,
        field_name_convention = reportPreferencesFieldNameConvention
    )
    requestObj = CreateAdhocReportRequest(
        report_definition_name = reportDefinitionName,
        report_fields = reportFields,
        report_mime_type = reportMimeType,
        report_name = reportName,
        timezone = timezone,
        report_start_time = reportStartTime,
        report_end_time = reportEndTime,
        report_preferences = reportPreferences.__dict__
    )
    # Strip None-valued keys before serializing — the API rejects explicit nulls.
    requestObj = del_none(requestObj.__dict__)
    requestObj = json.dumps(requestObj)
    organizationId = "testrest"
    try:
        config_obj = configuration.Configuration()
        client_config = config_obj.get_configuration()
        api_instance = ReportsApi(client_config)
        return_data, status, body = api_instance.create_report(requestObj, organization_id=organizationId)
        print("\nAPI RESPONSE CODE : ", status)
        print("\nAPI RESPONSE BODY : ", body)
        return return_data
    except Exception as e:
        print("\nException when calling ReportsApi->create_report: %s\n" % e)
if __name__ == "__main__":
    create_adhoc_report()
| 32.942029 | 106 | 0.707875 |
0ebd03d325d18555cbe241152b0fe0606c576422 | 520 | py | Python | src/craterstats/__init__.py | AndrewAnnex/craterstats | d54ab421b2690a2d061ed59b140012d502fa5636 | [
"BSD-3-Clause"
] | 11 | 2021-03-11T03:58:23.000Z | 2022-02-19T01:19:53.000Z | src/craterstats/__init__.py | AndrewAnnex/craterstats | d54ab421b2690a2d061ed59b140012d502fa5636 | [
"BSD-3-Clause"
] | 9 | 2021-03-17T16:43:55.000Z | 2021-12-29T21:29:43.000Z | src/craterstats/__init__.py | AndrewAnnex/craterstats | d54ab421b2690a2d061ed59b140012d502fa5636 | [
"BSD-3-Clause"
] | 6 | 2021-03-18T13:21:22.000Z | 2021-10-31T05:25:43.000Z | # Copyright (c) 2021, Greg Michael
# Licensed under BSD 3-Clause License. See LICENSE.txt for details.
from .Chronologyfn import Chronologyfn
from .Productionfn import Productionfn
from .Cratercount import Cratercount
from .Craterplotset import Craterplotset
from .Craterplot import Craterplot
from .Craterpdf import Craterpdf
from .Epochs import Epochs
from .bin_bias_correction import bin_bias_correction
from .str_diameter_range import str_diameter_range
from .str_age import str_age
from .constants import *
| 26 | 68 | 0.828846 |
7834575217d51b25f981a31f45a06c0c655a219c | 457 | py | Python | test/venv/Scripts/easy_install-3.7-script.py | DiceDn/XgbTest | 7e32a5adf9c27608518264caf93bc6b723ce0168 | [
"MIT"
] | null | null | null | test/venv/Scripts/easy_install-3.7-script.py | DiceDn/XgbTest | 7e32a5adf9c27608518264caf93bc6b723ce0168 | [
"MIT"
] | null | null | null | test/venv/Scripts/easy_install-3.7-script.py | DiceDn/XgbTest | 7e32a5adf9c27608518264caf93bc6b723ce0168 | [
"MIT"
] | null | null | null | #!C:\Users\Richard\PycharmProjects\test\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| 35.153846 | 87 | 0.693654 |
cb9f3445da265a31807c69e64873aa7a86693c0b | 1,685 | py | Python | umeboshi/runner.py | makingspace/ticker | 01917bb0234c40903fc4db4aff93f28e0125d087 | [
"BSD-3-Clause"
] | 7 | 2015-12-30T21:34:06.000Z | 2022-01-31T09:18:03.000Z | umeboshi/runner.py | makingspace/ticker | 01917bb0234c40903fc4db4aff93f28e0125d087 | [
"BSD-3-Clause"
] | 2 | 2021-04-15T06:26:05.000Z | 2021-04-16T15:35:07.000Z | umeboshi/runner.py | makingspace/ticker | 01917bb0234c40903fc4db4aff93f28e0125d087 | [
"BSD-3-Clause"
] | 1 | 2021-12-21T08:33:55.000Z | 2021-12-21T08:33:55.000Z | import logging
from django.utils.module_loading import import_string
from umeboshi.exceptions import UnknownTriggerException, RoutineRunException, NoRoutineTriggerException
class Runner(object):
    """
    The Umeboshi Runner is responsible for registering and retrieving Routines.
    It provides a wrapper around Routine API to establish sane defaults.
    """

    def __init__(self):
        # Maps trigger_name -> dotted path "module.ClassName" of the routine.
        self.registry = {}

    def register(self, cls):
        """Register Routine class *cls* under its trigger_name.

        Raises NoRoutineTriggerException when the class declares no trigger.
        A duplicate trigger logs a warning and overwrites the earlier entry.
        """
        if cls.trigger_name is None:
            raise NoRoutineTriggerException
        if cls.trigger_name in self.registry:
            # BUGFIX: the old message used '{}' placeholders, which the logging
            # module never substitutes (it applies %-style formatting to args).
            self.logger.warning('Duplicate definition for trigger %s at %s and %s.%s',
                                cls.trigger_name, self.registry[cls.trigger_name],
                                cls.__module__, cls.__name__)
        self.registry[cls.trigger_name] = "{}.{}".format(cls.__module__, cls.__name__)

    def get_routine_class(self, trigger_name):
        """Return the Routine class registered for *trigger_name*.

        Raises UnknownTriggerException for unregistered triggers.
        """
        if trigger_name in self.registry:
            return import_string(self.registry[trigger_name])
        # BUGFIX: an unconditional `return self.registry[trigger_name]` used to
        # sit here, raising KeyError and making this raise unreachable.
        raise UnknownTriggerException()

    def check_validity(self, routine):
        """Return routine.check_validity() when defined; default to valid."""
        if not hasattr(routine, 'check_validity'):
            return True
        return routine.check_validity()

    def run(self, routine):
        """Execute routine.run(), logging any failure and wrapping it in RoutineRunException."""
        if not hasattr(routine, 'run'):
            raise NotImplementedError
        try:
            return routine.run()
        except Exception as e:
            self.logger.exception(e)
            raise RoutineRunException()

    @property
    def logger(self):
        # Lazily resolved so logging config applied after import is honored.
        return logging.getLogger('django-umeboshi.runner')
runner = Runner()
| 33.039216 | 103 | 0.648665 |
1cfaa8ae31d198111f09327f68a20c9527ae3e96 | 164 | py | Python | Pandas-CalculatingColumnStats.py | H2oPtic/Codecademy-Education | 454ecff36a055fa17b4b338e1c6f1e9b3b94ef66 | [
"MIT"
] | null | null | null | Pandas-CalculatingColumnStats.py | H2oPtic/Codecademy-Education | 454ecff36a055fa17b4b338e1c6f1e9b3b94ef66 | [
"MIT"
] | null | null | null | Pandas-CalculatingColumnStats.py | H2oPtic/Codecademy-Education | 454ecff36a055fa17b4b338e1c6f1e9b3b94ef66 | [
"MIT"
] | null | null | null | import pandas as pd
# Load the shoe-orders table; assumes orders.csv sits in the working directory.
orders = pd.read_csv('orders.csv')
# Preview the first 10 rows.
print(orders.head(10))
# Highest single shoe price in the dataset.
most_expensive = orders.price.max()
num_colors = orders.shoe_color.nunique() | 20.5 | 40 | 0.72561 |
5413f848c1f96ccfdd1bc2480cabf5912dc27f27 | 21,134 | py | Python | msticpy/sectools/tiproviders/ti_provider_base.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | [
"MIT"
] | 2 | 2020-11-03T05:56:10.000Z | 2020-11-03T05:56:17.000Z | msticpy/sectools/tiproviders/ti_provider_base.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | [
"MIT"
] | null | null | null | msticpy/sectools/tiproviders/ti_provider_base.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | [
"MIT"
] | 1 | 2022-02-06T18:56:15.000Z | 2022-02-06T18:56:15.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Module for TILookup classes.
Input can be a single IoC observable or a pandas DataFrame containing
multiple observables. Processing may require a an API key and
processing performance may be limited to a specific number of
requests per minute for the account type that you have.
"""
import abc
from abc import ABC
import math # noqa
import pprint
import re
from collections import Counter, namedtuple
from enum import Enum
from functools import lru_cache, singledispatch, total_ordering
from ipaddress import IPv4Address, IPv6Address, ip_address
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from urllib.parse import quote_plus
import attr
import pandas as pd
from urllib3.exceptions import LocationParseError
from urllib3.util import parse_url
from ..._version import VERSION
from ...common.utility import export
from ..iocextract import IoCExtract, IoCType
__version__ = VERSION
__author__ = "Ian Hellen"
SanitizedObservable = namedtuple("SanitizedObservable", ["observable", "status"])
# pylint: disable=too-few-public-methods
@total_ordering
class TISeverity(Enum):
    """Threat intelligence report severity."""

    unknown = -1
    information = 0
    warning = 1
    high = 2

    @classmethod
    def parse(cls, value) -> "TISeverity":
        """
        Coerce *value* to a TISeverity.

        Parameters
        ----------
        value : Any
            TISeverity instance, member name (str, case-insensitive)
            or member value (int).

        Returns
        -------
        TISeverity
            The matching member, or TISeverity.unknown when *value*
            does not name or number a member.

        """
        if isinstance(value, cls):
            return value
        if isinstance(value, str):
            try:
                return cls[value.lower()]
            except KeyError:
                return cls.unknown
        if isinstance(value, int):
            try:
                return cls(value)
            except ValueError:
                return cls.unknown
        return cls.unknown

    # pylint: disable=comparison-with-callable
    def __eq__(self, other) -> bool:
        """Return True when *other* parses to the same severity level."""
        return self.value == TISeverity.parse(other).value

    def __gt__(self, other) -> bool:
        """Return True when this severity outranks *other* (total_ordering fills in the rest)."""
        return self.value > TISeverity.parse(other).value
# pylint: enable=comparison-with-callable
# pylint: disable=too-many-instance-attributes
@attr.s(auto_attribs=True)
class LookupResult:
    """Lookup result for IoCs."""

    # auto_attribs: every annotated name below becomes an attrs field.
    ioc: str
    ioc_type: str
    safe_ioc: str = ""
    query_subtype: Optional[str] = None
    provider: Optional[str] = None
    result: bool = False
    severity: int = attr.ib(default=0)
    details: Any = None
    raw_result: Optional[Union[str, dict]] = None
    reference: Optional[str] = None
    status: int = 0

    @severity.validator
    def _check_severity(self, attribute, value):
        # Runs at init (and via set_severity): normalizes whatever was passed
        # to the TISeverity *name* string, despite the `int` annotation above.
        del attribute
        if isinstance(value, TISeverity):
            self.severity = value.name
            return
        self.severity = TISeverity.parse(value).name

    @property
    def summary(self):
        """Print a summary of the Lookup Result."""
        p_pr = pprint.PrettyPrinter(indent=4)
        print("ioc:", self.ioc, "(", self.ioc_type, ")")
        print("result:", self.result)
        # print("severity:", self.severity)
        p_pr.pprint(self.details)
        print("reference: ", self.reference)

    @property
    def raw_result_fmtd(self):
        """Print raw results of the Lookup Result."""
        p_pr = pprint.PrettyPrinter(indent=4)
        p_pr.pprint(self.raw_result)

    @property
    def severity_name(self) -> str:
        """
        Return text description of severity score.

        Returns
        -------
        str
            Severity description.

        """
        # NOTE(review): after _check_severity, self.severity holds the member
        # *name* (a str), so TISeverity(self.severity) — a by-value lookup —
        # raises ValueError and this always falls back to 'unknown'; confirm
        # whether TISeverity[self.severity] was intended.
        try:
            return TISeverity(self.severity).name
        except ValueError:
            return TISeverity.unknown.name

    def set_severity(self, value: Any):
        """
        Set the severity from enum, int or string.

        Parameters
        ----------
        value : Any
            The severity value to set

        """
        self._check_severity(None, value)

    @classmethod
    def column_map(cls):
        """Return a dictionary that maps fields to DF Names."""
        # snake_case field name -> CamelCase DataFrame column name.
        col_mapping = {}
        for name in attr.fields_dict(cls):
            out_name = "".join([part.capitalize() for part in name.split("_")])
            col_mapping[name] = out_name
        return col_mapping
# pylint: enable=too-many-instance-attributes
# pylint: disable=too-few-public-methods
class TILookupStatus(Enum):
    """Threat intelligence lookup status."""

    ok = 0  # lookup completed normally
    not_supported = 1  # IoC type not supported by the provider
    bad_format = 2  # observable failed pre-processing/validation
    query_failed = 3  # provider query raised or returned an error
    other = 10  # any other failure
# pylint: enable=too-few-public-methods
_IOC_EXTRACT = IoCExtract()
@export
class TIProvider(ABC):
    """Abstract base class for Threat Intel providers."""

    # Subclasses populate this with "<ioc_type>[-<query_subtype>]" -> query
    # definitions; supported IoC types are derived from its keys.
    _IOC_QUERIES: Dict[str, Any] = {}

    # pylint: disable=unused-argument
    def __init__(self, **kwargs):
        """Initialize the provider."""
        self._supported_types: Set[IoCType] = set()
        self.description: Optional[str] = None
        # Derive supported types from the query keys (part before any "-").
        self._supported_types = {
            IoCType.parse(ioc_type.split("-")[0]) for ioc_type in self._IOC_QUERIES
        }
        if IoCType.unknown in self._supported_types:
            self._supported_types.remove(IoCType.unknown)
        self.require_url_encoding = False

    # pylint: disable=duplicate-code
    @abc.abstractmethod
    def lookup_ioc(
        self, ioc: str, ioc_type: str = None, query_type: str = None, **kwargs
    ) -> LookupResult:
        """
        Lookup a single IoC observable.

        Parameters
        ----------
        ioc : str
            IoC Observable value
        ioc_type : str, optional
            IoC Type, by default None (type will be inferred)
        query_type : str, optional
            Specify the data subtype to be queried, by default None.
            If not specified the default record type for the IoC type
            will be returned.

        Returns
        -------
        LookupResult
            The returned results.

        """

    def lookup_iocs(
        self,
        data: Union[pd.DataFrame, Dict[str, str], Iterable[str]],
        obs_col: str = None,
        ioc_type_col: str = None,
        query_type: str = None,
        **kwargs,
    ) -> pd.DataFrame:
        """
        Lookup collection of IoC observables.

        Parameters
        ----------
        data : Union[pd.DataFrame, Dict[str, str], Iterable[str]]
            Data input in one of three formats:
            1. Pandas dataframe (you must supply the column name in
            `obs_col` parameter)
            2. Dict of observable, IoCType
            3. Iterable of observables - IoCTypes will be inferred
        obs_col : str, optional
            DataFrame column to use for observables, by default None
        ioc_type_col : str, optional
            DataFrame column to use for IoCTypes, by default None
        query_type : str, optional
            Specify the data subtype to be queried, by default None.
            If not specified the default record type for the IoC type
            will be returned.

        Returns
        -------
        pd.DataFrame
            DataFrame of results.

        """
        results = []
        for observable, ioc_type in generate_items(data, obs_col, ioc_type_col):
            # Skip empty/falsy observables rather than failing the batch.
            if not observable:
                continue
            item_result = self.lookup_ioc(
                ioc=observable, ioc_type=ioc_type, query_type=query_type
            )
            results.append(pd.Series(attr.asdict(item_result)))
        # Rename snake_case attrs fields to CamelCase DataFrame columns.
        return pd.DataFrame(data=results).rename(columns=LookupResult.column_map())

    @abc.abstractmethod
    def parse_results(self, response: LookupResult) -> Tuple[bool, TISeverity, Any]:
        """
        Return the details of the response.

        Parameters
        ----------
        response : LookupResult
            The returned data response

        Returns
        -------
        Tuple[bool, TISeverity, Any]
            bool = positive or negative hit
            TISeverity = enumeration of severity
            Object with match details

        """

    @property
    def supported_types(self) -> List[str]:
        """
        Return list of supported IoC types for this provider.

        Returns
        -------
        List[str]
            List of supported type names

        """
        return [ioc.name for ioc in self._supported_types]

    @classmethod
    def is_known_type(cls, ioc_type: str) -> bool:
        """
        Return True if this a known IoC Type.

        Parameters
        ----------
        ioc_type : str
            IoCType string to test

        Returns
        -------
        bool
            True if known type.

        """
        return ioc_type in IoCType.__members__ and ioc_type != "unknown"

    @classmethod
    def usage(cls):
        """Print usage of provider."""
        print(f"{cls.__doc__} Supported query types:")
        for ioc_key in sorted(cls._IOC_QUERIES):
            # Keys are "<ioc_type>" or "<ioc_type>-<query_subtype>".
            ioc_key_elems = ioc_key.split("-", maxsplit=1)
            if len(ioc_key_elems) == 1:
                print(f"\tioc_type={ioc_key_elems[0]}")
            if len(ioc_key_elems) == 2:
                print(
                    f"\tioc_type={ioc_key_elems[0]}, ioc_query_type={ioc_key_elems[1]}"
                )

    def is_supported_type(self, ioc_type: Union[str, IoCType]) -> bool:
        """
        Return True if the passed type is supported.

        Parameters
        ----------
        ioc_type : Union[str, IoCType]
            IoC type name or instance

        Returns
        -------
        bool
            True if supported.

        """
        if isinstance(ioc_type, str):
            ioc_type = IoCType.parse(ioc_type)
        return ioc_type.name in self.supported_types

    @staticmethod
    @lru_cache(maxsize=1024)
    def resolve_ioc_type(observable: str) -> str:
        """
        Return IoCType determined by IoCExtract.

        Parameters
        ----------
        observable : str
            IoC observable string

        Returns
        -------
        str
            IoC Type (or unknown if type could not be determined)

        """
        return _IOC_EXTRACT.get_ioc_type(observable)

    def _check_ioc_type(
        self, ioc: str, ioc_type: str = None, query_subtype: str = None
    ) -> LookupResult:
        """
        Check IoC Type and cleans up observable.

        Parameters
        ----------
        ioc : str
            IoC observable
        ioc_type : str, optional
            IoC type, by default None
        query_subtype : str, optional
            Query sub-type, if any, by default None

        Returns
        -------
        LookupResult
            Lookup result with resolved ioc_type and pre-processed
            observable.
            LookupResult.status is none-zero on failure.

        """
        result = LookupResult(
            ioc=ioc,
            safe_ioc=ioc,
            ioc_type=ioc_type if ioc_type else self.resolve_ioc_type(ioc),
            query_subtype=query_subtype,
            result=False,
            details="",
            raw_result=None,
            reference=None,
        )
        if not self.is_supported_type(result.ioc_type):
            result.details = f"IoC type {result.ioc_type} not supported."
            result.status = TILookupStatus.not_supported.value
            return result
        # Sanitize the observable (strip, validate, reject private IPs etc.).
        clean_ioc = preprocess_observable(
            ioc, result.ioc_type, self.require_url_encoding
        )
        result.safe_ioc = clean_ioc.observable
        if clean_ioc.status != "ok":
            result.details = clean_ioc.status
            result.status = TILookupStatus.bad_format.value
        return result
# slightly stricter than normal URL regex to exclude '() from host string
_HTTP_STRICT_REGEX = r"""
(?P<protocol>(https?|ftp|telnet|ldap|file)://)
(?P<userinfo>([a-z0-9-._~!$&*+,;=:]|%[0-9A-F]{2})*@)?
(?P<host>([a-z0-9-._~!$&\*+,;=]|%[0-9A-F]{2})*)
(:(?P<port>\d*))?
(/(?P<path>([^?\#| ]|%[0-9A-F]{2})*))?
(\?(?P<query>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?
(\#(?P<fragment>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?\b"""
_HTTP_STRICT_RGXC = re.compile(_HTTP_STRICT_REGEX, re.I | re.X | re.M)
# pylint: disable=too-many-return-statements, too-many-branches
def preprocess_observable(
    observable, ioc_type, require_url_encoding: bool = False
) -> SanitizedObservable:
    """
    Preprocess and check validity of *observable* against declared *ioc_type*.

    :param observable: the value of the IoC
    :param ioc_type: the IoC type
    """
    cleaned = observable.strip()
    try:
        is_valid = _IOC_EXTRACT.validate(cleaned, ioc_type)
    except KeyError:
        # Unknown type key in the extractor -> treat as not validated.
        is_valid = False
    if not is_valid:
        return SanitizedObservable(
            None, "Observable does not match expected pattern for " + ioc_type
        )
    # Dispatch to the type-specific sanitizer.
    if ioc_type == "url":
        return _preprocess_url(cleaned, require_url_encoding)
    if ioc_type in ("ipv4", "ipv6"):
        return _preprocess_ip(cleaned, version=4 if ioc_type == "ipv4" else 6)
    if ioc_type in ("dns", "hostname"):
        return _preprocess_dns(cleaned)
    if ioc_type in ("md5_hash", "sha1_hash", "sha256_hash", "file_hash"):
        return _preprocess_hash(cleaned)
    return SanitizedObservable(cleaned, "ok")
# Would complicate code with too many branches
# pylint: disable=too-many-return-statements
def _preprocess_url(
    url: str, require_url_encoding: bool = False
) -> SanitizedObservable:
    """
    Check that URL can be parsed.

    Parameters
    ----------
    url : str
        The URL to check
    require_url_encoding : bool
        Set to True if url's require encoding before passing to provider

    Returns
    -------
    SanitizedObservable
        Pre-processed result

    """
    clean_url, scheme, host = get_schema_and_host(url, require_url_encoding)
    if scheme is None or host is None:
        return SanitizedObservable(None, f"Could not obtain scheme or host from {url}")
    # get rid of some obvious false positives (localhost, local hostnames)
    try:
        addr = ip_address(host)
        if addr.is_private:
            return SanitizedObservable(None, "Host part of URL is a private IP address")
        if addr.is_loopback:
            return SanitizedObservable(
                None, "Host part of URL is a loopback IP address"
            )
    except ValueError:
        # Host is a name, not an IP literal — nothing to reject here.
        pass
    if "." not in host:
        return SanitizedObservable(None, "Host is unqualified domain name")
    if scheme.lower() in ["file"]:
        return SanitizedObservable(None, f"{scheme} URL scheme is not supported")
    return SanitizedObservable(clean_url, "ok")
def get_schema_and_host(
    url: str, require_url_encoding: bool = False
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """
    Return cleaned URL plus its scheme and host.

    Parameters
    ----------
    url : str
        Input URL
    require_url_encoding : bool
        Set to True if the URL needs percent-encoding. Default is False.

    Returns
    -------
    Tuple[Optional[str], Optional[str], Optional[str]
        Tuple of URL, scheme, host

    """
    clean_url = None
    scheme = None
    host = None
    try:
        parsed = parse_url(url)
        scheme, host = parsed.scheme, parsed.host
        clean_url = url
    except LocationParseError:
        # Strip query/fragment junk and try parsing once more.
        repaired = _clean_url(url)
        if repaired is not None:
            try:
                parsed = parse_url(repaired)
                scheme, host = parsed.scheme, parsed.host
                clean_url = repaired
            except LocationParseError:
                pass

    if require_url_encoding and clean_url:
        clean_url = quote_plus(clean_url)
    return clean_url, scheme, host
def _clean_url(url: str) -> Optional[str]:
    """
    Strip query parameters, fragments and trailing junk from a URL.

    Parameters
    ----------
    url : str
        the URL to check

    Returns
    -------
    Optional[str]
        Cleaned URL or None if the input was not a valid URL

    """
    match_url = _HTTP_STRICT_RGXC.search(url)
    if not match_url:
        return None
    # Read the named groups once instead of calling groupdict() per field.
    parts = match_url.groupdict()
    if parts["protocol"] is None or parts["host"] is None:
        return None

    # Rebuild the URL from components, dropping query string and fragment.
    pieces = [parts["protocol"]]
    if parts["userinfo"]:
        pieces.append(parts["userinfo"])
    pieces.append(parts["host"])
    if parts["port"]:
        pieces.append(":" + parts["port"])
    if parts["path"]:
        pieces.append("/" + parts["path"])
    return "".join(pieces)
# Would complicate code with too many branches
# pylint: disable=too-many-return-statements
def _preprocess_ip(ipaddress: str, version=4):
    """Check that the observable is a valid, global IP of the requested version."""
    try:
        addr = ip_address(ipaddress)
    except ValueError:
        return SanitizedObservable(None, "IP address is invalid format")

    if version == 4 and not isinstance(addr, IPv4Address):
        return SanitizedObservable(None, "Not an IPv4 address")
    if version == 6 and not isinstance(addr, IPv6Address):
        return SanitizedObservable(None, "Not an IPv6 address")
    if not addr.is_global:
        return SanitizedObservable(None, "IP address is not global")
    return SanitizedObservable(ipaddress, "ok")
def _preprocess_dns(domain: str) -> SanitizedObservable:
    """Ensure the observable looks like a qualified domain, not an IP."""
    if "." not in domain:
        return SanitizedObservable(None, "Domain is unqualified domain name")
    try:
        ip_address(domain)
    except ValueError:
        # Not parseable as an IP - treat as a plausible domain name.
        return SanitizedObservable(domain, "ok")
    return SanitizedObservable(None, "Domain is an IP address")
def _preprocess_hash(hash_str: str) -> SanitizedObservable:
    """Reject hash candidates with too little entropy (e.g. a run of 'x')."""
    if entropy(hash_str) < 3.0:
        return SanitizedObservable(None, "String has too low an entropy to be a hash")
    return SanitizedObservable(hash_str, "ok")
def entropy(input_str: str) -> float:
    """
    Compute the Shannon entropy (base 2) of the input string.

    Parameters
    ----------
    input_str : str
        The string to measure.

    Returns
    -------
    float
        Entropy in bits per character; 0.0 for an empty string.

    """
    str_len = float(len(input_str))
    if not str_len:
        # Guard: the formula below divides by len(input_str), which would
        # raise ZeroDivisionError for an empty string.
        return 0.0
    return -sum(
        map(
            lambda a: (a / str_len) * math.log2(a / str_len),
            Counter(input_str).values(),
        )
    )
@singledispatch
def generate_items(
    data: Any, obs_col: Optional[str] = None, ioc_type_col: Optional[str] = None
) -> Iterable[Tuple[Optional[str], Optional[str]]]:
    """
    Generate observable/type pairs from different input types.

    Parameters
    ----------
    data : Any
        DataFrame, dictionary or iterable
    obs_col : Optional[str]
        If `data` is a DataFrame, the column containing the observable value.
    ioc_type_col : Optional[str]
        If `data` is a DataFrame, the column containing the observable type.

    Returns
    -------
    Iterable[Tuple[Optional[str], Optional[str]]]] - a tuple of Observable/Type.

    """
    # Column arguments only apply to the DataFrame overload.
    del obs_col, ioc_type_col

    # pylint: disable=isinstance-second-argument-not-valid-type
    if not isinstance(data, Iterable):
        yield None, None
        return
    for item in data:
        yield item, TIProvider.resolve_ioc_type(item)
@generate_items.register(pd.DataFrame)
def _(data: pd.DataFrame, obs_col: str, ioc_type_col: Optional[str] = None):
    """Yield observable/type pairs from DataFrame rows, inferring missing types."""
    for _, row in data.iterrows():
        observable = row[obs_col]
        ioc_type = (
            TIProvider.resolve_ioc_type(observable)
            if ioc_type_col is None
            else row[ioc_type_col]
        )
        yield observable, ioc_type
@generate_items.register(dict)  # type: ignore
def _(data: dict, obs_col: Optional[str] = None, ioc_type_col: Optional[str] = None):
    """Yield observable/type pairs from a dict, inferring any missing types."""
    for obs, ioc_type in data.items():
        yield obs, ioc_type or TIProvider.resolve_ioc_type(obs)
| 29.070151 | 88 | 0.602536 |
a3bf50961cd371e09f24ebacac003ab9e41bcb1c | 75,234 | py | Python | kafka/conn.py | informatique-cdc/kafka-python | d73bd6fc2f8825e2fddb7c4f091af7b266e37aea | [
"Apache-2.0"
] | null | null | null | kafka/conn.py | informatique-cdc/kafka-python | d73bd6fc2f8825e2fddb7c4f091af7b266e37aea | [
"Apache-2.0"
] | null | null | null | kafka/conn.py | informatique-cdc/kafka-python | d73bd6fc2f8825e2fddb7c4f091af7b266e37aea | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division
import copy
import errno
import io
import logging
from random import shuffle, uniform
# selectors in stdlib as of py3.4
try:
import selectors # pylint: disable=import-error
except ImportError:
# vendored backport module
from kafka.vendor import selectors34 as selectors
import socket
import struct
import threading
import time
from kafka.vendor import six
import kafka.errors as Errors
from kafka.future import Future
from kafka.metrics.stats import Avg, Count, Max, Rate
from kafka.oauth.abstract import AbstractTokenProvider
from kafka.protocol.admin import SaslHandShakeRequest, DescribeAclsRequest_v2, DescribeClientQuotasRequest
from kafka.protocol.commit import OffsetFetchRequest
from kafka.protocol.offset import OffsetRequest
from kafka.protocol.produce import ProduceRequest
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.fetch import FetchRequest
from kafka.protocol.parser import KafkaProtocol
from kafka.protocol.types import Int32, Int8
from kafka.scram import ScramClient
from kafka.version import __version__
if six.PY2:
ConnectionError = socket.error
TimeoutError = socket.error
BlockingIOError = Exception
log = logging.getLogger(__name__)
DEFAULT_KAFKA_PORT = 9092
SASL_QOP_AUTH = 1
SASL_QOP_AUTH_INT = 2
SASL_QOP_AUTH_CONF = 4
try:
import ssl
ssl_available = True
try:
SSLEOFError = ssl.SSLEOFError
SSLWantReadError = ssl.SSLWantReadError
SSLWantWriteError = ssl.SSLWantWriteError
SSLZeroReturnError = ssl.SSLZeroReturnError
except AttributeError:
# support older ssl libraries
log.warning('Old SSL module detected.'
' SSL error handling may not operate cleanly.'
' Consider upgrading to Python 3.3 or 2.7.9')
SSLEOFError = ssl.SSLError
SSLWantReadError = ssl.SSLError
SSLWantWriteError = ssl.SSLError
SSLZeroReturnError = ssl.SSLError
except ImportError:
# support Python without ssl libraries
ssl_available = False
class SSLWantReadError(Exception):
pass
class SSLWantWriteError(Exception):
pass
# needed for SASL_GSSAPI authentication:
try:
import gssapi
from gssapi.raw.misc import GSSError
except ImportError:
gssapi = None
GSSError = None
try:
import sspi
import pywintypes
import sspicon
import win32security
except ImportError:
sspi = None
AFI_NAMES = {
socket.AF_UNSPEC: "unspecified",
socket.AF_INET: "IPv4",
socket.AF_INET6: "IPv6",
}
class ConnectionStates(object):
    """String constants naming each state of a broker connection's lifecycle."""

    DISCONNECTING = '<disconnecting>'
    DISCONNECTED = '<disconnected>'
    CONNECTING = '<connecting>'
    # TLS handshake in progress (SSL / SASL_SSL security protocols only)
    HANDSHAKE = '<handshake>'
    CONNECTED = '<connected>'
    # SASL authentication exchange in progress
    AUTHENTICATING = '<authenticating>'
class BrokerConnection(object):
"""Initialize a Kafka broker connection
Keyword Arguments:
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to backoff/wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. Once the maximum is reached,
reconnection attempts will continue periodically with this fixed
rate. To avoid connection storms, a randomization factor of 0.2
will be applied to the backoff resulting in a random range between
20% below and 20% above the computed value. Default: 1000.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 30000.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): flag to configure whether ssl handshake
should verify that the certificate matches the brokers hostname.
default: True.
ssl_cafile (str): optional filename of ca file to use in certificate
verification. default: None.
ssl_certfile (str): optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. default: None.
ssl_keyfile (str): optional filename containing the client private key.
default: None.
ssl_password (callable, str, bytes, bytearray): optional password or
callable function that returns a password, for decrypting the
client private key. Default: None.
ssl_crlfile (str): optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
default: None.
ssl_ciphers (str): optionally set the available ciphers for ssl
connections. It should be a string in the OpenSSL cipher list
format. If no cipher can be selected (because compile-time options
or other configuration forbids use of all the specified ciphers),
an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
api_version (tuple): Specify which Kafka API version to use.
Accepted values are: (0, 8, 0), (0, 8, 1), (0, 8, 2), (0, 9),
(0, 10). Default: (0, 8, 2)
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
state_change_callback (callable): function to be called when the
connection state changes from CONNECTING to CONNECTED etc.
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
metric_group_prefix (str): Prefix for metric names. Default: ''
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
sasl_plain_username (str): username for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_plain_password (str): password for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
"""
DEFAULT_CONFIG = {
'client_id': 'kafka-python-' + __version__,
'node_id': 0,
'request_timeout_ms': 30000,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_crlfile': None,
'ssl_password': None,
'ssl_ciphers': None,
'api_version': (0, 8, 2), # default to most restrictive
'selector': selectors.DefaultSelector,
'state_change_callback': lambda node_id, sock, conn: True,
'metrics': None,
'metric_group_prefix': '',
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None
}
SECURITY_PROTOCOLS = ('PLAINTEXT', 'SSL', 'SASL_PLAINTEXT', 'SASL_SSL')
SASL_MECHANISMS = ('PLAIN', 'GSSAPI', 'OAUTHBEARER', "SCRAM-SHA-256", "SCRAM-SHA-512")
def __init__(self, host, port, afi, **configs):
    """Create a (not yet connected) connection to a single broker.

    Arguments:
        host (str): broker hostname or IP address
        port (int): broker port
        afi (int): socket address family (AF_UNSPEC / AF_INET / AF_INET6)
        **configs: optional configuration overrides; only keys present in
            DEFAULT_CONFIG are applied (see class docstring).
    """
    self.host = host
    self.port = port
    self.afi = afi
    self._sock_afi = afi
    self._sock_addr = None
    self._api_versions = None

    # Start from defaults and overlay only recognized config keys.
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]

    self.node_id = self.config.pop('node_id')

    # Translate buffer-size configs into socket options applied at connect.
    if self.config['receive_buffer_bytes'] is not None:
        self.config['socket_options'].append(
            (socket.SOL_SOCKET, socket.SO_RCVBUF,
             self.config['receive_buffer_bytes']))
    if self.config['send_buffer_bytes'] is not None:
        self.config['socket_options'].append(
             (socket.SOL_SOCKET, socket.SO_SNDBUF,
             self.config['send_buffer_bytes']))

    assert self.config['security_protocol'] in self.SECURITY_PROTOCOLS, (
        'security_protocol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))

    if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
        assert ssl_available, "Python wasn't built with SSL support"

    # Validate that the selected SASL mechanism has all required settings.
    if self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL'):
        assert self.config['sasl_mechanism'] in self.SASL_MECHANISMS, (
            'sasl_mechanism must be in ' + ', '.join(self.SASL_MECHANISMS))
        if self.config['sasl_mechanism'] in ('PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'):
            assert self.config['sasl_plain_username'] is not None, (
                'sasl_plain_username required for PLAIN or SCRAM sasl'
            )
            assert self.config['sasl_plain_password'] is not None, (
                'sasl_plain_password required for PLAIN or SCRAM sasl'
            )
        if self.config['sasl_mechanism'] == 'GSSAPI':
            if gssapi is None and sspi is None:
                raise AssertionError('No GSSAPI lib available')
            assert self.config['sasl_kerberos_service_name'] is not None, 'sasl_kerberos_service_name required for GSSAPI sasl'
        if self.config['sasl_mechanism'] == 'OAUTHBEARER':
            token_provider = self.config['sasl_oauth_token_provider']
            assert token_provider is not None, 'sasl_oauth_token_provider required for OAUTHBEARER sasl'
            assert callable(getattr(token_provider, "token", None)), 'sasl_oauth_token_provider must implement method #token()'

    # This is not a general lock / this class is not generally thread-safe yet
    # However, to avoid pushing responsibility for maintaining
    # per-connection locks to the upstream client, we will use this lock to
    # make sure that access to the protocol buffer is synchronized
    # when sends happen on multiple threads
    self._lock = threading.Lock()

    # the protocol parser instance manages actual tracking of the
    # sequence of in-flight requests to responses, which should
    # function like a FIFO queue. For additional request data,
    # including tracking request futures and timestamps, we
    # can use a simple dictionary of correlation_id => request data
    self.in_flight_requests = dict()

    self._protocol = KafkaProtocol(
        client_id=self.config['client_id'],
        api_version=self.config['api_version'])
    self.state = ConnectionStates.DISCONNECTED
    self._reset_reconnect_backoff()
    self._sock = None
    self._send_buffer = b''
    self._ssl_context = None
    if self.config['ssl_context'] is not None:
        self._ssl_context = self.config['ssl_context']
    self._sasl_auth_future = None
    self.last_attempt = 0
    # Cached getaddrinfo results; consumed one entry per connect attempt.
    self._gai = []
    self._sensors = None
    if self.config['metrics']:
        self._sensors = BrokerConnectionMetrics(self.config['metrics'],
                                                self.config['metric_group_prefix'],
                                                self.node_id)
def _dns_lookup(self):
    """Refresh the cached getaddrinfo results; return False on failure."""
    self._gai = dns_lookup(self.host, self.port, self.afi)
    if self._gai:
        return True
    log.error('DNS lookup failed for %s:%i (%s)',
              self.host, self.port, self.afi)
    return False
def _next_afi_sockaddr(self):
    """Pop the next (afi, sockaddr) pair, re-resolving DNS when exhausted."""
    if not self._gai and not self._dns_lookup():
        return None
    entry = self._gai.pop(0)
    # getaddrinfo entries are (family, type, proto, canonname, sockaddr)
    return (entry[0], entry[4])
def connect_blocking(self, timeout=float('inf')):
    """Connect, blocking until connected or `timeout` seconds elapse.

    Returns True if the connection completed, False otherwise.
    """
    if self.connected():
        return True
    timeout += time.time()
    # First attempt to perform dns lookup
    # note that the underlying interface, socket.getaddrinfo,
    # has no explicit timeout so we may exceed the user-specified timeout
    self._dns_lookup()

    # Loop once over all returned dns entries
    selector = None
    while self._gai:
        while time.time() < timeout:
            self.connect()
            if self.connected():
                if selector is not None:
                    selector.close()
                return True
            elif self.connecting():
                # Wait (up to 1s) for the socket to become writable before
                # polling connect() again.
                if selector is None:
                    selector = self.config['selector']()
                    selector.register(self._sock, selectors.EVENT_WRITE)
                selector.select(1)
            elif self.disconnected():
                # This attempt failed; fall through to the next dns entry.
                if selector is not None:
                    selector.close()
                    selector = None
                break
        else:
            # Inner loop exhausted the timeout without a break.
            break
    return False
def connect(self):
    """Attempt to connect and return ConnectionState.

    Non-blocking: advances the connection state machine by (at most) one
    step per call - DISCONNECTED -> CONNECTING -> [HANDSHAKE] ->
    [AUTHENTICATING] -> CONNECTED - and is intended to be polled.
    """
    if self.state is ConnectionStates.DISCONNECTED and not self.blacked_out():
        self.last_attempt = time.time()
        next_lookup = self._next_afi_sockaddr()
        if not next_lookup:
            self.close(Errors.KafkaConnectionError('DNS failure'))
            return self.state
        else:
            log.debug('%s: creating new socket', self)
            assert self._sock is None
            self._sock_afi, self._sock_addr = next_lookup
            self._sock = socket.socket(self._sock_afi, socket.SOCK_STREAM)

            for option in self.config['socket_options']:
                log.debug('%s: setting socket option %s', self, option)
                self._sock.setsockopt(*option)

            self._sock.setblocking(False)
            self.state = ConnectionStates.CONNECTING
            self.config['state_change_callback'](self.node_id, self._sock, self)
            log.info('%s: connecting to %s:%d [%s %s]', self, self.host,
                     self.port, self._sock_addr, AFI_NAMES[self._sock_afi])

    if self.state is ConnectionStates.CONNECTING:
        # in non-blocking mode, use repeated calls to socket.connect_ex
        # to check connection status
        ret = None
        try:
            ret = self._sock.connect_ex(self._sock_addr)
        except socket.error as err:
            ret = err.errno

        # Connection succeeded
        if not ret or ret == errno.EISCONN:
            log.debug('%s: established TCP connection', self)

            if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
                log.debug('%s: initiating SSL handshake', self)
                self.state = ConnectionStates.HANDSHAKE
                self.config['state_change_callback'](self.node_id, self._sock, self)
                # _wrap_ssl can alter the connection state -- disconnects on failure
                self._wrap_ssl()

            elif self.config['security_protocol'] == 'SASL_PLAINTEXT':
                log.debug('%s: initiating SASL authentication', self)
                self.state = ConnectionStates.AUTHENTICATING
                self.config['state_change_callback'](self.node_id, self._sock, self)

            else:
                # security_protocol PLAINTEXT
                log.info('%s: Connection complete.', self)
                self.state = ConnectionStates.CONNECTED
                self._reset_reconnect_backoff()
                self.config['state_change_callback'](self.node_id, self._sock, self)

        # Connection failed
        # WSAEINVAL == 10022, but errno.WSAEINVAL is not available on non-win systems
        elif ret not in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK, 10022):
            log.error('Connect attempt to %s returned error %s.'
                      ' Disconnecting.', self, ret)
            errstr = errno.errorcode.get(ret, 'UNKNOWN')
            self.close(Errors.KafkaConnectionError('{} {}'.format(ret, errstr)))
            return self.state

        # Needs retry
        else:
            pass

    if self.state is ConnectionStates.HANDSHAKE:
        if self._try_handshake():
            log.debug('%s: completed SSL handshake.', self)
            if self.config['security_protocol'] == 'SASL_SSL':
                log.debug('%s: initiating SASL authentication', self)
                self.state = ConnectionStates.AUTHENTICATING
            else:
                log.info('%s: Connection complete.', self)
                self.state = ConnectionStates.CONNECTED
                self._reset_reconnect_backoff()
            self.config['state_change_callback'](self.node_id, self._sock, self)

    if self.state is ConnectionStates.AUTHENTICATING:
        assert self.config['security_protocol'] in ('SASL_PLAINTEXT', 'SASL_SSL')
        if self._try_authenticate():
            # _try_authenticate has side-effects: possibly disconnected on socket errors
            if self.state is ConnectionStates.AUTHENTICATING:
                log.info('%s: Connection complete.', self)
                self.state = ConnectionStates.CONNECTED
                self._reset_reconnect_backoff()
                self.config['state_change_callback'](self.node_id, self._sock, self)

    if self.state not in (ConnectionStates.CONNECTED,
                          ConnectionStates.DISCONNECTED):
        # Connection timed out
        request_timeout = self.config['request_timeout_ms'] / 1000.0
        if time.time() > request_timeout + self.last_attempt:
            log.error('Connection attempt to %s timed out', self)
            self.close(Errors.KafkaConnectionError('timeout'))
            return self.state

    return self.state
def _wrap_ssl(self):
    """Wrap the raw socket in an SSLSocket, building a default context if needed.

    On SSLError the connection is closed (side effect via self.close).
    """
    assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
    if self._ssl_context is None:
        log.debug('%s: configuring default SSL Context', self)
        self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)  # pylint: disable=no-member
        self._ssl_context.options |= ssl.OP_NO_SSLv2  # pylint: disable=no-member
        self._ssl_context.options |= ssl.OP_NO_SSLv3  # pylint: disable=no-member
        self._ssl_context.verify_mode = ssl.CERT_OPTIONAL
        if self.config['ssl_check_hostname']:
            self._ssl_context.check_hostname = True
        if self.config['ssl_cafile']:
            log.info('%s: Loading SSL CA from %s', self, self.config['ssl_cafile'])
            self._ssl_context.load_verify_locations(self.config['ssl_cafile'])
            # A CA file upgrades verification from optional to required.
            self._ssl_context.verify_mode = ssl.CERT_REQUIRED
        else:
            log.info('%s: Loading system default SSL CAs from %s', self, ssl.get_default_verify_paths())
            self._ssl_context.load_default_certs()
        if self.config['ssl_certfile'] and self.config['ssl_keyfile']:
            log.info('%s: Loading SSL Cert from %s', self, self.config['ssl_certfile'])
            log.info('%s: Loading SSL Key from %s', self, self.config['ssl_keyfile'])
            self._ssl_context.load_cert_chain(
                certfile=self.config['ssl_certfile'],
                keyfile=self.config['ssl_keyfile'],
                password=self.config['ssl_password'])
        if self.config['ssl_crlfile']:
            if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF'):
                raise RuntimeError('This version of Python does not support ssl_crlfile!')
            log.info('%s: Loading SSL CRL from %s', self, self.config['ssl_crlfile'])
            self._ssl_context.load_verify_locations(self.config['ssl_crlfile'])
            # pylint: disable=no-member
            self._ssl_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
        if self.config['ssl_ciphers']:
            log.info('%s: Setting SSL Ciphers: %s', self, self.config['ssl_ciphers'])
            self._ssl_context.set_ciphers(self.config['ssl_ciphers'])
    log.debug('%s: wrapping socket in ssl context', self)
    try:
        self._sock = self._ssl_context.wrap_socket(
            self._sock,
            server_hostname=self.host,
            do_handshake_on_connect=False)
    except ssl.SSLError as e:
        log.exception('%s: Failed to wrap socket in SSLContext!', self)
        self.close(e)
def _try_handshake(self):
    """Advance the non-blocking TLS handshake.

    Returns True once the handshake completes; False when the socket is not
    ready yet or the server closed the connection (which also closes us).
    Other SSLErrors propagate to the caller.
    """
    assert self.config['security_protocol'] in ('SSL', 'SASL_SSL')
    try:
        self._sock.do_handshake()
    # old ssl in python2.6 will swallow all SSLErrors here...
    except (SSLWantReadError, SSLWantWriteError):
        # Socket not ready; retry on the next poll.
        return False
    except (SSLZeroReturnError, ConnectionError, TimeoutError, SSLEOFError):
        log.warning('SSL connection closed by server during handshake.')
        self.close(Errors.KafkaConnectionError('SSL connection closed by server during handshake'))
        return False
    return True
def _try_authenticate(self):
    """Drive SASL authentication; return True once it has succeeded.

    Sends the SaslHandShakeRequest on first call and caches the resulting
    future; subsequent calls pump received responses into that future.
    """
    assert self.config['api_version'] is None or self.config['api_version'] >= (0, 10)

    if self._sasl_auth_future is None:
        # Build a SaslHandShakeRequest message
        request = SaslHandShakeRequest[0](self.config['sasl_mechanism'])
        future = Future()
        sasl_response = self._send(request)
        sasl_response.add_callback(self._handle_sasl_handshake_response, future)
        sasl_response.add_errback(lambda f, e: f.failure(e), future)
        self._sasl_auth_future = future

    for r, f in self.recv():
        f.success(r)

    # A connection error could trigger close() which will reset the future
    if self._sasl_auth_future is None:
        return False
    elif self._sasl_auth_future.failed():
        ex = self._sasl_auth_future.exception
        # Re-raise non-connection errors (e.g. authentication failures)
        if not isinstance(ex, Errors.KafkaConnectionError):
            raise ex  # pylint: disable-msg=raising-bad-type
    return self._sasl_auth_future.succeeded()
def _handle_sasl_handshake_response(self, future, response):
    """Dispatch to the mechanism-specific authenticator after the handshake.

    Fails the future if the broker reported an error or does not support
    the configured sasl_mechanism.
    """
    error_type = Errors.for_code(response.error_code)
    if error_type is not Errors.NoError:
        error = error_type(self)
        self.close(error=error)
        return future.failure(error_type(self))

    if self.config['sasl_mechanism'] not in response.enabled_mechanisms:
        return future.failure(
            Errors.UnsupportedSaslMechanismError(
                'Kafka broker does not support %s sasl mechanism. Enabled mechanisms are: %s'
                % (self.config['sasl_mechanism'], response.enabled_mechanisms)))
    elif self.config['sasl_mechanism'] == 'PLAIN':
        return self._try_authenticate_plain(future)
    elif self.config['sasl_mechanism'] == 'GSSAPI':
        return self._try_authenticate_gssapi(future)
    elif self.config['sasl_mechanism'] == 'OAUTHBEARER':
        return self._try_authenticate_oauth(future)
    elif self.config['sasl_mechanism'].startswith("SCRAM-SHA-"):
        # Covers both SCRAM-SHA-256 and SCRAM-SHA-512
        return self._try_authenticate_scram(future)
    else:
        return future.failure(
            Errors.UnsupportedSaslMechanismError(
                'kafka-python does not support SASL mechanism %s' %
                self.config['sasl_mechanism']))
def _send_bytes(self, data):
    """Send some data via non-blocking IO

    Note: this method is not synchronized internally; you should
    always hold the _lock before calling

    Returns: number of bytes
    Raises: socket exception
    """
    total_sent = 0
    while total_sent < len(data):
        try:
            sent_bytes = self._sock.send(data[total_sent:])
            total_sent += sent_bytes
        except (SSLWantReadError, SSLWantWriteError):
            # SSL socket not ready for writing; caller retries the rest later.
            break
        except (ConnectionError, TimeoutError) as e:
            if six.PY2 and e.errno == errno.EWOULDBLOCK:
                # py2 raises socket.error for EWOULDBLOCK; treat as not-ready.
                break
            raise
        except BlockingIOError:
            # py3 signals a would-block condition with BlockingIOError.
            if six.PY3:
                break
            raise
    return total_sent
def _send_bytes_blocking(self, data):
    """Send all of `data`, blocking with request_timeout_ms as socket timeout.

    Restores the socket to non-blocking (timeout 0.0) on exit.
    Returns the number of bytes sent; raises on timeout/connection errors.
    """
    self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
    total_sent = 0
    try:
        while total_sent < len(data):
            sent_bytes = self._sock.send(data[total_sent:])
            total_sent += sent_bytes
        if total_sent != len(data):
            raise ConnectionError('Buffer overrun during socket send')
        return total_sent
    finally:
        self._sock.settimeout(0.0)
def _recv_bytes_blocking(self, n):
    """Read exactly `n` bytes, blocking with request_timeout_ms as timeout.

    Restores the socket to non-blocking (timeout 0.0) on exit.
    Raises ConnectionError if the peer closes before `n` bytes arrive.
    """
    self._sock.settimeout(self.config['request_timeout_ms'] / 1000)
    try:
        chunks = []
        remaining = n
        while remaining:
            fragment = self._sock.recv(remaining)
            if not fragment:
                raise ConnectionError('Connection reset during recv')
            chunks.append(fragment)
            remaining -= len(fragment)
        return b''.join(chunks)
    finally:
        self._sock.settimeout(0.0)
def _try_authenticate_plain(self, future):
    """Authenticate using SASL PLAIN (RFC 4616) over the established socket."""
    if self.config['security_protocol'] == 'SASL_PLAINTEXT':
        log.warning('%s: Sending username and password in the clear', self)

    data = b''
    # Send PLAIN credentials per RFC-4616
    # (authzid \0 authcid \0 password -- authzid set equal to authcid here)
    msg = bytes('\0'.join([self.config['sasl_plain_username'],
                           self.config['sasl_plain_username'],
                           self.config['sasl_plain_password']]).encode('utf-8'))
    size = Int32.encode(len(msg))

    err = None
    close = False
    with self._lock:
        if not self._can_send_recv():
            err = Errors.NodeNotReadyError(str(self))
            close = False
        else:
            try:
                self._send_bytes_blocking(size + msg)

                # The server will send a zero sized message (that is Int32(0)) on success.
                # The connection is closed on failure
                data = self._recv_bytes_blocking(4)

            except (ConnectionError, TimeoutError) as e:
                log.exception("%s: Error receiving reply from server", self)
                err = Errors.KafkaConnectionError("%s: %s" % (self, e))
                close = True

    if err is not None:
        if close:
            self.close(error=err)
        return future.failure(err)

    if data != b'\x00\x00\x00\x00':
        error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
        return future.failure(error)

    log.info('%s: Authenticated as %s via PLAIN', self, self.config['sasl_plain_username'])
    return future.success(True)
def _try_authenticate_scram(self, future):
    """Authenticate via SCRAM-SHA-256/512 using a blocking challenge exchange."""
    if self.config['security_protocol'] == 'SASL_PLAINTEXT':
        log.warning('%s: Exchanging credentials in the clear', self)

    scram_client = ScramClient(
        self.config['sasl_plain_username'], self.config['sasl_plain_password'], self.config['sasl_mechanism']
    )

    err = None
    close = False
    with self._lock:
        if not self._can_send_recv():
            err = Errors.NodeNotReadyError(str(self))
            close = False
        else:
            try:
                # client-first -> server-first -> client-final -> server-final
                client_first = scram_client.first_message().encode('utf-8')
                size = Int32.encode(len(client_first))
                self._send_bytes_blocking(size + client_first)

                (data_len,) = struct.unpack('>i', self._recv_bytes_blocking(4))
                server_first = self._recv_bytes_blocking(data_len).decode('utf-8')
                scram_client.process_server_first_message(server_first)

                client_final = scram_client.final_message().encode('utf-8')
                size = Int32.encode(len(client_final))
                self._send_bytes_blocking(size + client_final)

                (data_len,) = struct.unpack('>i', self._recv_bytes_blocking(4))
                server_final = self._recv_bytes_blocking(data_len).decode('utf-8')
                # Raises if the server signature does not verify.
                scram_client.process_server_final_message(server_final)

            except (ConnectionError, TimeoutError) as e:
                log.exception("%s: Error receiving reply from server", self)
                err = Errors.KafkaConnectionError("%s: %s" % (self, e))
                close = True

    if err is not None:
        if close:
            self.close(error=err)
        return future.failure(err)

    log.info(
        '%s: Authenticated as %s via %s', self, self.config['sasl_plain_username'], self.config['sasl_mechanism']
    )
    return future.success(True)
def _try_authenticate_gssapi(self, future):
    """Dispatch GSSAPI auth to whichever kerberos backend was importable."""
    if gssapi is None and sspi is not None:
        return self._try_authenticate_gssapi_sspi_implementation(future)
    if gssapi is not None:
        return self._try_authenticate_gssapi_gss_implementation(future)
    return None
def _try_authenticate_gssapi_gss_implementation(self, future):
    """Authenticate via GSSAPI/Kerberos using the python-gssapi library.

    Exchanges tokens over the socket until the security context is
    established, then completes the RFC 2222 security-layer negotiation.
    """
    kerberos_host_name = self.config['sasl_kerberos_domain_name'] or self.host
    auth_id = self.config['sasl_kerberos_service_name'] + '@' + kerberos_host_name
    gssapi_name = gssapi.Name(
        auth_id,
        name_type=gssapi.NameType.hostbased_service
    ).canonicalize(gssapi.MechType.kerberos)
    log.debug('%s: GSSAPI Service Principal Name: %s', self, gssapi_name)

    err = None
    close = False
    with self._lock:
        if not self._can_send_recv():
            err = Errors.NodeNotReadyError(str(self))
            close = False
        else:
            # Establish security context and negotiate protection level
            # For reference RFC 2222, section 7.2.1
            try:
                # Exchange tokens until authentication either succeeds or fails
                client_ctx = gssapi.SecurityContext(name=gssapi_name, usage='initiate')
                received_token = None
                while not client_ctx.complete:
                    # calculate an output token from kafka token (or None if first iteration)
                    output_token = client_ctx.step(received_token)

                    # pass output token to kafka, or send empty response if the security
                    # context is complete (output token is None in that case)
                    if output_token is None:
                        self._send_bytes_blocking(Int32.encode(0))
                    else:
                        msg = output_token
                        size = Int32.encode(len(msg))
                        self._send_bytes_blocking(size + msg)

                    # The server will send a token back. Processing of this token either
                    # establishes a security context, or it needs further token exchange.
                    # The gssapi will be able to identify the needed next step.
                    # The connection is closed on failure.
                    header = self._recv_bytes_blocking(4)
                    (token_size,) = struct.unpack('>i', header)
                    received_token = self._recv_bytes_blocking(token_size)

                # Process the security layer negotiation token, sent by the server
                # once the security context is established.

                # unwraps message containing supported protection levels and msg size
                msg = client_ctx.unwrap(received_token).message
                # Kafka currently doesn't support integrity or confidentiality security layers, so we
                # simply set QoP to 'auth' only (first octet). We reuse the max message size proposed
                # by the server
                msg = Int8.encode(SASL_QOP_AUTH & Int8.decode(io.BytesIO(msg[0:1]))) + msg[1:]
                # add authorization identity to the response, GSS-wrap and send it
                msg = client_ctx.wrap(msg + auth_id.encode(), False).message
                size = Int32.encode(len(msg))
                self._send_bytes_blocking(size + msg)

            except (ConnectionError, TimeoutError) as e:
                log.exception("%s: Error receiving reply from server", self)
                err = Errors.KafkaConnectionError("%s: %s" % (self, e))
                close = True
            except Exception as e:
                # GSSError and friends: fail the future without closing the socket.
                err = e
                close = True

    if err is not None:
        if close:
            self.close(error=err)
        return future.failure(err)

    log.info(
        '%s: Authenticated as %s to %s via GSSAPI',
        self,
        client_ctx.initiator_name,
        client_ctx.target_name
    )
    return future.success(True)
def _try_authenticate_gssapi_sspi_implementation(self, future):
    """SASL/GSSAPI authentication using the Windows SSPI API.

    Performs the RFC 4752 token exchange with the broker over blocking
    socket I/O until a security context is established, then completes the
    security-layer negotiation: QoP is forced to 'auth' only, since Kafka
    supports neither integrity nor confidentiality layers.

    Arguments:
        future (Future): resolved with True on success; failed with the
            triggering exception otherwise. The connection is closed on
            I/O errors.

    Returns:
        Future: the same future, resolved or failed.
    """
    global log_sspi
    log_sspi = logging.getLogger("kafka.conn.sspi")

    kerberos_host_name = self.config['sasl_kerberos_domain_name'] or self.host
    service_principal_name = self.config['sasl_kerberos_service_name'] + '/' + kerberos_host_name
    scheme = "Kerberos"  # Do not try with Negotiate for SASL authentication. Tokens are different.
    # https://docs.microsoft.com/en-us/windows/win32/secauthn/context-requirements
    flags = (
        sspicon.ISC_REQ_MUTUAL_AUTH |      # mutual authentication
        sspicon.ISC_REQ_INTEGRITY |        # check for integrity
        sspicon.ISC_REQ_SEQUENCE_DETECT |  # enable out-of-order messages
        sspicon.ISC_REQ_CONFIDENTIALITY    # request confidentiality
    )

    err = None
    close = False
    with self._lock:
        if not self._can_send_recv():
            # Not an I/O failure, so the connection is left open
            err = Errors.NodeNotReadyError(str(self))
            close = False
        else:
            # Establish security context and negotiate protection level
            # For reference see RFC 4752, section 3
            try:
                log_sspi.debug("Create client security context")
                # instantiate sspi context
                client_ctx = sspi.ClientAuth(
                    scheme,
                    targetspn=service_principal_name,
                    scflags=flags,
                )

                # Print some SSPI implementation
                log_sspi.info("Using %s SSPI Security Package (%s)", client_ctx.pkg_info["Name"], client_ctx.pkg_info["Comment"])

                # Exchange tokens until authentication either succeeds or fails
                log_sspi.debug("Beginning rounds...")
                received_token = None  # no token to pass when initiating the first round
                while not client_ctx.authenticated:
                    # calculate an output token from kafka token (or None on first iteration)
                    # https://docs.microsoft.com/en-us/windows/win32/api/sspi/nf-sspi-initializesecuritycontexta
                    # https://docs.microsoft.com/en-us/windows/win32/secauthn/initializesecuritycontext--kerberos
                    # authorize method will wrap for us our token in sspi structures
                    log_sspi.debug("Exchange a token")
                    error, auth = client_ctx.authorize(received_token)
                    if len(auth) > 0 and len(auth[0].Buffer):
                        log_sspi.debug("Got token from context")
                        # this buffer must be sent to the server whatever the result is
                        output_token = auth[0].Buffer
                    else:
                        log_sspi.debug("Got no token, exchange finished")
                        # seems to be the end of the loop
                        output_token = None

                    # pass output token to kafka, or send empty response if the security
                    # context is complete (output token is None in that case)
                    if output_token is None:
                        log_sspi.debug("Sending end of exchange to server")
                        self._send_bytes_blocking(Int32.encode(0))
                    else:
                        log_sspi.debug("Sending token from local context to server")
                        msg = output_token
                        size = Int32.encode(len(msg))
                        self._send_bytes_blocking(size + msg)

                    # The server will send a token back. Processing of this token either
                    # establishes a security context, or it needs further token exchange.
                    # The gssapi will be able to identify the needed next step.
                    # The connection is closed on failure.
                    header = self._recv_bytes_blocking(4)
                    (token_size,) = struct.unpack('>i', header)
                    received_token = self._recv_bytes_blocking(token_size)
                    log_sspi.debug("Received token from server (size %s)", token_size)

                # Process the security layer negotiation token, sent by the server
                # once the security context is established.

                # The following part is required by SASL, but not by classic Kerberos.
                # See RFC 4752

                # unwraps message containing supported protection levels and msg size
                msg, was_encrypted = client_ctx.unwrap(received_token)

                # Kafka currently doesn't support integrity or confidentiality security layers, so we
                # simply set QoP to 'auth' only (first octet). We reuse the max message size proposed
                # by the server
                msg = Int8.encode(SASL_QOP_AUTH & Int8.decode(io.BytesIO(msg[0:1]))) + msg[1:]

                # add authorization identity to the response, GSS-wrap and send it
                msg = msg + service_principal_name.encode("utf-8")
                msg = client_ctx.wrap(msg)
                size = Int32.encode(len(msg))
                self._send_bytes_blocking(size + msg)

            except (ConnectionError, TimeoutError) as e:
                log.exception("%s: Error receiving reply from server", self)
                err = Errors.KafkaConnectionError("%s: %s" % (self, e))
                close = True
            except Exception as e:
                # Any SSPI-level failure also closes the connection
                err = e
                close = True

    if err is not None:
        if close:
            self.close(error=err)
        return future.failure(err)

    # noinspection PyUnresolvedReferences
    log.info(
        '%s: Authenticated as %s to %s via SSPI/GSSAPI \\o/',
        self,
        client_ctx.initiator_name,
        client_ctx.service_name
    )
    return future.success(True)
def _try_authenticate_oauth(self, future):
    """SASL/OAUTHBEARER authentication.

    Sends a single size-prefixed initial client response containing the
    bearer token, then expects an empty (zero-length) reply on success.

    Arguments:
        future (Future): resolved with True on success; failed with the
            triggering exception otherwise.

    Returns:
        Future: the same future, resolved or failed.
    """
    data = b''
    # initial client response per the OAUTHBEARER GS2 header format
    msg = bytes(self._build_oauth_client_request().encode("utf-8"))
    size = Int32.encode(len(msg))

    err = None
    close = False
    with self._lock:
        if not self._can_send_recv():
            # Not an I/O failure; leave the connection open
            err = Errors.NodeNotReadyError(str(self))
            close = False
        else:
            try:
                # Send SASL OAuthBearer request with OAuth token
                self._send_bytes_blocking(size + msg)

                # The server will send a zero sized message (that is Int32(0)) on success.
                # The connection is closed on failure
                data = self._recv_bytes_blocking(4)

            except (ConnectionError, TimeoutError) as e:
                log.exception("%s: Error receiving reply from server", self)
                err = Errors.KafkaConnectionError("%s: %s" % (self, e))
                close = True

    if err is not None:
        if close:
            self.close(error=err)
        return future.failure(err)

    if data != b'\x00\x00\x00\x00':
        error = Errors.AuthenticationFailedError('Unrecognized response during authentication')
        return future.failure(error)

    log.info('%s: Authenticated via OAuth', self)
    return future.success(True)
def _build_oauth_client_request(self):
    """Compose the SASL/OAUTHBEARER initial client response string.

    The GS2 header ("n,,"), the bearer token and any optional extensions
    are joined with \\x01 separators as required by the mechanism.
    """
    provider = self.config['sasl_oauth_token_provider']
    bearer_token = provider.token()
    extensions = self._token_extensions()
    return "n,,\x01auth=Bearer {}{}\x01\x01".format(bearer_token, extensions)
def _token_extensions(self):
    """
    Return a string representation of the OPTIONAL key-value pairs that can be sent with an OAUTHBEARER
    initial request.
    """
    provider = self.config['sasl_oauth_token_provider']
    # extensions() is an optional method on the client's Token Provider class
    extensions_fn = getattr(provider, "extensions", None)
    if not callable(extensions_fn):
        return ""
    extensions = extensions_fn()
    if len(extensions) == 0:
        return ""
    # Build a \x01-separated string of key=value pairs, with a leading separator
    pairs = ("{}={}".format(key, value) for key, value in extensions.items())
    return "\x01" + "\x01".join(pairs)
def blacked_out(self):
    """
    Return true if we are disconnected from the given node and can't
    re-establish a connection yet (the reconnect backoff window has not
    elapsed since the last attempt)
    """
    if self.state is not ConnectionStates.DISCONNECTED:
        return False
    backoff_expires_at = self.last_attempt + self._reconnect_backoff
    return time.time() < backoff_expires_at
def connection_delay(self):
    """
    Return the number of milliseconds to wait, based on the connection
    state, before attempting to send data. When disconnected, this respects
    the reconnect backoff time. When connecting or connected, returns a very
    large number to handle slow/stalled connections.
    """
    if self.state is not ConnectionStates.DISCONNECTED:
        # When connecting or connected, we should be able to delay
        # indefinitely since other events (connection or data acked) will
        # cause a wakeup once data can be sent.
        return float('inf')
    elapsed = time.time() - (self.last_attempt or 0)
    remaining_backoff = self._reconnect_backoff - elapsed
    return max(remaining_backoff, 0) * 1000
def connected(self):
    """Return True iff the connection state machine has reached CONNECTED."""
    return ConnectionStates.CONNECTED is self.state
def connecting(self):
    """Returns True if still connecting.

    This encompasses every intermediate state: TCP connect, SSL handshake
    and SASL authorization.
    """
    in_progress_states = (ConnectionStates.CONNECTING,
                          ConnectionStates.HANDSHAKE,
                          ConnectionStates.AUTHENTICATING)
    return self.state in in_progress_states
def disconnected(self):
    """Return True iff socket is closed (state is DISCONNECTED)."""
    return ConnectionStates.DISCONNECTED is self.state
def _reset_reconnect_backoff(self):
    """Clear the failure counter and restore the configured base backoff."""
    base_backoff_ms = self.config['reconnect_backoff_ms']
    self._failures = 0
    self._reconnect_backoff = base_backoff_ms / 1000.0
def _update_reconnect_backoff(self):
    """Grow the reconnect backoff after a failed connection attempt.

    The backoff doubles per consecutive failure, is capped at
    reconnect_backoff_max_ms and jittered by +/-20%. Attempts are not
    counted as failures while unresolved DNS entries remain to try.
    """
    # Do not mark as failure if there are more dns entries available to try
    if self._gai:
        return
    base_ms = self.config['reconnect_backoff_ms']
    cap_ms = self.config['reconnect_backoff_max_ms']
    if cap_ms > base_ms:
        self._failures += 1
        backoff_ms = min(base_ms * 2 ** (self._failures - 1), cap_ms)
        backoff_ms *= uniform(0.8, 1.2)
        self._reconnect_backoff = backoff_ms / 1000.0
        log.debug('%s: reconnect backoff %s after %s failures', self, self._reconnect_backoff, self._failures)
def _close_socket(self):
    """Close and drop the raw socket, if one is currently held."""
    sock = getattr(self, '_sock', None)
    if sock is None:
        return
    sock.close()
    self._sock = None
def __del__(self):
    # Best-effort cleanup on garbage collection: release the raw socket
    # so an abandoned connection does not leak its file descriptor.
    self._close_socket()
def close(self, error=None):
    """Close socket and fail all in-flight-requests.

    Resets the protocol parser and send buffer, updates the reconnect
    backoff, and transitions the state machine to DISCONNECTED. The
    state-change callback and the in-flight futures are processed only
    after the lock is released (see comments below).

    Arguments:
        error (Exception, optional): pending in-flight-requests
            will be failed with this exception.
            Default: kafka.errors.KafkaConnectionError.
    """
    # Fast path without the lock; re-checked under the lock below
    if self.state is ConnectionStates.DISCONNECTED:
        return
    with self._lock:
        if self.state is ConnectionStates.DISCONNECTED:
            return
        log.info('%s: Closing connection. %s', self, error or '')
        self._update_reconnect_backoff()
        self._sasl_auth_future = None
        # Fresh protocol parser: any partially received bytes are dropped
        self._protocol = KafkaProtocol(
            client_id=self.config['client_id'],
            api_version=self.config['api_version'])
        self._send_buffer = b''
        if error is None:
            error = Errors.Cancelled(str(self))
        ifrs = list(self.in_flight_requests.items())
        self.in_flight_requests.clear()
        self.state = ConnectionStates.DISCONNECTED
        # To avoid race conditions and/or deadlocks
        # keep a reference to the socket but leave it
        # open until after the state_change_callback
        # This should give clients a chance to deregister
        # the socket fd from selectors cleanly.
        sock = self._sock
        self._sock = None

    # drop lock before state change callback and processing futures
    self.config['state_change_callback'](self.node_id, sock, self)
    sock.close()
    for (_correlation_id, (future, _timestamp)) in ifrs:
        future.failure(error)
def _can_send_recv(self):
    """Return True iff socket is ready for requests / responses"""
    usable_states = (ConnectionStates.AUTHENTICATING,
                     ConnectionStates.CONNECTED)
    return self.state in usable_states
def send(self, request, blocking=True):
    """Queue request for async network send, return Future()

    The future is failed immediately when the connection is still being
    established, is not connected, or the in-flight limit is reached.
    """
    future = Future()
    if self.connecting():
        return future.failure(Errors.NodeNotReadyError(str(self)))
    if not self.connected():
        return future.failure(Errors.KafkaConnectionError(str(self)))
    if not self.can_send_more():
        return future.failure(Errors.TooManyInFlightRequests(str(self)))
    return self._send(request, blocking=blocking)
def _send(self, request, blocking=True):
    """Encode *request* into the protocol send queue and return its Future.

    Must only be called after the state checks in send(); performs the
    in-flight-request bookkeeping under the connection lock.
    """
    future = Future()
    with self._lock:
        if not self._can_send_recv():
            # In this case, since we created the future above,
            # we know there are no callbacks/errbacks that could fire w/
            # lock. So failing + returning inline should be safe
            return future.failure(Errors.NodeNotReadyError(str(self)))

        correlation_id = self._protocol.send_request(request)

        log.debug('%s Request %d: %s', self, correlation_id, request)
        if request.expect_response():
            sent_time = time.time()
            assert correlation_id not in self.in_flight_requests, 'Correlation ID already in-flight!'
            self.in_flight_requests[correlation_id] = (future, sent_time)
        else:
            # Fire-and-forget request: resolve immediately
            future.success(None)

    # Attempt to replicate behavior from prior to introduction of
    # send_pending_requests() / async sends
    if blocking:
        self.send_pending_requests()

    return future
def send_pending_requests(self):
    """Attempts to send pending requests messages via blocking IO
    If all requests have been sent, return True

    Otherwise, if the socket is blocked and there are more bytes to send,
    return False.

    Any connection error while flushing closes the connection.
    """
    try:
        with self._lock:
            if not self._can_send_recv():
                return False
            data = self._protocol.send_bytes()
            total_bytes = self._send_bytes_blocking(data)

        # Record metrics outside the lock
        if self._sensors:
            self._sensors.bytes_sent.record(total_bytes)
        return True

    except (ConnectionError, TimeoutError) as e:
        log.exception("Error sending request data to %s", self)
        error = Errors.KafkaConnectionError("%s: %s" % (self, e))
        self.close(error=error)
        return False
def send_pending_requests_v2(self):
    """Attempts to send pending requests messages via non-blocking IO
    If all requests have been sent, return True

    Otherwise, if the socket is blocked and there are more bytes to send,
    return False.

    Any failure while flushing closes the connection and returns False.
    """
    try:
        with self._lock:
            if not self._can_send_recv():
                return False

            # _protocol.send_bytes returns encoded requests to send
            # we send them via _send_bytes()
            # and hold leftover bytes in _send_buffer
            if not self._send_buffer:
                self._send_buffer = self._protocol.send_bytes()

            total_bytes = 0
            if self._send_buffer:
                total_bytes = self._send_bytes(self._send_buffer)
                self._send_buffer = self._send_buffer[total_bytes:]

        # Record metrics outside the lock
        if self._sensors:
            self._sensors.bytes_sent.record(total_bytes)
        # Return True iff send buffer is empty
        return len(self._send_buffer) == 0

    except Exception as e:
        # Deliberately broad: any failure while flushing must close the
        # connection. The previous tuple
        # (ConnectionError, TimeoutError, Exception) was redundant --
        # Exception already subsumes the other two -- so it is simplified
        # here without changing the caught set.
        log.exception("Error sending request data to %s", self)
        error = Errors.KafkaConnectionError("%s: %s" % (self, e))
        self.close(error=error)
        return False
def can_send_more(self):
    """Return True unless there are max_in_flight_requests_per_connection."""
    limit = self.config['max_in_flight_requests_per_connection']
    return len(self.in_flight_requests) < limit
def recv(self):
    """Non-blocking network receive.

    Return list of (response, future) tuples. Closes the connection (and
    returns an empty tuple) when requests have timed out or an
    unrecognized correlation id is received.
    """
    responses = self._recv()
    if not responses and self.requests_timed_out():
        log.warning('%s timed out after %s ms. Closing connection.',
                    self, self.config['request_timeout_ms'])
        self.close(error=Errors.RequestTimedOutError(
            'Request timed out after %s ms' %
            self.config['request_timeout_ms']))
        return ()

    # augment responses w/ correlation_id, future, and timestamp
    for i, (correlation_id, response) in enumerate(responses):
        try:
            with self._lock:
                (future, timestamp) = self.in_flight_requests.pop(correlation_id)
        except KeyError:
            # Response for a request we never sent (or already handled)
            self.close(Errors.KafkaConnectionError('Received unrecognized correlation id'))
            return ()
        latency_ms = (time.time() - timestamp) * 1000
        if self._sensors:
            self._sensors.request_time.record(latency_ms)

        log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
        responses[i] = (response, future)

    return responses
def _recv(self):
    """Take all available bytes from socket, return list of any responses from parser

    Reads up to sock_chunk_buffer_count chunks of sock_chunk_bytes each.
    On a connection error the socket is closed and an empty tuple is
    returned.
    """
    recvd = []
    err = None
    with self._lock:
        if not self._can_send_recv():
            log.warning('%s cannot recv: socket not connected', self)
            return ()

        while len(recvd) < self.config['sock_chunk_buffer_count']:
            try:
                data = self._sock.recv(self.config['sock_chunk_bytes'])
                # We expect socket.recv to raise an exception if there are no
                # bytes available to read from the socket in non-blocking mode.
                # but if the socket is disconnected, we will get empty data
                # without an exception raised
                if not data:
                    log.error('%s: socket disconnected', self)
                    err = Errors.KafkaConnectionError('socket disconnected')
                    break
                else:
                    recvd.append(data)

            except (SSLWantReadError, SSLWantWriteError):
                # TLS layer needs more I/O before plaintext is available
                break
            except (ConnectionError, TimeoutError) as e:
                if six.PY2 and e.errno == errno.EWOULDBLOCK:
                    # PY2 signals "no data available" via EWOULDBLOCK
                    break
                log.exception('%s: Error receiving network data'
                              ' closing socket', self)
                err = Errors.KafkaConnectionError(e)
                break
            except BlockingIOError:
                if six.PY3:
                    # PY3: no more data available right now
                    break
                # For PY2 this is a catchall and should be re-raised
                raise

        # Only process bytes if there was no connection exception
        if err is None:
            recvd_data = b''.join(recvd)
            if self._sensors:
                self._sensors.bytes_received.record(len(recvd_data))

            # We need to keep the lock through protocol receipt
            # so that we ensure that the processed byte order is the
            # same as the received byte order
            try:
                return self._protocol.receive_bytes(recvd_data)
            except Errors.KafkaProtocolError as e:
                err = e

    # Reached only on error; close outside the lock
    self.close(error=err)
    return ()
def requests_timed_out(self):
    """Return True when the oldest in-flight request is older than
    request_timeout_ms."""
    with self._lock:
        ifrs = self.in_flight_requests
        if not ifrs:
            return False
        # each value is a (future, sent_timestamp) pair
        oldest_sent_at = min(timestamp for (_future, timestamp) in ifrs.values())
        timeout_seconds = self.config['request_timeout_ms'] / 1000.0
        return time.time() >= oldest_sent_at + timeout_seconds
def _handle_api_version_response(self, response):
    """Cache and return {api_key: (min_version, max_version)} parsed from
    an ApiVersionResponse. Asserts the broker reported no error."""
    error_type = Errors.for_code(response.error_code)
    assert error_type is Errors.NoError, "API version check failed"
    self._api_versions = {
        api_key: (min_version, max_version)
        for api_key, min_version, max_version in response.api_versions
    }
    return self._api_versions
def get_api_versions(self):
    """Return the broker's supported API version map, probing the broker
    first when it has not been cached yet.

    Raises UnsupportedVersionError for clusters older than 0.10.0, which
    do not support the ApiVersion request.
    """
    cached = self._api_versions
    if cached is not None:
        return cached

    version = self.check_version()
    if version < (0, 10, 0):
        raise Errors.UnsupportedVersionError(
            "ApiVersion not supported by cluster version {} < 0.10.0"
            .format(version))
    # _api_versions is populated as a side effect of check_version() on a
    # cluster that supports 0.10.0 or later
    return self._api_versions
def _infer_broker_version_from_api_versions(self, api_versions):
    """Map a broker's {api_key: (min, max)} support table to a broker
    version tuple by probing for version-specific request structs."""
    # The logic here is to check the list of supported request versions
    # in reverse order. As soon as we find one that works, return it
    test_cases = [
        # format (<broker version>, <needed struct>)
        ((2, 6, 0), DescribeClientQuotasRequest[0]),
        ((2, 5, 0), DescribeAclsRequest_v2),
        ((2, 4, 0), ProduceRequest[8]),
        ((2, 3, 0), FetchRequest[11]),
        ((2, 2, 0), OffsetRequest[5]),
        ((2, 1, 0), FetchRequest[10]),
        ((2, 0, 0), FetchRequest[8]),
        ((1, 1, 0), FetchRequest[7]),
        ((1, 0, 0), MetadataRequest[5]),
        ((0, 11, 0), MetadataRequest[4]),
        ((0, 10, 2), OffsetFetchRequest[2]),
        ((0, 10, 1), MetadataRequest[2]),
    ]

    # Get the best match of test cases
    for broker_version, struct in sorted(test_cases, reverse=True):
        if struct.API_KEY not in api_versions:
            continue
        min_version, max_version = api_versions[struct.API_KEY]
        if min_version <= struct.API_VERSION <= max_version:
            return broker_version

    # We know that ApiVersionResponse is only supported in 0.10+
    # so if all else fails, choose that
    return (0, 10, 0)
def check_version(self, timeout=2, strict=False, topics=None):
    """Attempt to guess the broker version.

    Note: This is a blocking call.

    Arguments:
        timeout (float): overall time budget, in seconds, for the probe.
        strict (bool): when True, assert that probe failures match the
            expected failure modes for each candidate version.
        topics (list, optional): topics to include in the probing
            MetadataRequests. Defaults to an empty list.

    Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
    """
    # Fix: use a None sentinel instead of the mutable default `topics=[]`
    if topics is None:
        topics = []
    timeout_at = time.time() + timeout
    log.info('Probing node %s broker version', self.node_id)
    # Monkeypatch some connection configurations to avoid timeouts
    override_config = {
        'request_timeout_ms': timeout * 1000,
        'max_in_flight_requests_per_connection': 5
    }
    stashed = {}
    for key in override_config:
        stashed[key] = self.config[key]
        self.config[key] = override_config[key]

    def reset_override_configs():
        # Restore the stashed connection configuration
        for key in stashed:
            self.config[key] = stashed[key]

    # kafka kills the connection when it doesn't recognize an API request
    # so we can send a test request and then follow immediately with a
    # vanilla MetadataRequest. If the server did not recognize the first
    # request, both will be failed with a ConnectionError that wraps
    # socket.error (32, 54, or 104)
    from kafka.protocol.admin import ApiVersionRequest, ListGroupsRequest
    from kafka.protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest

    test_cases = [
        # All cases starting from 0.10 will be based on ApiVersionResponse
        ((0, 10), ApiVersionRequest[0]()),
        ((0, 9), ListGroupsRequest[0]()),
        ((0, 8, 2), GroupCoordinatorRequest[0]('kafka-python-default-group')),
        ((0, 8, 1), OffsetFetchRequest[0]('kafka-python-default-group', [])),
        ((0, 8, 0), MetadataRequest[0](topics)),
    ]

    for version, request in test_cases:
        if not self.connect_blocking(timeout_at - time.time()):
            reset_override_configs()
            raise Errors.NodeNotReadyError()
        f = self.send(request)
        # HACK: sleeping to wait for socket to send bytes
        time.sleep(0.1)
        # when broker receives an unrecognized request API
        # it abruptly closes our socket.
        # so we attempt to send a second request immediately
        # that we believe it will definitely recognize (metadata)
        # the attempt to write to a disconnected socket should
        # immediately fail and allow us to infer that the prior
        # request was unrecognized
        mr = self.send(MetadataRequest[0](topics))

        selector = self.config['selector']()
        selector.register(self._sock, selectors.EVENT_READ)
        while not (f.is_done and mr.is_done):
            selector.select(1)
            for response, future in self.recv():
                future.success(response)
        selector.close()

        if f.succeeded():
            if isinstance(request, ApiVersionRequest[0]):
                # Starting from 0.10 kafka broker we determine version
                # by looking at ApiVersionResponse
                api_versions = self._handle_api_version_response(f.value)
                version = self._infer_broker_version_from_api_versions(api_versions)
            log.info('Broker version identified as %s', '.'.join(map(str, version)))
            log.info('Set configuration api_version=%s to skip auto'
                     ' check_version requests on startup', version)
            break

        # Only enable strict checking to verify that we understand failure
        # modes. For most users, the fact that the request failed should be
        # enough to rule out a particular broker version.
        if strict:
            # If the socket flush hack did not work (which should force the
            # connection to close and fail all pending requests), then we
            # get a basic Request Timeout. This is not ideal, but we'll deal
            if isinstance(f.exception, Errors.RequestTimedOutError):
                pass

            # 0.9 brokers do not close the socket on unrecognized api
            # requests (bug...). In this case we expect to see a correlation
            # id mismatch
            elif (isinstance(f.exception, Errors.CorrelationIdError) and
                  version == (0, 10)):
                pass
            elif six.PY2:
                assert isinstance(f.exception.args[0], socket.error)
                assert f.exception.args[0].errno in (32, 54, 104)
            else:
                assert isinstance(f.exception.args[0], ConnectionError)
        log.info("Broker is not v%s -- it did not recognize %s",
                 version, request.__class__.__name__)
    else:
        # for-else: every candidate failed
        reset_override_configs()
        raise Errors.UnrecognizedBrokerVersion()

    reset_override_configs()
    return version
def __str__(self):
    """Human-readable summary used in log messages."""
    afi_name = AFI_NAMES[self._sock_afi]
    return "<BrokerConnection node_id=%s host=%s:%d %s [%s %s]>" % (
        self.node_id, self.host, self.port, self.state,
        afi_name, self._sock_addr)
class BrokerConnectionMetrics(object):
    """Registers and exposes per-connection network metric sensors.

    Aggregate (all-connection) sensors are created once per metrics
    registry; per-node sensors are created once per node_id and are
    parented to the aggregate sensors so recordings roll up.
    """

    def __init__(self, metrics, metric_group_prefix, node_id):
        """
        Arguments:
            metrics: metrics registry used to create and look up sensors
            metric_group_prefix (str): prefix for metric group names
            node_id: broker node id used to namespace per-node sensors
        """
        self.metrics = metrics

        # Any broker may have registered summary metrics already
        # but if not, we need to create them so we can set as parents below
        all_conns_transferred = metrics.get_sensor('bytes-sent-received')
        if not all_conns_transferred:
            metric_group_name = metric_group_prefix + '-metrics'

            bytes_transferred = metrics.sensor('bytes-sent-received')
            bytes_transferred.add(metrics.metric_name(
                'network-io-rate', metric_group_name,
                'The average number of network operations (reads or writes) on all'
                ' connections per second.'), Rate(sampled_stat=Count()))

            bytes_sent = metrics.sensor('bytes-sent',
                                        parents=[bytes_transferred])
            bytes_sent.add(metrics.metric_name(
                'outgoing-byte-rate', metric_group_name,
                'The average number of outgoing bytes sent per second to all'
                ' servers.'), Rate())
            bytes_sent.add(metrics.metric_name(
                'request-rate', metric_group_name,
                'The average number of requests sent per second.'),
                Rate(sampled_stat=Count()))
            bytes_sent.add(metrics.metric_name(
                'request-size-avg', metric_group_name,
                'The average size of all requests in the window.'), Avg())
            bytes_sent.add(metrics.metric_name(
                'request-size-max', metric_group_name,
                'The maximum size of any request sent in the window.'), Max())

            bytes_received = metrics.sensor('bytes-received',
                                            parents=[bytes_transferred])
            bytes_received.add(metrics.metric_name(
                'incoming-byte-rate', metric_group_name,
                'Bytes/second read off all sockets'), Rate())
            bytes_received.add(metrics.metric_name(
                'response-rate', metric_group_name,
                'Responses received sent per second.'),
                Rate(sampled_stat=Count()))

            request_latency = metrics.sensor('request-latency')
            request_latency.add(metrics.metric_name(
                'request-latency-avg', metric_group_name,
                'The average request latency in ms.'),
                Avg())
            request_latency.add(metrics.metric_name(
                'request-latency-max', metric_group_name,
                'The maximum request latency in ms.'),
                Max())

        # if one sensor of the metrics has been registered for the connection,
        # then all other sensors should have been registered; and vice versa
        node_str = 'node-{0}'.format(node_id)
        node_sensor = metrics.get_sensor(node_str + '.bytes-sent')
        if not node_sensor:
            metric_group_name = metric_group_prefix + '-node-metrics.' + node_str

            bytes_sent = metrics.sensor(
                node_str + '.bytes-sent',
                parents=[metrics.get_sensor('bytes-sent')])
            bytes_sent.add(metrics.metric_name(
                'outgoing-byte-rate', metric_group_name,
                'The average number of outgoing bytes sent per second.'),
                Rate())
            bytes_sent.add(metrics.metric_name(
                'request-rate', metric_group_name,
                'The average number of requests sent per second.'),
                Rate(sampled_stat=Count()))
            bytes_sent.add(metrics.metric_name(
                'request-size-avg', metric_group_name,
                'The average size of all requests in the window.'),
                Avg())
            bytes_sent.add(metrics.metric_name(
                'request-size-max', metric_group_name,
                'The maximum size of any request sent in the window.'),
                Max())

            bytes_received = metrics.sensor(
                node_str + '.bytes-received',
                parents=[metrics.get_sensor('bytes-received')])
            bytes_received.add(metrics.metric_name(
                'incoming-byte-rate', metric_group_name,
                'Bytes/second read off node-connection socket'),
                Rate())
            bytes_received.add(metrics.metric_name(
                'response-rate', metric_group_name,
                'The average number of responses received per second.'),
                Rate(sampled_stat=Count()))

            request_time = metrics.sensor(
                node_str + '.latency',
                parents=[metrics.get_sensor('request-latency')])
            request_time.add(metrics.metric_name(
                'request-latency-avg', metric_group_name,
                'The average request latency in ms.'),
                Avg())
            request_time.add(metrics.metric_name(
                'request-latency-max', metric_group_name,
                'The maximum request latency in ms.'),
                Max())

        # Cached per-node sensor handles recorded on every send/recv
        self.bytes_sent = metrics.sensor(node_str + '.bytes-sent')
        self.bytes_received = metrics.sensor(node_str + '.bytes-received')
        self.request_time = metrics.sensor(node_str + '.latency')
def _address_family(address):
    """
    Attempt to determine the family of an address (or hostname)
    :return: either socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC if the address family
        could not be determined
    """
    # A bracketed literal is always IPv6 ("[::1]")
    if address.startswith('[') and address.endswith(']'):
        return socket.AF_INET6
    for candidate_family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(candidate_family, address)
        except (ValueError, AttributeError, socket.error):
            continue
        return candidate_family
    return socket.AF_UNSPEC
def get_ip_port_afi(host_and_port_str):
    """
    Parse the IP and port from a string in the format of:

        * host_or_ip          <- Can be either IPv4 address literal or hostname/fqdn
        * host_or_ipv4:port   <- Can be either IPv4 address literal or hostname/fqdn
        * [host_or_ip]        <- IPv6 address literal
        * [host_or_ip]:port.  <- IPv6 address literal

    .. note:: IPv6 address literals with ports *must* be enclosed in brackets

    .. note:: If the port is not specified, default will be returned.

    :return: tuple (host, port, afi), afi will be socket.AF_INET or socket.AF_INET6 or socket.AF_UNSPEC
    """
    host_and_port_str = host_and_port_str.strip()
    if host_and_port_str.startswith('['):
        # Bracketed IPv6 literal, optionally followed by ":port"
        host, remainder = host_and_port_str[1:].split(']')
        port = int(remainder[1:]) if remainder else DEFAULT_KAFKA_PORT
        return host, port, socket.AF_INET6

    if ':' not in host_and_port_str:
        # Bare hostname or IP literal; use the default port
        return host_and_port_str, DEFAULT_KAFKA_PORT, _address_family(host_and_port_str)

    # now we have something with a colon in it and no square brackets. It could be
    # either an IPv6 address literal (e.g., "::1") or an IP:port pair or a host:port pair
    try:
        # if it decodes as an IPv6 address, use that
        socket.inet_pton(socket.AF_INET6, host_and_port_str)
        return host_and_port_str, DEFAULT_KAFKA_PORT, socket.AF_INET6
    except AttributeError:
        log.warning('socket.inet_pton not available on this platform.'
                    ' consider `pip install win_inet_pton`')
    except (ValueError, socket.error):
        # it's a host:port pair
        pass

    host, port = host_and_port_str.rsplit(':', 1)
    return host, int(port), _address_family(host)
def collect_hosts(hosts, randomize=True):
    """
    Collects a comma-separated set of hosts (host:port) and optionally
    randomize the returned list.
    """
    if isinstance(hosts, six.string_types):
        hosts = hosts.strip().split(',')

    parsed = []
    for host_port in hosts:
        host, port, afi = get_ip_port_afi(host_port)
        # A negative port is treated as "use the default"
        if port < 0:
            port = DEFAULT_KAFKA_PORT
        parsed.append((host, port, afi))

    if randomize:
        shuffle(parsed)

    return parsed
def is_inet_4_or_6(gai):
    """Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
    family = gai[0]
    return family in (socket.AF_INET, socket.AF_INET6)
def dns_lookup(host, port, afi=socket.AF_UNSPEC):
    """Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
    # XXX: all DNS functions in Python are blocking. If we really
    # want to be non-blocking here, we need to use a 3rd-party
    # library like python-adns, or move resolution onto its
    # own thread. This will be subject to the default libc
    # name resolution timeout (5s on most Linux boxes)
    try:
        candidates = socket.getaddrinfo(host, port, afi, socket.SOCK_STREAM)
    except socket.gaierror as ex:
        log.warning('DNS lookup failed for %s:%d,'
                    ' exception was %s. Is your'
                    ' advertised.listeners (called'
                    ' advertised.host.name before Kafka 9)'
                    ' correct and resolvable?',
                    host, port, ex)
        return []
    return [gai for gai in candidates if is_inet_4_or_6(gai)]
| 45.023339 | 133 | 0.593203 |
c3f55e4181ffb8432f96f2204113a1fd9acaa004 | 2,513 | py | Python | alipay/aop/api/domain/AlipayFundCouponOperationQueryModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayFundCouponOperationQueryModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayFundCouponOperationQueryModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundCouponOperationQueryModel(object):
    """Request model for Alipay's fund-coupon operation query API.

    Carries the identifiers used to look up a single coupon operation:
    auth_no, operation_id, out_order_no and out_request_no.
    """

    # Serializable attribute names; for this model each biz-content key
    # equals its attribute name, so one table drives (de)serialization.
    _FIELD_NAMES = ('auth_no', 'operation_id', 'out_order_no', 'out_request_no')

    def __init__(self):
        self._auth_no = None
        self._operation_id = None
        self._out_order_no = None
        self._out_request_no = None

    @property
    def auth_no(self):
        return self._auth_no

    @auth_no.setter
    def auth_no(self, value):
        self._auth_no = value

    @property
    def operation_id(self):
        return self._operation_id

    @operation_id.setter
    def operation_id(self, value):
        self._operation_id = value

    @property
    def out_order_no(self):
        return self._out_order_no

    @out_order_no.setter
    def out_order_no(self, value):
        self._out_order_no = value

    @property
    def out_request_no(self):
        return self._out_request_no

    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict.

        Nested values exposing to_alipay_dict() are serialized recursively.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayFundCouponOperationQueryModel()
        for name in AlipayFundCouponOperationQueryModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 29.22093 | 79 | 0.606844 |
44a7a6fd9c42fde3d292575d943a6c7a76a3d06f | 3,999 | py | Python | sdks/python/appcenter_sdk/models/StepReport.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/StepReport.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/StepReport.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class StepReport(object):
    """Swagger-generated model holding per-step test report data.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the definition.
    """

    swagger_types = {
        'finished_snapshots': 'array',
        'device_screenshots': 'array'
    }

    attribute_map = {
        'finished_snapshots': 'finished_snapshots',
        'device_screenshots': 'device_screenshots'
    }

    def __init__(self, finished_snapshots=None, device_screenshots=None):  # noqa: E501
        """StepReport - a model defined in Swagger"""  # noqa: E501
        self._finished_snapshots = None
        self._device_screenshots = None
        self.discriminator = None
        if finished_snapshots is not None:
            self.finished_snapshots = finished_snapshots
        if device_screenshots is not None:
            self.device_screenshots = device_screenshots

    @property
    def finished_snapshots(self):
        """Gets the finished_snapshots of this StepReport.  # noqa: E501

        :rtype: array
        """
        return self._finished_snapshots

    @finished_snapshots.setter
    def finished_snapshots(self, finished_snapshots):
        """Sets the finished_snapshots of this StepReport.

        :type: array
        """
        self._finished_snapshots = finished_snapshots

    @property
    def device_screenshots(self):
        """Gets the device_screenshots of this StepReport.  # noqa: E501

        :rtype: array
        """
        return self._device_screenshots

    @device_screenshots.setter
    def device_screenshots(self, device_screenshots):
        """Sets the device_screenshots of this StepReport.

        :type: array
        """
        self._device_screenshots = device_screenshots

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # One level only: elements with to_dict() are converted,
                # everything else is passed through unchanged.
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: val.to_dict() if hasattr(val, "to_dict") else val
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, StepReport):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.622222 | 91 | 0.604401 |
329f6843fdcae99692e34306445c93f7097bfdda | 1,225 | py | Python | mayan/apps/statistics/classes.py | camerondphillips/MAYAN | b8cd44af50f0b2f2b59286d9c88e2f7aa573a93f | [
"Apache-2.0"
] | null | null | null | mayan/apps/statistics/classes.py | camerondphillips/MAYAN | b8cd44af50f0b2f2b59286d9c88e2f7aa573a93f | [
"Apache-2.0"
] | 1 | 2022-03-12T01:03:39.000Z | 2022-03-12T01:03:39.000Z | mayan/apps/statistics/classes.py | camerondphillips/MAYAN | b8cd44af50f0b2f2b59286d9c88e2f7aa573a93f | [
"Apache-2.0"
] | null | null | null | class StatisticNamespace(object):
_registry = {}
@classmethod
def get_all(cls):
return cls._registry.values()
@classmethod
def get(cls, name):
return cls._registry[name]
def __init__(self, name, label):
self.name = name
self.label = label
self._statistics = []
self.__class__._registry[name] = self
def __unicode__(self):
return unicode(self.label)
def add_statistic(self, statistic):
self._statistics.append(statistic)
statistic.namespace = self
@property
def id(self):
return self.name
@property
def statistics(self):
return self._statistics
class Statistic(object):
    """Registry-backed named statistic; subclasses implement get_results()."""

    # Class-wide registry mapping statistic name -> instance.
    _registry = {}

    @classmethod
    def get_all(cls):
        """Return every registered statistic instance."""
        return cls._registry.values()

    @classmethod
    def get(cls, name):
        """Look up a statistic by name; raises KeyError when unknown."""
        return cls._registry[name]

    def __init__(self, name, label):
        """Create the statistic and register it under `name`."""
        self.name = name
        self.label = label
        self.__class__._registry[name] = self

    def __unicode__(self):
        return unicode(self.label)

    def get_results(self, *args, **kwargs):
        """Subclasses must override this to produce the statistic's results.

        Bug fix: the original `return NotImplementedError` handed callers the
        exception *class* instead of signalling an error; it is now raised.
        """
        raise NotImplementedError

    @property
    def id(self):
        """Alias for the statistic name."""
        return self.name
181af8de6ba8e890f8ef5b972481f45105252dbd | 3,176 | py | Python | api_site/src/api_x/zyt/evas/lianlian_pay/__init__.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | 1 | 2019-10-14T11:51:49.000Z | 2019-10-14T11:51:49.000Z | api_site/src/api_x/zyt/evas/lianlian_pay/__init__.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | null | null | null | api_site/src/api_x/zyt/evas/lianlian_pay/__init__.py | webee/pay | b48c6892686bf3f9014bb67ed119506e41050d45 | [
"W3C"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import url_for
from .commons import generate_absolute_url
from pytoolbox.util.sign import Signer
from pytoolbox.util.log import get_logger
from ..error import PaymentTypeNotSupportedError
from api_x.config import lianlian_pay as config
# Module-level logger for this payment backend.
logger = get_logger(__name__)
# Identifier under which this value-added service (lianlian pay) is registered.
NAME = 'LIANLIAN_PAY'
# Signer configured with the request fields used for signing/verification
# ('key' holds the secret, 'sign' carries the signature).
signer = Signer('key', 'sign')
def payment_param(payment_type, source, user_id, user_created_on, ip, order_no, ordered_on, order_name, order_desc,
                  amount):
    """Build the lianlian-pay request parameters for one payment order.

    Dispatches on `payment_type` (WEB / WAP / APP as defined in
    api_x.config.lianlian_pay) to the matching parameter builder, wiring in
    the absolute return/notify callback URLs for this `source`.
    Raises PaymentTypeNotSupportedError for any other payment type.
    """
    from ._payment import pay_param as _pay_param, wap_pay_param as _wap_pay_param, app_params as _app_params
    return_url = generate_absolute_url(url_for('lianlian_pay_entry.pay_result', order_no=order_no))
    notify_url = generate_absolute_url(url_for('lianlian_pay_entry.pay_notify', source=source))
    if payment_type == config.PaymentType.WEB:
        return _pay_param(user_id, user_created_on, ip, order_no, ordered_on, order_name, order_desc, amount,
                          return_url, notify_url)
    elif payment_type == config.PaymentType.WAP:
        return _wap_pay_param(user_id, user_created_on, ip, order_no, ordered_on, order_name, order_desc, amount,
                              return_url, notify_url, config.AppRequest.WAP)
    elif payment_type == config.PaymentType.APP:
        # The APP flow has no browser to redirect, hence no return_url.
        return _app_params(user_id, user_created_on, ip, order_no, ordered_on, order_name, order_desc, amount,
                           notify_url)
    else:
        raise PaymentTypeNotSupportedError(NAME, payment_type)
def refund(source, refund_no, refunded_on, amount, paybill_id):
    """Start a refund for `paybill_id`, with the async notify URL bound to `source`."""
    from ._refund import refund as _refund
    notify_url = generate_absolute_url(url_for('lianlian_pay_entry.refund_notify', source=source))
    return _refund(refund_no, refunded_on, amount, paybill_id, notify_url)
def query_refund(refund_no, refunded_on, oid_refundno=''):
    """Query the state of a refund order (optionally by the vendor-side refund id)."""
    from ._refund import refund_query
    return refund_query(refund_no, refunded_on, oid_refundno)
def pay_to_bankcard(source, no_order, money_order, info_order,
                    flag_card, card_type, card_no, acct_name,
                    bank_code='', province_code='', city_code='', brabank_name='',
                    prcptcd=''):
    """Pay out `money_order` to a bank card, notifying back to `source`.

    The bank/branch parameters default to empty strings; presumably the
    upstream API tolerates missing branch details for some card types —
    TODO confirm against the lianlian pay-to-bankcard spec.
    """
    from ._pay_to_bankcard import pay_to_bankcard as _pay_to_bankcard
    notify_url = generate_absolute_url(url_for('lianlian_pay_entry.pay_to_bankcard_notify', source=source))
    return _pay_to_bankcard(no_order, money_order, info_order, notify_url,
                            flag_card, card_type, card_no, acct_name,
                            bank_code, province_code, city_code, brabank_name, prcptcd)
def query_bin(card_no):
    """Look up the BIN (issuer) information for a bank-card number."""
    from .bankcard import query_bin as _query_bin
    return _query_bin(card_no)
def query_refund_notify(source, refund_no, refunded_on, vas_name):
    """ Complete the result notification by actively querying the refund order.
    :param source: origin of the refund
    :param refund_no: refund order number
    :param refunded_on: refund order timestamp
    :param vas_name: payment-method (value-added-service) name
    :return:
    """
    # NOTE(review): vas_name is accepted but unused in the body — confirm
    # callers only rely on the signature.
    from ._refund import refund_query
    from .notify import notify_refund
    data = refund_query(refund_no, refunded_on)
    return notify_refund(source, data)
| 37.809524 | 115 | 0.723552 |
49c581f8e3435e92045486cbb069f4150e488932 | 19,115 | py | Python | scripts/deduplicate_similar_inputs.py | annikagable/annotation_system_assessment | 0b2cc5245fdf02ef8089278ac0c887505db41122 | [
"MIT"
] | null | null | null | scripts/deduplicate_similar_inputs.py | annikagable/annotation_system_assessment | 0b2cc5245fdf02ef8089278ac0c887505db41122 | [
"MIT"
] | null | null | null | scripts/deduplicate_similar_inputs.py | annikagable/annotation_system_assessment | 0b2cc5245fdf02ef8089278ac0c887505db41122 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import sys
import scipy.stats
import sklearn.cluster
import os
import multiprocessing as mp
def _spearman(data_matrix, i, j, min_overlap, absolute):
    '''
    Internal function, called by calculate_metric. Get the two rows of the data matrix
    that correspond to the two user inputs, get their overlap, and calculate Spearman's rho
    if the overlap is above our threshold. When `absolute` is set, the correlation is
    computed on the absolute values of both inputs.
    Return the tuple (i,j,R2). i and j are the numeric indices of the two user inputs.
    '''
    # Get all finite protein values that are present in both inputs
    x_y = data_matrix.iloc[[i,j],:]
    x_y_finite = x_y.dropna(axis = 1, how = 'any')
    x, y = x_y_finite.values
    if absolute:
        x = abs(x)
        y = abs(y)
    #import pdb; pdb.set_trace()
    overlap = len(x)
    if (overlap < min_overlap):
        # Need to filter out small array lengths first because e.g. on an array of length 0 we cannot
        # calculate standard deviation.
        R2 = 0.0
    else:
        # Check if any of the x and y are constant because this can happen for datasets with e.g. many
        # zeros, where the non-zeros are excluded because they are not present in the other input.
        # Constant input to the spearman calculation would result in NaN. Although technically incorrect, I
        # will replace this NaN by a zero because there is no evidence of the two user inputs being related.
        std_x = np.std(x) # this can produce RuntimeWarnings when the mean is 0. Cannot be caught while running in parallel.
        std_y = np.std(y)
        # Set a more or less arbitrary tolerance threshold for the standard deviation under which we
        # consider the input to be constant.
        tolerance = 1e-12
        if (std_x < tolerance) or (std_y < tolerance):
            R2 = 0.0
        else:
            R, pval = scipy.stats.spearmanr(x,y) # any RuntimeWarnings will have been prevented
            R2 = R**2 # we want positive and negative correlations
    return (i, j, R2)
def _sym_diff(data_matrix, i, j):
'''
Internal function to calculate the symmetric difference between two user inputs.
'''
# Turn the two pandas rows into a boolean numpy array of the same dimension,
# stating whether they have a protein entry or not
isnan = np.isnan(data_matrix.iloc[[i,j],:].values)
# Each protein gets a True value only if it is in one row but not the other
xor = np.logical_xor(isnan[0], isnan[1])
# Sum the number of proteins that occur in one row but not the other
symmetric_difference = np.sum(xor)
return i, j, symmetric_difference
def calculate_metric(data_matrix,
                     metric_type = "spearman",
                     absolute = False,
                     min_overlap = 100,
                     metric_matrix_dir = 'data/interim/metric_matrices/9606',
                     nr_cores = 10,
                     parallelization_threshold = 50):
    '''
    This function calculates correlation/ distance metrics between all user inputs based on the input values.
    Available are the (absolute) Spearman correlation R^2 and the Symmetric Difference.
    It does so in an incremental way, meaning that if a metric has already been calculated for some
    of the user inputs, these values can be provided to the function and don't need to be re-calculated.
    data_matrix: A dataframe with dataIds as index, and protein shorthands as columns, filled with the
                 user-provided values (each row is one user input) or NaN for proteins which do not
                 appear in the particular input.
    metric_type: One of "spearman" or "sym_diff". Whether to calculate Spearman's R^2 or Symmetric Difference.
    min_overlap: Minimum number of proteins present in both dataIds in order to calculate a
                 correlation coefficient. dataId pairs that have an overlap < min_overlap will get
                 an R^2 of 0. Only for spearman.
    absolute: Whether to calculate the correlation on the absolute input values or the original input values.
              Only for spearman.
    metric_matrix_dir: The directory where pre-calculated metrics dataId x dataId dataframes will be stored as tsv.
    nr_cores: The number of cores to be used in parallel computation.
    parallelization_threshold: The minimum number of user inputs for running the computation in parallel.
                               Of course, parallel computation may not be required if there is only one new
                               dataId being added and all other values are already calculated. But at
                               parallelization_threshold = 50, this means that at least 49 correlations/distance
                               values will have to be calculated, which can for sure be parallelized.
    Returns: A dataId x dataId dataframe with the correlation / distance metric values for all dataId pairs.
             Side effect: the full metric dataframe is (re)written to metric_matrix_dir as tsv.
    '''
    if metric_type not in ["spearman", "sym_diff"]:
        sys.exit("metric_type has to be either 'spearman' or 'sym_diff'.")
    dataIds = data_matrix.index.to_list()
    dim = len(dataIds)
    # Start from an all-NaN square matrix; only newly computed and
    # previously cached cells get filled in below.
    metric_matrix = np.zeros((dim, dim)) + np.nan
    # Cache file name encodes the metric variant (and overlap for spearman).
    if (metric_type == "spearman") and not absolute:
        existing_metric_file = f"spearman.overlap_{min_overlap}.tsv"
    elif (metric_type == "spearman") and absolute:
        existing_metric_file = f"spearman_abs.overlap_{min_overlap}.tsv"
    else:
        existing_metric_file = "sym_diff.tsv"
    existing_metric_file = os.path.join(metric_matrix_dir, existing_metric_file)
    if os.path.exists(existing_metric_file):
        existing_metric_df = pd.read_table(existing_metric_file, index_col=0, header=0)
        existing_dataIds = existing_metric_df.index.to_list()
    else:
        existing_metric_df = None
        existing_dataIds = []
    # Only pairs involving at least one not-yet-cached dataId are computed.
    dataId_indices_to_calculate = [i for i, d in enumerate(dataIds) if d not in existing_dataIds]
    # Enumerate all the combinations that have to be calculated and remove duplicates
    # Since we're not dealing with symmetric coordinates, we can't use the old "if i > j" trick for
    # calculating only one triangle.
    coords = [(i,j) for i in range(dim) for j in dataId_indices_to_calculate]
    coords = [tuple(sorted(pair)) for pair in coords]
    coords = list(set(coords))
    ## Check if it's worth it to parallelize. I could also make this dependent on how many coordinates
    ## have to be calculated.
    if len(data_matrix) > parallelization_threshold :
        ## parallel computation
        print("Opening pool")
        pool = mp.Pool(nr_cores)
        try:
            if metric_type == "spearman":
                results = pool.starmap_async(_spearman,
                                             [(data_matrix, i, j, min_overlap, absolute) for i,j in coords]).get()
            else:
                results = pool.starmap_async(_sym_diff,
                                             [(data_matrix, i, j) for i,j in coords]).get()
        except KeyboardInterrupt:
            print("Caught KeyboardInterrupt, terminating workers")
            pool.terminate()
            pool.join()
            sys.exit()
        else:
            pool.close()
            print("Pool closed")
            pool.join()
        for i, j, metric in results:
            metric_matrix[i,j] = metric
            metric_matrix[j,i] = metric
    else:
        ## sequential_computation
        for i, j in coords:
            if metric_type == "spearman":
                i,j,metric = _spearman(data_matrix, i, j, min_overlap, absolute)
            elif metric_type == "sym_diff":
                i,j,metric = _sym_diff(data_matrix, i, j)
            else:
                sys.exit("metric_type has to be either 'spearman' or 'sym_diff'.")
            metric_matrix[i,j] = metric
            metric_matrix[j,i] = metric
    ## Convert matrix to dataframe
    metric_df = pd.DataFrame(metric_matrix, index = dataIds, columns = dataIds)
    ## Put the previously calculated data in the right place
    if existing_metric_df is not None:
        # Reducing the pre-calculated dataIds down to the ones that are actually in the
        # current data to deduplicate.
        desired_existing_dataIds = list(set(existing_dataIds).intersection(set(dataIds)))
        idx = (desired_existing_dataIds, desired_existing_dataIds)
        assert(all(metric_df.loc[idx].isna()))
        metric_df.loc[idx] = existing_metric_df.loc[idx].values
        # assert(all(metric_df.loc[existing_dataIds, existing_dataIds].isna()))
        # metric_df.loc[existing_dataIds, existing_dataIds] = \
        #    existing_metric_df.loc[existing_dataIds, existing_dataIds].values
    if metric_type == "sym_diff":
        # Symmetric differences are counts, so store them as ints.
        metric_df = metric_df.astype(int)
    # Write metrics to file so that the next run can find these data.
    os.makedirs(metric_matrix_dir, exist_ok=True)
    metric_df.to_csv(existing_metric_file, sep = '\t')
    return metric_df
def remove_similar_inputs_of_one_species(data_matrix,
                                         min_overlap = 100,
                                         r2_threshold = 0.8,
                                         symm_diff_threshold = 2,
                                         dedup_id_file = None,
                                         metric_matrix_dir = 'data/interim/metric_matrices/9606',
                                         nr_cores = 10,
                                         parallelization_threshold = 50):
    """
    Cluster near-duplicate user inputs of one species (single-linkage on
    Spearman R^2 distance, absolute Spearman R^2 distance and symmetric
    difference) and keep only the largest input of each cluster.

    data_matrix: dataId x protein dataframe of user-provided values (NaN for absent proteins).
    r2_threshold / symm_diff_threshold: similarity cut-offs for considering two inputs duplicates.
    dedup_id_file: a file (one per taxId) where all deduplicated dataIds will be written to, one per line.
    Returns (clustered_data, nr_of_inputs, non_duplicate_dataIds); also writes the
    cluster-label table to metric_matrix_dir/single_linkage_clusters.tsv.
    """
    nr_all_inputs = len(data_matrix)
    nr_of_inputs = {'full': nr_all_inputs}
    print(f"Number of user inputs: {nr_all_inputs}:")
    # Convert the correlation threshold into a distance threshold (1 - R^2).
    r2_dist_threshold = 1 - r2_threshold
    ### Calculate distance metrics
    print("Calculating spearman")
    spearman_df = calculate_metric(data_matrix,
                                   metric_type = "spearman",
                                   min_overlap = min_overlap,
                                   absolute = False,
                                   metric_matrix_dir = metric_matrix_dir,
                                   nr_cores = nr_cores,
                                   parallelization_threshold = parallelization_threshold)
    print("Calculating absolute spearman")
    abs_spearman_df = calculate_metric(data_matrix,
                                       metric_type = "spearman",
                                       min_overlap = min_overlap,
                                       absolute = True,
                                       metric_matrix_dir = metric_matrix_dir,
                                       nr_cores = nr_cores,
                                       parallelization_threshold = parallelization_threshold)
    print("Calculating symmetric difference")
    sym_diff_df = calculate_metric(data_matrix,
                                   metric_type = "sym_diff",
                                   min_overlap = min_overlap,
                                   metric_matrix_dir = metric_matrix_dir,
                                   nr_cores = nr_cores,
                                   parallelization_threshold = parallelization_threshold)
    metric_dict = dict(spearman_r2_dist = dict(matrix = 1 - spearman_df,
                                               threshold = r2_dist_threshold),
                       abs_spearman_r2_dist = dict(matrix = 1 - abs_spearman_df,
                                                   threshold = r2_dist_threshold),
                       symm_diff = dict(matrix = sym_diff_df,
                                        threshold = symm_diff_threshold))
    if nr_all_inputs <= 1:
        non_duplicate_dataIds = sorted(data_matrix.index.to_list())
        # Create a fake cluster label dataframe with zero as cluster label for all similarity metrics
        zero_array = np.zeros((len(data_matrix),len(metric_dict)))
        clustered_data = pd.DataFrame(zero_array, columns = metric_dict.keys(), index = data_matrix.index)
    else:
        ### Agglomerative clustering
        label_dict = dict()
        for metric in metric_dict:
            # Initialize a single linkage clustering
            sl_clustering = sklearn.cluster.AgglomerativeClustering(affinity='precomputed',
                                              compute_full_tree='auto',
                                              linkage='single',
                                              distance_threshold=metric_dict[metric]['threshold'],
                                              n_clusters = None)
            # Cluster
            sl_clustering.fit(X= metric_dict[metric]['matrix'])
            #import pdb; pdb.set_trace()
            # Get the cluster labels
            label_dict[metric] = sl_clustering.labels_
        ### Calculate number of proteins in input
        input_sizes = (~data_matrix.isna()).sum(axis = 1)
        assert( np.all(input_sizes.index.values == data_matrix.index.values))
        ### Combine input sizes and cluster labels into one dataframe
        clustered_data = pd.DataFrame(label_dict, index = data_matrix.index)
        clustered_data = clustered_data.assign(input_sizes = input_sizes)
        ### Sort by size and drop duplicate clusters
        ### We want the largest input to be kept. The other columns are just sorted by in order to get a deterministic outcome.
        ### The cluster labels per se do not provide any information so the sort order does not matter, only whether they are the same or not.
        deduplicated_data = clustered_data.sort_values(['input_sizes'] + list(metric_dict.keys()), ascending = False)
        ### Save the number of inputs after each duplication step
        for metric in metric_dict:
            deduplicated_data.drop_duplicates(subset = metric, inplace=True)
            print(f"After {metric}, {len(deduplicated_data) = }")
            nr_of_inputs[metric] = len(deduplicated_data)
        ### Return the non-duplicated IDs
        non_duplicate_dataIds = deduplicated_data.index.to_list()
        non_duplicate_dataIds = sorted(non_duplicate_dataIds)
    ## Write deduplicated IDs to file.
    if dedup_id_file:
        with open(dedup_id_file, 'w') as out:
            id_string = '\n'.join(non_duplicate_dataIds)+'\n'
            out.write(id_string)
    clustered_inputs_file = os.path.join(metric_matrix_dir, "single_linkage_clusters.tsv")
    clustered_data.to_csv(clustered_inputs_file, sep = '\t')
    return clustered_data, nr_of_inputs, non_duplicate_dataIds
# Command-line entry point: script.py <species_matrix.tsv> <metric_matrix_dir> <nr_cores> <min_overlap> <dedup_id_file>
_, species_matrix_file, metric_matrix_dir, nr_cores, min_overlap, dedup_id_file = sys.argv
nr_cores = int(nr_cores)
min_overlap = int(min_overlap)
# dataId x protein matrix for one species.
species_matrix = pd.read_table(species_matrix_file, index_col=0, header=0)
## For each species, calculate the metrics between inputs, cluster similar inputs, and keep only one cluster representative.
clustered_data, nr_of_inputs, non_duplicate_dataIds = \
    remove_similar_inputs_of_one_species(data_matrix = species_matrix,
                                         min_overlap = min_overlap,
                                         r2_threshold = 0.8,
                                         symm_diff_threshold = 2,
                                         dedup_id_file = dedup_id_file,
                                         metric_matrix_dir = metric_matrix_dir,
                                         nr_cores = nr_cores,
                                         parallelization_threshold = 40)
#######################
##### TESTS ###########
#######################
## Read input matrix (columns are proteins, rows are inputs)
# species_matrix_file = "data/interim/species_matrices/9606.tsv"
# species_matrix = pd.read_table(species_matrix_file, index_col=0, header=0)
# ## Verify that there are no infinite values
# all(species_matrix.apply(lambda row: all(np.isfinite(row.dropna())), axis = 1))
# ## Verify that all inputs fit into float64 dtype
# all(species_matrix.apply(lambda row: row.dtype, axis = 1) == "float64")
# # Check min and max values
# all(species_matrix.apply(lambda row: row.max(), axis = 1) < 1e200)
# all(species_matrix.apply(lambda row: row.min(), axis = 1) > -1e200)
#ids = "DFwDoizXLnN3 FXLCF05mc7Gl L00P8PBTTqNk fZwwiHVs7Glc".split()
# clustered_data, nr_of_inputs, non_duplicate_dataIds = \
# remove_similar_inputs_of_one_species(data_matrix = species_matrix.iloc[0:700,:],
# min_overlap = 100,
# r2_threshold = 0.8,
# symm_diff_threshold = 2,
# dedup_id_file = None,
# metric_matrix_dir = 'test',#'data/interim/metric_matrices/9606',
# nr_cores = 10,
# parallelization_threshold = 10)
# spearman_df = calculate_metric(data_matrix = species_matrix.iloc[0:500,:],
# metric_type = "spearman",
# min_overlap = 100,
# absolute = False,
# metric_matrix_dir = 'test',
# nr_cores = 10,
# parallelization_threshold = 50)
# sym_diff_df = calculate_metric(data_matrix = species_matrix.iloc[0:200,:],
# metric_type = "sym_diff",
# min_overlap = 100,
# absolute = False,
# metric_matrix_dir = 'test',#'data/interim/metric_matrices/9606',
# nr_cores = 2,
# parallelization_threshold = 2)
| 43.841743 | 142 | 0.562229 |
24c51f248d7070a3c192a2c317eeb0e035edf3bb | 16,798 | py | Python | sw_joystick.py | SequentialBase9915/Autodmax | a5cfcf87b607bf2a4e0e92b74c7010ee6ac23a0f | [
"MIT"
] | null | null | null | sw_joystick.py | SequentialBase9915/Autodmax | a5cfcf87b607bf2a4e0e92b74c7010ee6ac23a0f | [
"MIT"
] | null | null | null | sw_joystick.py | SequentialBase9915/Autodmax | a5cfcf87b607bf2a4e0e92b74c7010ee6ac23a0f | [
"MIT"
] | null | null | null | import pygame
import wx
from pygame.math import Vector2
from sw_serial import *
import cv2
import numpy as np
from datetime import date
# Initialize the serial connection and then joysticks.
pygame.joystick.init()
joy_count = pygame.joystick.get_count()
if joy_count > 0:
    # First connected joystick is assumed to be the XBOX 360 pad.
    xbox_360 = pygame.joystick.Joystick(0)
    xbox_360.init()
    print(xbox_360.get_name())
# NOTE(review): if no joystick is connected, xbox_360 stays unbound and any
# later joystick call raises NameError — confirm whether that is intended.
###############################################################################
# Joystick controls
###############################################################################
def button_press():
    """Combine all currently pressed XBOX 360 buttons into one Switch button code.

    Maps XBOX 360 button indices to Switch button values (from sw_serial) and
    returns their sum, i.e. the combined command for simultaneous presses.
    """
    # XBOX 360 button index -> Switch button value
    mapping = {
        0: BTN_A, 1: BTN_B, 2: BTN_X, 3: BTN_Y, 4: BTN_L,
        5: BTN_R, 6: BTN_MINUS, 7: BTN_PLUS, 8: BTN_HOME,
    }
    return sum(code for index, code in mapping.items() if xbox_360.get_button(index))
def hat_motion():
    """Map the XBOX 360 d-pad (hat) state to the corresponding Switch d-pad command.

    Bug fix: the original matched only the four cardinal directions and the
    centered position against a list, so a diagonal hat value such as (1, 1)
    left the result variable unassigned and raised UnboundLocalError.
    Diagonals now fall back to DPAD_CENTER.
    """
    # (x, y) hat value from the XBOX controller -> Switch d-pad command
    hat_map = {
        (-1, 0): DPAD_L,
        (1, 0): DPAD_R,
        (0, 1): DPAD_U,
        (0, -1): DPAD_D,
        (0, 0): DPAD_CENTER,
    }
    return hat_map.get(xbox_360.get_hat(0), DPAD_CENTER)
def z_axis():
    """Return the Switch trigger for the XBOX 360 Z axis: ZL when pushed past
    +0.5, ZR past -0.5, otherwise 0 (no trigger)."""
    position = xbox_360.get_axis(2)
    if position >= .5:
        return BTN_ZL
    if position <= -.5:
        return BTN_ZR
    return 0
def stick_move():
    """Translate both XBOX 360 analog sticks into a combined Switch stick command.

    For each stick the axis pair is converted to a polar angle (flipped so it
    matches the Switch convention) and an intensity (radius scaled to 0-255);
    a stick within the +-0.1 dead zone sends the shared center command instead.
    Returns the sum of the left- and right-stick command values.
    """
    left_x = xbox_360.get_axis(0)
    left_y = xbox_360.get_axis(1)
    right_x = xbox_360.get_axis(4)
    right_y = xbox_360.get_axis(3)
    axis_list = [[left_x,left_y],[right_x,right_y]]
    angle_list = []
    for stick in axis_list:
        vec = Vector2(stick[0],stick[1])
        rad, ang = vec.as_polar()
        # pygame angles are -180..180 with y pointing down; mirror them into
        # the 0..360 counter-clockwise range expected by stick_angle.
        if ang <= 0:
            ang = (-1) * ang
        elif ang >= 0:
            ang = 360 - ang
        angle_list.append(ang)
    # Need to input intensity -> value between 50 (0x32) and 255 (0xFF).
    # rad adjusted to 1 if over 255 as it resets above this value.
    stick_intsy = []
    for stick in axis_list:
        power = Vector2(stick[0],stick[1])
        rad, ang = power.as_polar()
        if rad > 1:
            rad = 1
        intsy = int(rad * 255)
        stick_intsy.append(intsy)
    # Read the x and y from each stick and if outside the dead zone consider
    # it pressed, then send the stick signal.
    # If neither axis is deflected, send the center command (shared for L and
    # R as it's the same value).
    state_axes = []
    for joy in axis_list:
        which_stick = axis_list.index(joy)
        state_joy = 0
        for axis in joy:
            if -1 <= axis <= -0.1 or 0.1 <= axis <= 1:
                state_joy += 1
            else:
                state_joy += 0
        if state_joy > 0:
            state_axes.append(
                stick_angle(int(angle_list[which_stick]),
                stick_intsy[which_stick], which_stick)
            )
        else:
            state_axes.append(LSTICK_CENTER)
    joy_cmd = state_axes[0] + state_axes[1]
    return joy_cmd
def timemov(direction, length, stick=0):
    """Hold `stick` (0 = left, 1 = right) at full intensity toward `direction`
    (degrees) for `length` serial command ticks."""
    for _ in range(length):
        send_cmd(stick_angle(direction, 0xFF, stick))
def btn_press(button, wait):
    """Tap `button`: hold it for 0.1 s, release, then pause `wait` seconds."""
    for command, delay in ((button, 0.1), (BTN_NONE, wait)):
        send_cmd(command)
        p_wait(delay)
def stick_press(dpad, wait):
    """Flick the left stick with command `dpad` for 0.05 s, recenter, then
    pause `wait` seconds."""
    for command, delay in ((dpad, 0.05), (LSTICK_CENTER, wait)):
        send_cmd(command)
        p_wait(delay)
def hat_press(hat, wait):
    """Tap d-pad direction `hat` for 0.07 s (with both sticks centered and no
    buttons), release it, then pause `wait` seconds."""
    for command, delay in ((BTN_NONE + hat + LSTICK_CENTER + RSTICK_CENTER, 0.07),
                           (DPAD_CENTER, wait)):
        send_cmd(command)
        p_wait(delay)
###############################################################################
# Keyboard inputs
###############################################################################
def kbd_input(key_name):
    """Dispatch a keyboard key name to the corresponding controller action.

    Digits start macros; w/s/a/d hold the left stick (the stick is NOT
    released here — a later command must recenter it); q/e nudge the right
    stick; arrow keys tap the d-pad; the remaining keys tap buttons.
    Replaces the original mixed if/elif chain with lookup tables.
    """
    macros = {
        "0": ("Starting egg hatching macro...", hatch_egg_macro),
        "2": ("Starting release macro...", release_macro),
        "3": ("Next loop", encounter_loop),
        "5": ("Starting batch hatching macro", batch_hatch_macro),
        "7": ("Starting egg fetching macro...", get_eggs_macro),
    }
    # Left-stick directions in degrees for stick_angle().
    move_angles = {"w": 90, "s": 270, "a": 180, "d": 360}
    right_stick = {"q": RSTICK_L, "e": RSTICK_R}
    hats = {"up": DPAD_U, "down": DPAD_D, "left": DPAD_L, "right": DPAD_R}
    buttons = {
        "return": BTN_A, "backspace": BTN_B, "x": BTN_X, "y": BTN_Y,
        "r": BTN_R, "l": BTN_L, "-": BTN_MINUS, ";": BTN_PLUS, "h": BTN_HOME,
    }
    if key_name in macros:
        message, macro = macros[key_name]
        print(message)
        macro()
    elif key_name in move_angles:
        send_cmd(stick_angle(move_angles[key_name], 0xFF, 0))
    elif key_name in right_stick:
        send_cmd(right_stick[key_name])
    elif key_name in hats:
        hat_press(hats[key_name], 0.01)
    elif key_name in buttons:
        btn_press(buttons[key_name], 0.05)
###############################################################################
# Macros
###############################################################################
class swmacro:
    """Dialog-driven integer prompt used to configure the macros.

    Shows a wx text-entry dialog and returns a value within [lower, upper],
    re-prompting on out-of-range or non-numeric input, or None on cancel.
    """
    app = wx.App() #One App per process, may need to move further up in future

    def __init__(self, msg, title, lower, upper):
        """Store the dialog message/title and the accepted integer range."""
        self.msg = msg
        self.title = title
        self.lower = lower
        self.upper = upper

    def macro_prompt(self):
        """Prompt the user; return a validated int or None if cancelled."""
        frame = wx.Frame(None, -1, 'win.py')
        frame.SetSize(0, 0, 100, 100)
        dlg = wx.TextEntryDialog(frame, self.msg, self.title)
        if dlg.ShowModal() != wx.ID_OK:
            return None  # user cancelled the dialog
        try:
            value = int(dlg.GetValue())
        except ValueError:
            # Bug fix: the original crashed on non-numeric text; treat it
            # like an out-of-range entry and fall through to the error path.
            value = None
        if value is not None and self.lower <= value <= self.upper:
            return value
        error = wx.MessageDialog(
            frame, "Incorrect amount", 'Error', wx.OK | wx.ICON_ERROR
        )
        error.ShowModal()
        error.Destroy()
        return self.macro_prompt()
def hatch_egg_macro():
    """Repeatedly collect one egg from the nursery and cycle until it hatches.

    In-game preconditions: full party, town map as the first menu option,
    player starts off the bike, and an egg is ready to pick up.

    Bug fix: the abort check compared the swmacro *dialog objects* (always
    truthy) instead of the values they returned, so cancelling a dialog
    crashed later on range(None). The bare except that reported every error
    as "Incorrect Group" is replaced by an explicit group lookup guard.
    """
    egg_group = swmacro(
        'Choose Egg Group # (Starters(1) Longer(2) Very long(3): ',
        'Egg cycle type', 1, 3
    )
    group = egg_group.macro_prompt()
    egg_hatch = swmacro('Number to hatch', 'Egg hatch count', 1, 600)
    eggs = egg_hatch.macro_prompt()
    if group is None or eggs is None:
        print("Macro aborted due to no entry")
        return None
    # Cycling step counts per egg-cycle group.
    steps_dict = {1: 12800, 2: 14800, 3: 21200}
    steps = steps_dict.get(group)
    if steps is None:
        # Unreachable while macro_prompt() enforces 1-3; kept as a guard.
        print("Incorrect Group")
        return None
    for i in range(eggs):
        btn_press(BTN_X,1) #Open menu - assuming map is first btn available
        btn_press(BTN_A,3) #Open map
        btn_press(BTN_A,1.5) #Select location assuming nursery is location
        btn_press(BTN_A,1) #Confirm
        btn_press(BTN_A,3) #Travel
        timemov(260,200) #Move back to egg person
        btn_press(BTN_L,1.3) #Swivel screen around to face
        btn_press(BTN_A,1.1) #open conversation
        btn_press(BTN_A,2.2) #next line
        btn_press(BTN_A,3.2) #"Yes" to accept egg
        btn_press(BTN_A,2.2) #Confirmation
        btn_press(BTN_A,1.3) #Add to roster
        btn_press(BTN_A,2.2) #Confirmation
        stick_press(LSTICK_D,0.7) #Down 1 slot
        stick_press(LSTICK_D,0.7) #Down another slot (now on 3rd slot)
        btn_press(BTN_A,2.5) #Confirmation
        btn_press(BTN_A,2.5) #Confirmation
        btn_press(BTN_A,2) #Confirmation
        send_cmd(stick_angle(270, 0xFF,0)) #turn around
        p_wait(0.3)
        # btn_press(BTN_PLUS,1) #Get on bike
        timemov(250,700) #Move into place
        btn_press(BTN_PLUS,1.2)
        # 360 Circle @ Full Intensity until the egg hatches
        for r in range(steps):
            send_cmd(LSTICK_L+RSTICK_R)
        btn_press(BTN_A,18) #After egg hatches
        btn_press(BTN_A,5) #After egg hatches
        btn_press(BTN_PLUS,2.3)
        print(i)
def get_eggs_macro():
    """Collect eggs from the nursery: fast-travel there, accept one egg, then
    cycle ~3000 ticks so the next egg is ready, repeated `egg_count` times.

    In-game preconditions: an egg ready at the nursery and party slots full
    (each accepted egg replaces the second party slot).
    """
    get_egg = swmacro('Number of eggs to get','Egg fetching',1,150)
    egg_count = get_egg.macro_prompt()
    if egg_count == None:
        print("Macro aborted due to no entry")
        return None
    print("Getting %s eggs" % egg_count)
    #Needs to have egg ready. full slots
    for g in range (egg_count):
        btn_press(BTN_X,1) #Open menu - assuming map is first btn available
        btn_press(BTN_A,3) #Open map
        btn_press(BTN_A,1.5) #Select location assuming nursery is location
        btn_press(BTN_A,1) #Confirm
        btn_press(BTN_A,3) #Travel
        timemov(260,200) #Move back to egg person
        btn_press(BTN_L,1) #Swivel screen around to face
        btn_press(BTN_A,1) #open conversation
        btn_press(BTN_A,2) #next line
        btn_press(BTN_A,3.2) #"Yes" to accept egg
        btn_press(BTN_A,2) #Confirmation
        stick_press(LSTICK_D,0.5) #Down 1 slot
        btn_press(BTN_A,1.5) #Confirm
        btn_press(BTN_A,2) #Confirm
        btn_press(BTN_A,1) #exit conversation
        send_cmd(stick_angle(270, 0xFF,0))#turn around
        p_wait(0.2)
        timemov(250,700) #Move into place
        btn_press(BTN_PLUS,1)
        # 360 Circle @ Full Intensity
        r = 0
        for r in range (3000):
            #estimated hatch rate 256 steps x 5 sets at 80% rate
            # and using ratio of steps with egg hatching
            send_cmd(LSTICK_L+RSTICK_R)
        btn_press(BTN_PLUS,1)
    #return egg_count
def batch_hatch_macro():
    """Move a column of eggs from a box into the party and hatch them in a batch.

    In-game preconditions: box cursor at the starting position, eggs in the
    second column with a blank first column.

    Bug fix: the original ended the hatch loop with `if i != 4` inside
    `for i in range(4)`, which is always true, so the `else` branch that
    pressed BTN_PLUS (dismount) was dead code and the final press never
    happened. The dismount press now runs once after the loop, and the extra
    cycling runs only between hatches.
    """
    steps = 13000
    # Start at current inventory (2nd position down).
    # Eggs need to be in second column, with blank column at first.
    btn_press(BTN_Y,1) #Change cursor
    btn_press(BTN_Y,1) #Change again
    btn_press(BTN_A,1) #Select first item
    hat_press(DPAD_D,0.3)
    hat_press(DPAD_D,0.3)
    hat_press(DPAD_D,0.3)
    hat_press(DPAD_D,0.3) #Move down to fifth item
    btn_press(BTN_A,1) #Select and pick up
    hat_press(DPAD_R,0.5) #Move over to inventory
    hat_press(DPAD_U,0.5) #Shift up
    btn_press(BTN_A,1) #Set down in box
    hat_press(DPAD_R,0.3) #Move to row of eggs
    btn_press(BTN_A,1) #Select first egg
    hat_press(DPAD_D,0.3)
    hat_press(DPAD_D,0.3)
    hat_press(DPAD_D,0.3)
    hat_press(DPAD_D,0.3) #Move down to fifth egg
    btn_press(BTN_A,1) #Select and pick up
    hat_press(DPAD_L,0.5)
    hat_press(DPAD_L,0.5) #Move to the party ("temochi") list
    hat_press(DPAD_D,0.3) #Position for placement
    btn_press(BTN_A,1) #Place in active roster
    btn_press(BTN_B,1.5)
    btn_press(BTN_B,1.5)
    btn_press(BTN_B,1.5) #get back to overworld
    btn_press(BTN_PLUS,1) #Get on bike
    # 360 Circle @ Full Intensity until the first egg is ready to hatch.
    for r in range(steps):
        send_cmd(LSTICK_L+RSTICK_R)
    for i in range(4):
        btn_press(BTN_A,18) #After egg hatches
        btn_press(BTN_A,6) #After egg hatches
        # Timing is currently INCORRECT, needs to be a bit longer and test
        if i != 3:
            # Cycle a little more so the next egg hatches.
            for r in range(200):
                send_cmd(LSTICK_L+RSTICK_R)
    btn_press(BTN_PLUS,2.3) #Dismount after the last hatch
def encounter_loop():
    """Soft-reset encounter loop with a color-based shiny check.

    Repeatedly closes and relaunches the game, starts a static encounter,
    then samples one frame from the capture card and measures how much of
    it falls inside an HSV color window. A low match percentage is treated
    as an off-color (shiny) sprite: the frame is shown and the loop stops.
    Interrupt with Ctrl-C to abort.
    """
    i = 0
    try:
        while True:
            #Start in battle in order to shiny check
            btn_press(BTN_HOME, 0.7)  #Go to home screen
            btn_press(BTN_X, 0.8)     #bring up exit menu
            btn_press(BTN_A, 2.8)     #select exit
            btn_press(BTN_A, 1)       #bring up profile select
            btn_press(BTN_A, 17.5)    #open game
            btn_press(BTN_A, 6.5)     #load game
            btn_press(BTN_A, 5)       #open menu
            btn_press(BTN_A, 1.5)     #confirm
            #btn_press(BTN_A,1.1) #confirm (Regidrago)
            #btn_press(BTN_A,1.5) #confirm (Regidrago)
            btn_press(BTN_A, 13.5)    #start encounter
            # Grab a single frame from the capture device for the check.
            cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            ret, frame = cap.read()
            # Fix: release the device each pass -- the original leaked one
            # VideoCapture handle per encounter.
            cap.release()
            img_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # HSV window for the normal (non-shiny) sprite coloring.
            mask_color1 = np.asarray([14, 0, 50])
            mask_color2 = np.asarray([24, 255, 255])
            mask = cv2.inRange(img_hsv, mask_color1, mask_color2)
            color_pct = (cv2.countNonZero(mask)/(img_hsv.shape[0] * img_hsv.shape[1]))*100
            print(str(i)+' {:.5f}'.format(color_pct))
            # First encounter uses a slightly tighter threshold.
            if i == 0:
                if color_pct < 1.5:
                    cv2.imshow("Shiny", shiny_pic)
                    break
            if i != 0:
                if color_pct < 1.6:
                    cv2.imshow("Shiny", shiny_pic)
                    break
            p_wait(3)
            i += 1
    except KeyboardInterrupt:
        pass
def release_macro():
    """Release a user-specified number of boxed pokemon.

    Prompts for a count (1-600) via the swmacro dialog, then walks the box
    grid in a serpentine pattern, releasing each slot in turn. The cursor
    must be on the first slot of a box when the macro starts.
    """
    #Need to be at start position of box for deleting correctly
    #Look into adding support for an arbitrary start point
    release_pokemon = swmacro(
        'Number of Pokemon to release',
        'Pokemon Count', 1, 600
    )
    release_count = release_pokemon.macro_prompt()
    # Fix: identity comparison for None instead of ``== None``.
    if release_count is None:
        print("Macro aborted due to no entry")
        return None
    r = 1  # current row within the box (1-based)
    b = 1  # current box number (1-based)
    try:
        for i in range(release_count):
            # Release dialog for the slot under the cursor.
            btn_press(BTN_A, 0.5)
            hat_press(DPAD_U, 0.2)
            hat_press(DPAD_U, 0.2)
            btn_press(BTN_A, 0.7)
            hat_press(DPAD_U, 0.2)
            btn_press(BTN_A, 1.2)
            btn_press(BTN_A, 0.9)
            print("Released %s" % i)
            # i+1 converts the 0-based loop index into a released count.
            if (i+1) % 6 == 0 and (i+1) % 30 != 0:
                # End of a 6-slot row (but not end of box): drop a row.
                hat_press(DPAD_D, 0.8)
                print("%s end of row" % (i+1))
                r += 1
                print(r)
            elif (i+1) % 30 == 0:
                # End of a 30-slot box: advance to the next box.
                hat_press(DPAD_D, 0.3)
                hat_press(DPAD_D, 0.3)
                hat_press(DPAD_D, 0.3)
                hat_press(DPAD_R, 0.3)
                hat_press(DPAD_R, 0.3)
                btn_press(BTN_R, 0.8)
                print("%s end of box" % (i+1))
                b += 1
                r += 1
                print(b)
            else:
                # Serpentine walk: direction alternates with row/box parity.
                if (b % 2 != 0 and r % 2 == 0) or (r % 2 != 0 and b % 2 == 0):
                    hat_press(DPAD_L, 0.3)
                    print("%s next value (left)" % (i+1))
                else:
                    hat_press(DPAD_R, 0.3)
                    print("%s next value (right)" % (i+1))
    # Fix: the original bare ``except:`` also swallowed KeyboardInterrupt
    # and SystemExit; keep the best-effort behavior for ordinary errors.
    except Exception:
        print("Incorrect value")
def watt_farmer():
    """Farm watts by repeatedly advancing the system date at a raid den.

    Runs 1000 date-skip cycles. Assumes the player is standing at, and
    facing, an active den when started; all button timings are tuned to
    the menu animation delays.
    """
    day = 1
    for cycle in range(1000):
        print(f"Current day is {day}")
        # Den interaction: the very first day needs a shorter press
        # sequence than every subsequent day.
        if day == 1:
            btn_press(BTN_A, 1.1)
            btn_press(BTN_A, 3)
        else:
            for hold in (1, 1, 1.5, 3):
                btn_press(BTN_A, hold)
        # Enter battle search for first time (must be facing den)
        btn_press(BTN_HOME, 1)
        for _ in range(2):
            hat_press(DPAD_R, 0.3)
        hat_press(DPAD_D, 0.4)
        hat_press(DPAD_R, 0.3)
        btn_press(BTN_A, 1)
        # Brings to settings screen
        for _ in range(14):
            hat_press(DPAD_D, 0.2)
        # To quickly scroll down the list
        hat_press(DPAD_R, 0.3)
        for _ in range(4):
            hat_press(DPAD_D, 0.3)
        btn_press(BTN_A, 0.6)
        for _ in range(2):
            hat_press(DPAD_D, 0.3)
        btn_press(BTN_A, 0.6)
        # Brings up time settings
        for _ in range(2):
            hat_press(DPAD_R, 0.2)
        hat_press(DPAD_U, 0.2)
        for _ in range(4):
            btn_press(BTN_A, 0.2)
        btn_press(BTN_HOME, 1.4)
        # Moves date up and moves back to home screen
        btn_press(BTN_A, 1.5)
        btn_press(BTN_B, 1)
        btn_press(BTN_A, 4)
        # Exit battle; wrap the day-of-month counter at 31.
        day = 1 if day == 31 else day + 1
| 33.866935 | 90 | 0.545303 |
3e0bb6294f59e9d93e9432354676d3120bd61735 | 9,593 | py | Python | app/api/organizations.py | EandrewJones/srdp-database | 22b9f5bcbffcd14b17cd62c6b268e5be079bf4fe | [
"MIT"
] | null | null | null | app/api/organizations.py | EandrewJones/srdp-database | 22b9f5bcbffcd14b17cd62c6b268e5be079bf4fe | [
"MIT"
] | null | null | null | app/api/organizations.py | EandrewJones/srdp-database | 22b9f5bcbffcd14b17cd62c6b268e5be079bf4fe | [
"MIT"
] | null | null | null | from flask import jsonify, request, url_for
from app import db
from app.api import bp
from app.api.groups import get_group
from app.api.auth import token_auth
from app.api.errors import bad_request
from app.api_spec import (
NonviolentTacticsSchema,
OrganizationSchema,
ViolentTacticsSchema,
)
from app.models import NonviolentTactics, Organizations, ViolentTactics
@bp.route("/organizations", methods=["GET"])
@token_auth.login_required
def get_organizations():
"""
---
get:
summary: get organizations
description: retrieve all organizations
security:
- BasicAuth: []
- BearerAuth: []
responses:
'200':
description: call successful
content:
application/json:
schema: OrganizationSchema
'401':
description: Not authenticated
tags:
- Organizations
"""
page = request.args.get("page", 1, type=int)
per_page = min(request.args.get("per_page", 10, type=int), 100)
data = Organizations.to_collection_dict(
Organizations.query, page, per_page, OrganizationSchema, "api.get_organizations"
)
return jsonify(data)
@bp.route("/organizations/<int:facId>", methods=["GET"])
@token_auth.login_required
def get_organization(facId):
"""
---
get:
summary: Get single organization
description: retrieve an organization by facId
security:
- BasicAuth: []
- BearerAuth: []
parameters:
- in: path
name: facId
schema:
type: integer
required: true
description: Numeric facId of the organization to retrieve
responses:
'200':
description: call successful
content:
application/json:
schema: OrganizationSchema
'401':
description: Not authenticated
tags:
- Organizations
"""
organization = Organizations.query.filter_by(facId=facId).first_or_404()
return OrganizationSchema().dump(organization)
@bp.route("/organizations/<int:facId>/group", methods=["GET"])
@token_auth.login_required
def get_org_group(facId):
"""
---
get:
summary: Get organization's parent ethnolinguistic group
description: retrieve ethnolingustic group for which an organization makes claims
security:
- BasicAuth: []
- BearerAuth: []
parameters:
- in: path
name: facId
schema:
type: integer
required: true
description: Numeric facId of the organization to fetch group for
responses:
'200':
description: call successful
content:
application/json:
schema: GroupSchema
'401':
description: Not authenticated
tags:
- Organizations
"""
organization = Organizations.query.filter_by(facId=facId).first_or_404()
return get_group(kgcId=organization.kgcId)
@bp.route("/organizations/<int:facId>/nonviolent_tactics", methods=["GET"])
@token_auth.login_required
def get_org_nonviolent_tactics(facId):
"""
---
get:
summary: Get an organization's non-violent tactics
description: Retrieves and organization's non-violent tactics
security:
- BasicAuth: []
- BearerAuth: []
parameters:
- in: path
name: facId
schema:
type: integer
required: true
description: Numeric facId of the organization to retrieve non-violent tactics for
responses:
'200':
description: call successful
content:
application/json:
schema: NonviolentTacticsSchema
'401':
description: Not authenticated
tags:
- Organizations
"""
nonviolent_tactics = NonviolentTactics.query.filter_by(facId=facId)
response = jsonify(NonviolentTacticsSchema(many=True).dump(nonviolent_tactics))
response.status_code = 200
response.headers["Location"] = url_for("api.get_nonviolent_tactics")
return response
@bp.route("/organizations/<int:facId>/violent_tactics", methods=["GET"])
@token_auth.login_required
def get_org_violent_tactics(facId):
"""
---
get:
summary: Get an organization's violent tactics
description: Retrieves an organization's violent tactics
security:
- BasicAuth: []
- BearerAuth: []
parameters:
- in: path
name: facId
schema:
type: integer
required: true
description: Numeric facId of the organization to retrieve violent tactics for
responses:
'200':
description: call successful
content:
application/json:
schema: ViolentTacticsSchema
'401':
description: Not authenticated
tags:
- Organizations
"""
violent_tactics = ViolentTactics.query.filter_by(facId=facId)
response = jsonify(ViolentTacticsSchema(many=True).dump(violent_tactics))
response.status_code = 200
response.headers["Location"] = url_for("api.get_violent_tactics")
return response
@bp.route("/organizations", methods=["POST"])
@token_auth.login_required
def create_orgs():
"""
---
post:
summary: Create one or more organizations
description: create new organizations for authorized user
security:
- BasicAuth: []
- BearerAuth: []
requestBody:
required: true
content:
application/json:
schema: GroupInputSchema
responses:
'201':
description: call successful
content:
application/json:
schema: OrganizationSchema
'401':
description: Not authenticated
tags:
- Organizations
"""
data = request.get_json() or {}
# If single entry, regular add
if isinstance(data, dict):
if (
"facId" in data
and Organizations.query.filter_by(facId=data["facId"]).first()
):
return bad_request(
f"facId {data['facId']} already taken; please use a different id."
)
organization = Organizations()
organization.from_dict(data)
db.session.add(organization)
db.session.commit()
response = jsonify(OrganizationSchema().dump(organization))
response.status_code = 201
response.headers["Location"] = url_for(
"api.get_organization", facId=organization.facId
)
# If multiple entries, bulk save
if isinstance(data, list):
organizations = []
for entry in data:
if (
"facId" in entry
and Organizations.query.filter_by(facId=entry["facId"]).first()
):
return bad_request(
f"facId {data['facId']} already taken; please use a different facId."
)
organization = Organizations()
organization.from_dict(entry)
organizations.append(organization)
db.session.add_all(organizations)
db.session.commit()
response = jsonify(OrganizationSchema(many=True).dump(organizations))
response.status_code = 201
response.headers["Location"] = url_for("api.get_organizations")
return response
@bp.route("/organizations/<int:facId>", methods=["PUT"])
@token_auth.login_required
def update_org(facId):
"""
---
put:
summary: Modify an organization
description: modify an organization by authorized user
security:
- BasicAuth: []
- BearerAuth: []
parameters:
- in: path
name: facId
schema:
type: integer
required: true
description: facId of the organization to update
requestBody:
required: true
content:
application/json:
schema: OrganizationInputSchema
responses:
'200':
description: resource updated successful
content:
application/json:
schema: OrganizationSchema
'401':
description: Not authenticated
'204':
description: No content
tags:
- Organizations
"""
organization = Organizations.query.filter_by(facId=facId).first_or_404()
data = request.get_json() or {}
organization.from_dict(data)
db.session.commit()
response = jsonify(OrganizationSchema().dump(organization))
response.status_code = 200
response.headers["Location"] = url_for(
"api.get_organization", facId=organization.facId
)
return response
@bp.route("organizations/<int:facId>", methods=["DELETE"])
@token_auth.login_required
def delete_org(facId):
"""
---
delete:
summary: Delete an organization
description: delete organization by authorized user
security:
- BasicAuth: []
- BearerAuth: []
parameters:
- in: path
name: facId
schema:
type: integer
required: true
description: facId of the organization to be deleted
responses:
'401':
description: Not authenticated
'204':
description: No content
tags:
- Organizations
"""
organization = Organizations.query.filter_by(facId=facId).first_or_404()
db.session.delete(organization)
db.session.commit()
return "", 204
| 29.069697 | 92 | 0.607735 |
e5fd252ee35652a1f4b85c3b789128667e42b54b | 3,574 | py | Python | bindings/python/ensmallen/datasets/string/corynebacteriumdiphtheriae.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/corynebacteriumdiphtheriae.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/corynebacteriumdiphtheriae.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Corynebacterium diphtheriae.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CorynebacteriumDiphtheriae(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Corynebacterium diphtheriae graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True,
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.0
            - homology.v11.5
            - physical.links.v11.0
            - physical.links.v11.5
            - links.v11.0
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Corynebacterium diphtheriae graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Fixed docstring typos from the generator template: "Instace" ->
    # "Instance" and "Wether" -> "Whether". Behavior is unchanged: build
    # the retriever, then immediately invoke it to obtain the Graph.
    return AutomaticallyRetrievedGraph(
        graph_name="CorynebacteriumDiphtheriae",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| 33.092593 | 223 | 0.67991 |
ae950234c6a37f0eb3fceddf63dfe684aa771a9d | 16,857 | py | Python | tests/unit/cirrus/package_tests.py | Maxsparrow/cirrus | ae9639daba4f2d8d9285e98d5b11a89eac573f96 | [
"Apache-2.0"
] | 12 | 2016-04-30T16:13:55.000Z | 2021-01-20T23:42:31.000Z | tests/unit/cirrus/package_tests.py | Maxsparrow/cirrus | ae9639daba4f2d8d9285e98d5b11a89eac573f96 | [
"Apache-2.0"
] | 153 | 2015-02-12T15:25:42.000Z | 2020-03-09T07:16:15.000Z | tests/unit/cirrus/package_tests.py | Maxsparrow/cirrus | ae9639daba4f2d8d9285e98d5b11a89eac573f96 | [
"Apache-2.0"
] | 7 | 2015-06-15T21:30:38.000Z | 2020-02-17T02:13:00.000Z | #!/usr/bin/env python
"""tests for package commands """
import unittest
import mock
import os
import json
import tempfile
import argparse
from cirrus._2to3 import ConfigParser, to_str
from cirrus.package import (
create_files,
setup_branches,
commit_and_tag,
build_parser,
backup_file,
init_package,
build_project
)
from .harnesses import CirrusConfigurationHarness
class BuildParserTest(unittest.TestCase):
    """test for cli parser"""

    def test_build_parser(self):
        # 'init' with no package name must exit with a usage error;
        # silence the argparse error output during the check.
        with mock.patch('sys.stderr'):
            with self.assertRaises(SystemExit):
                build_parser(['init'])
        # A minimal valid invocation picks up the branch defaults.
        parsed = build_parser(['init', '-p', 'throwaway', '-s', 'src'])
        self.assertEqual(parsed.source, 'src')
        self.assertEqual(parsed.package, 'throwaway')
        self.assertEqual(parsed.master, 'master')
        self.assertEqual(parsed.develop, 'develop')

    def test_build_parser_bad_package(self):
        # Hyphenated package names are rejected by the parser.
        with self.assertRaises(SystemExit):
            build_parser(['init', '-p', 'th-row-away', '-s', 'src'])
class GitFunctionTests(unittest.TestCase):
    """
    tests for git util functions
    """
    def setUp(self):
        """
        set up for tests
        """
        self.tempdir = to_str(tempfile.mkdtemp())
        self.repo = os.path.join(self.tempdir, 'throwaway')
        os.mkdir(self.repo)
        self.bak_file = os.path.join(self.repo, 'backmeup')
        with open(self.bak_file, 'w') as handle:
            handle.write("this file exists")
        # Keep working_dir patched for every test in this case.
        self.patch_working_dir = mock.patch('cirrus.package.working_dir')
        self.mock_wd = self.patch_working_dir.start()

    def tearDown(self):
        self.patch_working_dir.stop()
        if os.path.exists(self.tempdir):
            os.system('rm -rf {}'.format(self.tempdir))

    def test_backup_file(self):
        """test backup_file"""
        backup_file(self.bak_file)
        files = os.listdir(self.repo)
        # assertIn replaces the failUnless alias removed in Python 3.12.
        self.assertIn('backmeup', files)
        self.assertIn('backmeup.BAK', files)

    @mock.patch('cirrus.package.RepoInitializer')
    @mock.patch('cirrus.package.get_active_branch')
    @mock.patch('cirrus.package.branch')
    def test_setup_branches(self, mock_branch, mock_active, mock_init):
        """test setup_branches"""
        opts = mock.Mock()
        opts.no_remote = False
        opts.repo = self.repo
        opts.origin = 'origin'
        opts.develop = 'develop'
        opts.master = 'master'
        mock_active.return_value = 'develop'
        mock_initializer = mock.Mock()
        mock_initializer.init_branch = mock.Mock()
        mock_init.return_value = mock_initializer
        setup_branches(opts)
        mock_initializer.init_branch.assert_has_calls([
            mock.call('master', 'origin', remote=True),
            mock.call('develop', 'origin', remote=True)
        ])
        self.assertTrue(mock_active.called)

        # Exercise the no-remote path as well.
        opts.no_remote = True
        # Fix: the original called ``reset_mocks()`` which is not a Mock
        # API method -- it silently created and called a child mock.
        mock_initializer.reset_mock()
        setup_branches(opts)
        mock_initializer.init_branch.assert_has_calls([
            mock.call('master', 'origin', remote=True),
            mock.call('develop', 'origin', remote=True)
        ])

    @mock.patch('cirrus.package.commit_files_optional_push')
    @mock.patch('cirrus.package.get_tags')
    @mock.patch('cirrus.package.tag_release')
    @mock.patch('cirrus.package.branch')
    def test_commit_and_tag(self, mock_branch, mock_tag_rel, mock_tags, mock_commit):
        opts = mock.Mock()
        opts.no_remote = False
        opts.repo = self.repo
        opts.master = 'master'
        opts.develop = 'develop'
        opts.version = '0.0.0'
        # tag doesnt exist
        mock_tags.return_value = ['0.0.1']
        commit_and_tag(opts, 'file1', 'file2')
        self.assertTrue(mock_commit.called)
        mock_commit.assert_has_calls([
            mock.call(
                self.repo,
                'git cirrus package init',
                True,
                'file1',
                'file2'
            )
        ])
        self.assertTrue(mock_tags.called)
        self.assertTrue(mock_tag_rel.called)
        self.assertTrue(mock_branch.called)

        # tag exists: tag_release must not be called again
        opts.version = '0.0.1'
        mock_tag_rel.reset_mock()
        commit_and_tag(opts, 'file1', 'file2')
        self.assertFalse(mock_tag_rel.called)
class CreateFilesTest(unittest.TestCase):
    """mocked create_files function tests"""

    def setUp(self):
        """
        set up for tests
        """
        self.tempdir = to_str(tempfile.mkdtemp())
        self.repo = os.path.join(self.tempdir, 'throwaway')
        src_dir = os.path.join(self.repo, 'src')
        pkg_dir = os.path.join(self.repo, 'src', 'unittests')
        os.mkdir(self.repo)
        os.mkdir(src_dir)
        os.mkdir(pkg_dir)
        self.patch_environ = mock.patch.dict(
            os.environ,
            {'HOME': self.tempdir, 'USER': 'steve'}
        )
        self.patch_environ.start()
        init_file = os.path.join(pkg_dir, '__init__.py')
        with open(init_file, 'w') as handle:
            handle.write('# initfile\n')
            handle.write('__version__=\'0.0.0\'\n')

    def tearDown(self):
        self.patch_environ.stop()
        if os.path.exists(self.tempdir):
            os.system('rm -rf {}'.format(self.tempdir))

    def _make_opts(self, **overrides):
        """Build the default opts mock for create_files.

        Keyword arguments override individual fields; this removes the
        near-identical 18-line setup previously repeated in every test.
        """
        opts = mock.Mock()
        opts.repo = self.repo
        opts.source = 'src'
        opts.version = '0.0.1'
        opts.version_file = None
        # Fix: one test originally set the *string* 'False' here, which is
        # truthy; all tests now use the boolean like their siblings.
        opts.test_mode = False
        opts.templates = ['include steve/*']
        opts.history_file = 'HISTORY.md'
        opts.package = 'unittests'
        opts.desc = "DESCRIPTION"
        opts.org = "ORG"
        opts.develop = 'develop'
        opts.requirements = 'requirements.txt'
        opts.test_requirements = 'test-requirements.txt'
        opts.pypi_package_name = None
        opts.python = None
        opts.create_version_file = False
        opts.gitignore_url = "GIT_IGNORE_URL"
        opts.add_gitignore = False
        for name, value in overrides.items():
            setattr(opts, name, value)
        return opts

    def _assert_common_files(self):
        """Assert the files create_files always writes, return the listing."""
        dir_list = os.listdir(self.repo)
        self.assertIn('cirrus.conf', dir_list)
        self.assertIn('HISTORY.md', dir_list)
        self.assertIn('MANIFEST.in', dir_list)
        self.assertIn('setup.py', dir_list)
        return dir_list

    def _assert_conf_and_content(self, opts):
        """Check cirrus.conf values plus HISTORY/MANIFEST/version content."""
        cirrus_conf = os.path.join(self.repo, 'cirrus.conf')
        config = ConfigParser.RawConfigParser()
        config.read(cirrus_conf)
        self.assertEqual(config.get('package', 'name'), opts.package)
        self.assertEqual(config.get('package', 'version'), opts.version)
        history = os.path.join(self.repo, 'HISTORY.md')
        with open(history, 'r') as handle:
            self.assertIn('CIRRUS_HISTORY_SENTINEL', handle.read())
        manifest = os.path.join(self.repo, 'MANIFEST.in')
        with open(manifest, 'r') as handle:
            content = handle.read()
        self.assertIn('include requirements.txt', content)
        self.assertIn('include cirrus.conf', content)
        self.assertIn('include steve/*', content)
        version = os.path.join(self.repo, 'src', 'unittests', '__init__.py')
        with open(version, 'r') as handle:
            self.assertIn(opts.version, handle.read())

    def test_create_files(self):
        """test create_files call and content of files"""
        opts = self._make_opts()
        create_files(opts)
        self._assert_common_files()
        self._assert_conf_and_content(opts)

    def test_create_files_with_version(self):
        """test create_files with create_version_file enabled"""
        opts = self._make_opts(create_version_file=True)
        # Remove the pre-seeded version file so create_files must write it.
        version = os.path.join(self.repo, 'src', 'unittests', '__init__.py')
        os.system('rm -f {}'.format(version))
        create_files(opts)
        self._assert_common_files()
        self._assert_conf_and_content(opts)

    def test_create_files_with_python(self):
        """test create_files with explicit python and pypi package name"""
        opts = self._make_opts(
            create_version_file=True,
            templates=[],
            pypi_package_name='pypi.package.unittest',
            python='python3',
        )
        version = os.path.join(self.repo, 'src', 'unittests', '__init__.py')
        os.system('rm -f {}'.format(version))
        create_files(opts)
        self._assert_common_files()
        cirrus_conf = os.path.join(self.repo, 'cirrus.conf')
        config = ConfigParser.RawConfigParser()
        config.read(cirrus_conf)
        # pypi name wins over the package name in the config.
        self.assertEqual(config.get('package', 'name'), opts.pypi_package_name)
        self.assertEqual(config.get('package', 'version'), opts.version)
        self.assertEqual(config.get('build', 'python'), 'python3')

    @mock.patch("cirrus.package.requests.get")
    def test_create_files_with_gitignore(self, mock_get):
        """test create_files downloads and writes .gitignore"""
        mock_resp = mock.Mock()
        mock_resp.raise_for_status = mock.Mock()
        mock_resp.content = "IGNORE ME\n"
        mock_get.return_value = mock_resp
        opts = self._make_opts(
            create_version_file=True,
            templates=[],
            pypi_package_name='pypi.package.unittest',
            python='python3',
            add_gitignore=True,
        )
        version = os.path.join(self.repo, 'src', 'unittests', '__init__.py')
        os.system('rm -f {}'.format(version))
        create_files(opts)
        dir_list = self._assert_common_files()
        self.assertIn('.gitignore', dir_list)
        gitignore = os.path.join(self.repo, '.gitignore')
        with open(gitignore, 'r') as handle:
            content = handle.read()
        self.assertEqual(content.strip(), "IGNORE ME")
@unittest.skip("Integ test not unit test")
class PackageInitBootstrapTest(unittest.TestCase):
def setUp(self):
"""
set up for tests
"""
self.tempdir = tempfile.mkdtemp()
self.repo = os.path.join(self.tempdir, 'throwaway')
os.mkdir(self.repo)
cmd = (
"cd {} && git init && "
"git checkout -b master && "
"git commit --allow-empty -m \"new repo\" "
).format(self.repo)
os.system(cmd)
def tearDown(self):
if os.path.exists(self.tempdir):
os.system('rm -rf {}'.format(self.tempdir))
def test_init_command_dot_package(self):
"""test the init command"""
argslist = [
'init', '--bootstrap', '-p', 'pkg.module.throwaway', '-r', self.repo,
'--no-remote',
'-s', 'src'
]
opts = build_parser(argslist)
init_package(opts)
conf = os.path.join(self.repo, 'cirrus.conf')
self.failUnless(os.path.exists(conf))
src_dir = os.path.join(self.repo, 'src', 'pkg', 'module', 'throwaway', '__init__.py')
test_dir = os.path.join(self.repo, 'tests', 'unit', 'pkg', 'module', 'throwaway', '__init__.py')
sample = os.path.join(self.repo, 'tests', 'unit', 'pkg', 'module', 'throwaway', 'sample_test.py')
self.failUnless(os.path.exists(src_dir))
self.failUnless(os.path.exists(test_dir))
@unittest.skip("Integ test not unit test")
class PackageInitCommandIntegTest(unittest.TestCase):
"""test case for package init command """
def setUp(self):
"""
set up for tests
"""
self.tempdir = to_str(tempfile.mkdtemp())
self.repo = os.path.join(self.tempdir, 'throwaway')
src_dir = os.path.join(self.repo, 'src')
pkg_dir = os.path.join(self.repo, 'src', 'throwaway')
os.mkdir(self.repo)
os.mkdir(src_dir)
os.mkdir(pkg_dir)
init_file = os.path.join(pkg_dir, '__init__.py')
with open(init_file, 'w') as handle:
handle.write('# initfile\n')
cmd = (
"cd {} && git init && "
"git checkout -b master && "
"git commit --allow-empty -m \"new repo\" "
).format(self.repo)
os.system(cmd)
def tearDown(self):
if os.path.exists(self.tempdir):
os.system('rm -rf {}'.format(self.tempdir))
def test_init_command(self):
"""test the init command"""
argslist = [
'init', '-p', 'throwaway', '-r', self.repo,
'--no-remote',
'-s', 'src',
'--templates', 'src/throwaway/templates/*'
]
opts = build_parser(argslist)
init_package(opts)
conf = os.path.join(self.repo, 'cirrus.conf')
self.failUnless(os.path.exists(conf))
@mock.patch('cirrus.editor_plugin.load_configuration')
def test_project_sublime_command(self, mock_lc):
"""
test the sublime project command plugin
"""
mock_config = mock.Mock()
mock_config.package_name = mock.Mock(return_value='unittests')
mock_lc.return_value = mock_config
argslist = [
'project',
'-t', 'Sublime',
'-r', self.repo,
]
opts = build_parser(argslist)
build_project(opts)
proj = os.path.join(self.repo, 'unittests.sublime-project')
self.failUnless(os.path.exists(proj))
with open(proj, 'r') as handle:
data = json.load(handle)
self.failUnless('folders' in data)
self.failUnless(data['folders'])
self.failUnless('path' in data['folders'][0])
self.assertEqual(data['folders'][0]['path'], self.repo)
build = data['build_systems'][0]
self.failUnless('name' in build)
self.assertEqual(build['name'], "cirrus virtualenv")
self.failUnless('env' in build)
self.failUnless('PYTHONPATH' in build['env'])
self.assertEqual(build['env']['PYTHONPATH'], self.repo)
# Allow running this test module directly (python package_tests.py).
if __name__ == '__main__':
    unittest.main()
| 35.339623 | 105 | 0.601234 |
5c9fd61f090c03cd64b4a64eeb81f7236d797cdb | 333 | py | Python | src/core/aws/data/message.py | zaxmks/demo-data-compliance-service | 372e612c570aaf5b512bec17627f825e880add67 | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | src/core/aws/data/message.py | zaxmks/demo-data-compliance-service | 372e612c570aaf5b512bec17627f825e880add67 | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | src/core/aws/data/message.py | zaxmks/demo-data-compliance-service | 372e612c570aaf5b512bec17627f825e880add67 | [
"CNRI-Python",
"CECILL-B"
] | null | null | null | from dataclasses import dataclass
from uuid import UUID
from dataclasses_json import LetterCase, dataclass_json
from src.core.aws.data.file_message import FileMessage
@dataclass_json(letter_case=LetterCase.PASCAL)
@dataclass
class AwsMessage:
    """Typed view of an AWS queue message envelope.

    ``LetterCase.PASCAL`` makes dataclasses-json map each snake_case field
    to a PascalCase JSON key (e.g. ``message_id`` <-> ``MessageId``), which
    matches the AWS message shape — presumably SQS; confirm against the
    consumer in ``src/core/aws``.
    """
    message_id: UUID  # unique identifier assigned to the message
    receipt_handle: str  # opaque token used to acknowledge/delete the message
    MD5OfBody: str  # checksum of the raw body; name kept as-is to match the AWS key exactly
    body: FileMessage  # parsed message payload
| 20.8125 | 55 | 0.804805 |
1485bf3d0456ace95183a96dad05df6ee701fec0 | 4,960 | py | Python | automlToolkit/components/hpo_optimizer/utils/acquisition.py | zwt233/automl-toolkit | 67d057f5e0c74bec5b3cbde1440ec014696737ef | [
"MIT"
] | null | null | null | automlToolkit/components/hpo_optimizer/utils/acquisition.py | zwt233/automl-toolkit | 67d057f5e0c74bec5b3cbde1440ec014696737ef | [
"MIT"
] | 1 | 2020-04-28T15:34:27.000Z | 2020-04-28T15:34:27.000Z | automlToolkit/components/hpo_optimizer/utils/acquisition.py | zwt233/automl-toolkit | 67d057f5e0c74bec5b3cbde1440ec014696737ef | [
"MIT"
] | null | null | null | import abc
import logging
import numpy as np
from scipy.stats import norm
from automlToolkit.components.hpo_optimizer.utils.base_epm import AbstractEPM
class AbstractAcquisitionFunction(object, metaclass=abc.ABCMeta):
    """Abstract base class for acquisition function

    Attributes
    ----------
    model
    logger
    """

    def __str__(self):
        # NOTE(review): ``long_name`` is never set in this base class; it is
        # presumably provided by concrete subclasses -- confirm before
        # relying on str() of an arbitrary instance.
        return type(self).__name__ + " (" + self.long_name + ")"

    def __init__(self, model: AbstractEPM, **kwargs):
        """Constructor

        Parameters
        ----------
        model : AbstractEPM
            Models the objective function.
        """
        self.model = model
        self.logger = logging.getLogger(
            self.__module__ + "." + self.__class__.__name__)

    def update(self, **kwargs):
        """Update the acquisition functions values.

        This method will be called if the model is updated. E.g.
        entropy search uses it to update its approximation of P(x=x_min),
        EI uses it to update the current fmin.

        The default implementation takes all keyword arguments and sets the
        respective attributes for the acquisition function object.

        Parameters
        ----------
        kwargs
        """
        for key in kwargs:
            setattr(self, key, kwargs[key])

    def __call__(self, configurations: np.ndarray):
        """Computes the acquisition value for a given X

        Parameters
        ----------
        configurations : list
            The configurations where the acquisition function
            should be evaluated.

        Returns
        -------
        np.ndarray(N, 1)
            acquisition values for X
        """
        X = configurations
        # Promote a single configuration vector to a (1, D) batch.
        if len(X.shape) == 1:
            X = X[np.newaxis, :]

        acq = self._compute(X)
        if np.any(np.isnan(acq)):
            idx = np.where(np.isnan(acq))[0]
            # Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin ``float`` is the documented replacement and
            # resolves to the same float64 finfo here. NaN acquisition
            # values are clamped to the most negative finite float so they
            # can never be selected as the maximizer.
            acq[idx, :] = -np.finfo(float).max
        return acq

    @abc.abstractmethod
    def _compute(self, X: np.ndarray):
        """Computes the acquisition value for a given point X. This function has
        to be overwritten in a derived class.

        Parameters
        ----------
        X : np.ndarray
            The input points where the acquisition function
            should be evaluated. The dimensionality of X is (N, D), with N as
            the number of points to evaluate at and D is the number of
            dimensions of one X.

        Returns
        -------
        np.ndarray(N,1)
            Acquisition function values wrt X
        """
        raise NotImplementedError()
class EI(AbstractAcquisitionFunction):
    r"""Computes for a given x the expected improvement as
    acquisition value.

    :math:`EI(X) := \mathbb{E}\left[ \max\{0, f(\mathbf{X^+}) - f_{t+1}(\mathbf{X}) - \xi\right] \} ]`,
    with :math:`f(X^+)` as the incumbent.
    """

    def __init__(self,
                 model: AbstractEPM,
                 par: float=0.0,
                 **kwargs):
        """Constructor

        Parameters
        ----------
        model : AbstractEPM
            A model that implements at least
                 - predict_marginalized_over_instances(X)
        par : float, default=0.0
            Controls the balance between exploration and exploitation of the
            acquisition function.
        """
        super(EI, self).__init__(model)
        self.long_name = 'Expected Improvement'
        self.par = par
        # Current best observed objective; must be provided via update(eta=...)
        # before the acquisition function can be evaluated.
        self.eta = None

    def _compute(self, X: np.ndarray, **kwargs):
        """Computes the EI value and its derivatives.

        Parameters
        ----------
        X: np.ndarray(N, D), The input points where the acquisition function
            should be evaluated. The dimensionality of X is (N, D), with N as
            the number of points to evaluate at and D is the number of
            dimensions of one X.

        Returns
        -------
        np.ndarray(N,1)
            Expected Improvement of X

        Raises
        ------
        ValueError
            If `eta` has not been set, or if any computed EI value is negative.
        """
        if len(X.shape) == 1:
            X = X[:, np.newaxis]

        m, v = self.model.predict_marginalized_over_instances(X)
        s = np.sqrt(v)

        if self.eta is None:
            raise ValueError('No current best specified. Call update('
                             'eta=<int>) to inform the acquisition function '
                             'about the current best value.')

        eta = self.eta['obj']
        # Fix: where s == 0 the division yields inf/nan and previously emitted
        # RuntimeWarnings; those entries are overwritten with 0.0 below, so the
        # warnings are suppressed rather than spamming the log.
        with np.errstate(divide='ignore', invalid='ignore'):
            z = (eta - m - self.par) / s
            f = (eta - m - self.par) * norm.cdf(z) + s * norm.pdf(z)
        if np.any(s == 0.0):
            # if std is zero, we have observed x on all instances
            # using a RF, std should be never exactly 0.0
            # Fix: logger.warn is a deprecated alias of logger.warning.
            self.logger.warning("Predicted std is 0.0 for at least one sample.")
            f[s == 0.0] = 0.0

        if (f < 0).any():
            raise ValueError(
                "Expected Improvement is smaller than 0 for at least one "
                "sample.")
        return f
| 29.700599 | 103 | 0.553427 |
8fba32d12111f85dc80af9dc8bb2227ff00fd5d8 | 1,814 | py | Python | tests/v2/test_parser.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 14 | 2018-03-02T15:39:19.000Z | 2020-02-25T12:52:40.000Z | tests/v2/test_parser.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 16 | 2018-03-19T15:02:18.000Z | 2020-02-08T02:06:07.000Z | tests/v2/test_parser.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 14 | 2018-03-17T10:43:30.000Z | 2020-03-12T10:49:51.000Z | """Test parser functions that converts the incoming json from API into dataclass models."""
from dataclasses import dataclass
from typing import Optional
import pytest
from aiohue.util import dataclass_from_dict
@dataclass
class BasicModelChild:
    """Basic test model, used as the nested `e` field of `BasicModel`."""
    a: int
    b: str
    c: str
    d: Optional[int]  # may legitimately be None in the raw payload
@dataclass
class BasicModel:
    """Basic test model covering nesting, Optional fields and a default."""
    a: int
    b: float  # exercises int -> float coercion in the parser
    c: str
    d: Optional[int]
    e: BasicModelChild  # exercises recursive parsing
    f: str = "default"  # exercises default-value handling when key is absent
def test_dataclass_from_dict():
    """Test dataclass from dict parsing.

    Exercises dataclass_from_dict: basic fields, recursion, defaults,
    int->float widening, type mismatches, missing keys, and strict-mode
    handling of unknown keys.
    """
    raw = {
        "a": 1,
        "b": 1.0,
        "c": "hello",
        "d": 1,
        "e": {"a": 2, "b": "test", "c": "test", "d": None},
    }
    res = dataclass_from_dict(BasicModel, raw)
    # test the basic values
    assert isinstance(res, BasicModel)
    assert res.a == 1
    assert res.b == 1.0
    assert res.d == 1
    # test recursive parsing
    assert isinstance(res.e, BasicModelChild)
    # test default value
    assert res.f == "default"
    # test int gets converted to float
    raw["b"] = 2
    res = dataclass_from_dict(BasicModel, raw)
    assert res.b == 2.0
    # test string doesn't match int
    with pytest.raises(TypeError):
        raw2 = {**raw}
        raw2["a"] = "blah"
        dataclass_from_dict(BasicModel, raw2)
    # test missing key result in keyerror
    with pytest.raises(KeyError):
        raw2 = {**raw}
        del raw2["a"]
        dataclass_from_dict(BasicModel, raw2)
    # test extra keys silently ignored in non-strict mode
    raw2 = {**raw}
    raw2["extrakey"] = "something"
    dataclass_from_dict(BasicModel, raw2, strict=False)
    # test extra keys not silently ignored in strict mode
    with pytest.raises(KeyError):
        dataclass_from_dict(BasicModel, raw2, strict=True)
| 25.549296 | 91 | 0.624035 |
7e0f06cddda074ff02ce88a20972a5cdab502bd6 | 15,872 | py | Python | pyatop/structs/atop_230.py | dfrtz/pyatop | d12001caf685fe04d501f10d78f1c6c113440c6f | [
"Apache-2.0"
] | null | null | null | pyatop/structs/atop_230.py | dfrtz/pyatop | d12001caf685fe04d501f10d78f1c6c113440c6f | [
"Apache-2.0"
] | null | null | null | pyatop/structs/atop_230.py | dfrtz/pyatop | d12001caf685fe04d501f10d78f1c6c113440c6f | [
"Apache-2.0"
] | null | null | null | """Structs and definitions used serialize/deserialize ATOP statistics directly from log files.
Structs are declared in a way that will help provide as close to a 1 to 1 match as possible for debuggability
and maintenance. The _fields_ of every struct match their original name, however the struct names have been updated
to match python CamelCase standards. Each struct includes the following to help identify the original source:
C Name: utsname
C Location: sys/utsname.h
Struct ordering and visual whitespace in the _fields_ are left to help match the original source in readability.
If structs match exactly from a previous version, they are reused via aliasing.
See https://github.com/Atoptool/atop for more information and references to the C process source code.
Using schemas and structs from ATOP 2.30.
"""
import ctypes
from pyatop.structs import atop_126
# Disable the following pylint warnings to allow the variables and classes to match the style from the C.
# This helps with maintainability and cross-referencing.
# pylint: disable=invalid-name,too-few-public-methods
# Definitions from time.h
time_t = ctypes.c_long
# Definitions from atop.h
count_t = ctypes.c_longlong
ACCTACTIVE = 0x00000001
PATCHSTAT = 0x00000002
IOSTAT = 0x00000004
PATCHACCT = 0x00000008
# Definitions from sys/types.h
off_t = ctypes.c_long
# Definitions from photoproc.h
PNAMLEN = 15
CMDLEN = 255
# Definitions from photosyst.h
MAXCPU = 2048
MAXDSK = 1024
MAXLVM = 2048
MAXMDD = 256
MAXINTF = 128
MAXCONTAINER = 128
MAXNFSMOUNT = 64
MAXDKNAM = 32
UTSName = atop_126.UTSName
class Header(ctypes.Structure):
    """Top level struct to describe information about the system running ATOP and the log file itself.

    Field descriptions from atop:
        aversion   Creator atop version with MSB.
        future1    Can be reused.
        future2    Can be reused.
        rawheadlen Length of struct rawheader.
        rawreclen  Length of struct rawrecord.
        hertz      Clock interrupts per second.
        sfuture[6] Future use.
        sstatlen   Length of struct sstat.
        tstatlen   Length of struct tstat.
        utsname    Info about this system.

    C Name: rawheader
    C Location: rawlog.c
    """
    # Field layout mirrors the C struct; order and widths must not change.
    _fields_ = [
        ('magic', ctypes.c_uint),
        ('aversion', ctypes.c_ushort),
        ('future1', ctypes.c_ushort),
        ('future2', ctypes.c_ushort),
        ('rawheadlen', ctypes.c_ushort),
        ('rawreclen', ctypes.c_ushort),
        ('hertz', ctypes.c_ushort),
        ('sfuture', ctypes.c_ushort * 6),
        ('sstatlen', ctypes.c_uint),
        ('tstatlen', ctypes.c_uint),
        ('utsname', UTSName),
        ('cfuture', ctypes.c_char * 8),
        ('pagesize', ctypes.c_uint),
        ('supportflags', ctypes.c_int),
        ('osrel', ctypes.c_int),
        ('osvers', ctypes.c_int),
        ('ossub', ctypes.c_int),
        ('ifuture', ctypes.c_int * 6),
    ]

    def check_compatibility(self) -> None:
        """Verify if the loaded values are compatible with this header version.

        Raises:
            ValueError if not compatible.
        """
        # Compare every stored length against the size of the struct it claims
        # to describe; any mismatch means this reader cannot decode the log.
        expectations = (
            (self.sstatlen, SStat),
            (self.tstatlen, TStat),
            (self.rawheadlen, Header),
            (self.rawreclen, Record),
        )
        compatible = [stored == ctypes.sizeof(struct) for stored, struct in expectations]
        if not all(compatible):
            raise ValueError(f'File has incompatible atop format. Struct length evaluations: {compatible}')

    def get_version(self) -> float:
        """Convert the raw version into a semantic version.

        Returns:
            version: The final major.minor version from the header aversion.
        """
        # High byte (minus the MSB flag) is the major version, low byte the minor.
        major = (self.aversion >> 8) & 0x7f
        minor = self.aversion & 0xff
        return float('{}.{}'.format(major, minor))
class Record(ctypes.Structure):
    """Top level struct to describe basic process information, and the following SStat and TStat structs.

    Field descriptions from atop:
        curtime   Current time (epoch).
        flags     Various flags.
        sfuture[3]  Future use.
        scomplen  Length of compressed sstat.
        pcomplen  Length of compressed tstats.
        interval  Interval (number of seconds).
        ndeviat   Number of tasks in list.
        nactproc  Number of processes in list.
        ntask     Total number of tasks.
        totproc   Total number of processes.
        totrun    Number of running threads.
        totslpi   Number of sleeping threads(S).
        totslpu   Number of sleeping threads(D).
        totzomb   Number of zombie processes.
        nexit     Number of exited processes.
        noverflow Number of overflow processes.
        ifuture[6]  Future use.

    C Name: rawrecord
    C Location: rawlog.c
    """
    # Field order and ctypes widths mirror the C struct exactly; any change
    # breaks binary decoding of atop raw logs.
    _fields_ = [
        ('curtime', time_t),
        ('flags', ctypes.c_ushort),
        ('sfuture', ctypes.c_ushort * 3),
        ('scomplen', ctypes.c_uint),
        ('pcomplen', ctypes.c_uint),
        ('interval', ctypes.c_uint),
        ('ndeviat', ctypes.c_uint),
        ('nactproc', ctypes.c_uint),
        ('ntask', ctypes.c_uint),
        ('totproc', ctypes.c_uint),
        ('totrun', ctypes.c_uint),
        ('totslpi', ctypes.c_uint),
        ('totslpu', ctypes.c_uint),
        ('totzomb', ctypes.c_uint),
        ('nexit', ctypes.c_uint),
        ('noverflow', ctypes.c_uint),
        ('ifuture', ctypes.c_uint * 6),
    ]
class MemStat(ctypes.Structure):
    """Embedded struct to describe basic memory information.

    C Name: memstat
    C Location: photosyst.h
    C Parent: sstat
    """
    # Layout mirrors the C struct; values are raw counters (units as recorded
    # by atop — NOTE(review): presumably pages/KiB depending on field, confirm
    # against photosyst.h before interpreting).
    _fields_ = [
        ('physmem', count_t),
        ('freemem', count_t),
        ('buffermem', count_t),
        ('slabmem', count_t),
        ('cachemem', count_t),
        ('cachedrt', count_t),
        ('totswap', count_t),
        ('freeswap', count_t),
        ('pgscans', count_t),
        ('pgsteal', count_t),
        ('allocstall', count_t),
        ('swouts', count_t),
        ('swins', count_t),
        ('commitlim', count_t),
        ('committed', count_t),
        ('shmem', count_t),
        ('shmrss', count_t),
        ('shmswp', count_t),
        ('slabreclaim', count_t),
        ('tothugepage', count_t),
        ('freehugepage', count_t),
        ('hugepagesz', count_t),
        ('vmwballoon', count_t),
        ('cfuture', count_t * 8),
    ]


# Unchanged since atop 1.26; reused via alias.
FreqCnt = atop_126.FreqCnt


class PerCPU(ctypes.Structure):
    """Embedded struct to describe per processor usage information.

    C Name: percpu
    C Location: photosyst.h
    C Parent: cpustat
    """
    # Per-CPU tick counters (system/user/nice/idle/wait/irq/softirq/steal/guest).
    _fields_ = [
        ('cpunr', ctypes.c_int),
        ('stime', count_t),
        ('utime', count_t),
        ('ntime', count_t),
        ('itime', count_t),
        ('wtime', count_t),
        ('Itime', count_t),
        ('Stime', count_t),
        ('steal', count_t),
        ('guest', count_t),
        ('freqcnt', FreqCnt),
        ('cfuture', count_t * 4),
    ]


class CPUStat(ctypes.Structure):
    """Embedded struct to describe basic overall processor information.

    C Name: cpustat
    C Location: photosyst.h
    C Parent: sstat
    """
    # `all` aggregates across CPUs; `cpu` is a fixed-size array of MAXCPU
    # entries of which only the first `nrcpu` are meaningful.
    _fields_ = [
        ('nrcpu', count_t),
        ('devint', count_t),
        ('csw', count_t),
        ('nprocs', count_t),
        ('lavg1', ctypes.c_float),
        ('lavg5', ctypes.c_float),
        ('lavg15', ctypes.c_float),
        ('cfuture', count_t * 4),
        ('all', PerCPU),
        ('cpu', PerCPU * MAXCPU)
    ]
class PerDSK(ctypes.Structure):
    """Embedded struct to describe per disk information.

    C Name: perdsk
    C Location: photosyst.h
    C Parent: dskstat
    """
    # Read/write request and sector counters plus total I/O time in ms.
    _fields_ = [
        ('name', ctypes.c_char * MAXDKNAM),
        ('nread', count_t),
        ('nrsect', count_t),
        ('nwrite', count_t),
        ('nwsect', count_t),
        ('io_ms', count_t),
        ('avque', count_t),
        ('cfuture', count_t * 4),
    ]


class DSKStat(ctypes.Structure):
    """Embedded struct to describe overall disk information.

    C Name: dskstat
    C Location: photosyst.h
    C Parent: sstat
    """
    # Fixed-size arrays for physical disks, MD devices and LVM volumes; only
    # the first ndsk/nmdd/nlvm entries of each array are populated.
    _fields_ = [
        ('ndsk', ctypes.c_int),
        ('nmdd', ctypes.c_int),
        ('nlvm', ctypes.c_int),
        ('dsk', PerDSK * MAXDSK),
        ('mdd', PerDSK * MAXMDD),
        ('lvm', PerDSK * MAXLVM),
    ]
class PerIntf(ctypes.Structure):
    """Embedded struct to describe per interface statistics.

    C Name: perintf
    C Location: photosyst.h
    C Parent: intfstat
    """
    # r* fields are receive counters, s* fields are send counters.
    _fields_ = [
        ('name', ctypes.c_char * 16),
        ('rbyte', count_t),
        ('rpack', count_t),
        ('rerrs', count_t),
        ('rdrop', count_t),
        ('rfifo', count_t),
        ('rframe', count_t),
        ('rcompr', count_t),
        ('rmultic', count_t),
        ('rfuture', count_t * 4),
        ('sbyte', count_t),
        ('spack', count_t),
        ('serrs', count_t),
        ('sdrop', count_t),
        ('sfifo', count_t),
        ('scollis', count_t),
        ('scarrier', count_t),
        ('scompr', count_t),
        ('sfuture', count_t * 4),
        ('type', ctypes.c_char),
        ('speed', ctypes.c_long),
        ('speedp', ctypes.c_long),
        ('duplex', ctypes.c_char),
        ('cfuture', count_t * 4),
    ]


class IntfStat(ctypes.Structure):
    """Embedded struct to describe overall interface statistics.

    C Name: intfstat
    C Location: photosyst.h
    C Parent: sstat
    """
    # Only the first `nrintf` entries of `intf` are populated.
    _fields_ = [
        ('nrintf', ctypes.c_int),
        ('intf', PerIntf * MAXINTF),
    ]
class PerNFSMount(ctypes.Structure):
    """Embedded struct to describe per NFS mount statistics.

    C Name: pernfsmount
    C Location: photosyst.h
    C Parent: nfsmounts
    """
    # Byte/page counters per mounted NFS filesystem.
    _fields_ = [
        ('name', ctypes.c_char * 128),
        ('age', count_t),
        ('bytesread', count_t),
        ('byteswrite', count_t),
        ('bytesdread', count_t),
        ('bytesdwrite', count_t),
        ('bytestotread', count_t),
        ('bytestotwrite', count_t),
        ('pagesmread', count_t),
        ('pagesmwrite', count_t),
        ('future', count_t * 8),
    ]


class Server(ctypes.Structure):
    """Embedded struct to describe NFS server information from the 'NFS' parseable.

    C Name: server
    C Location: photoproc.h
    C Parent: nfsstat
    """
    # RPC/network counters for the local NFS server role.
    _fields_ = [
        ('netcnt', count_t),
        ('netudpcnt', count_t),
        ('nettcpcnt', count_t),
        ('nettcpcon', count_t),
        ('rpccnt', count_t),
        ('rpcbadfmt', count_t),
        ('rpcbadaut', count_t),
        ('rpcbadcln', count_t),
        ('rpcread', count_t),
        ('rpcwrite', count_t),
        ('rchits', count_t),
        ('rcmiss', count_t),
        ('rcnoca', count_t),
        ('nrbytes', count_t),
        ('nwbytes', count_t),
        ('future', count_t * 8),
    ]


class Client(ctypes.Structure):
    """Embedded struct to describe NFS client information from the 'NFC' parseable.

    C Name: client
    C Location: photoproc.h
    C Parent: nfsstat
    """
    # RPC counters for the local NFS client role.
    _fields_ = [
        ('rpccnt', count_t),
        ('rpcretrans', count_t),
        ('rpcautrefresh', count_t),
        ('rpcread', count_t),
        ('rpcwrite', count_t),
        ('future', count_t * 8),
    ]


class NFSMounts(ctypes.Structure):
    """Embedded struct to describe NFS mount information from the 'NFM' parseable.

    C Name: mfsmounts
    C Location: photoproc.h
    C Parent: nfsstat
    """
    # Only the first `nrmounts` entries of `pernfsmount` are populated.
    _fields_ = [
        ('nrmounts', ctypes.c_int),
        ('pernfsmount', PerNFSMount * MAXNFSMOUNT)
    ]


class NFSStat(ctypes.Structure):
    """Embedded struct to describe NFS subsystem.

    C Name: nfstat
    C Location: photosyst.h
    C Parent: sstat
    """
    # Aggregates server-side, client-side and per-mount NFS statistics.
    _fields_ = [
        ('server', Server),
        ('client', Client),
        ('nfsmounts', NFSMounts),
    ]
class PerContainer(ctypes.Structure):
    """Embedded struct to describe per container statistics.

    C Name: percontainer
    C Location: photosyst.h
    C Parent: constat
    """
    # CPU-tick and memory counters per (OpenVZ-style) container.
    _fields_ = [
        ('ctid', ctypes.c_ulong),
        ('numproc', ctypes.c_ulong),
        ('system', count_t),
        ('user', count_t),
        ('nice', count_t),
        ('uptime', count_t),
        ('physpages', count_t),
    ]


class ContStat(ctypes.Structure):
    """Embedded struct to describe container subsystem.

    C Name: contstat
    C Location: photosyst.h
    C Parent: sstat
    """
    # Only the first `nrcontainer` entries of `cont` are populated.
    _fields_ = [
        ('nrcontainer', ctypes.c_int),
        ('cont', PerContainer * MAXCONTAINER),
    ]


# These network/web structs are unchanged since atop 1.26; reused via alias.
WWWStat = atop_126.WWWStat
IPv4Stats = atop_126.IPv4Stats
ICMPv4Stats = atop_126.ICMPv4Stats
UDPv4Stats = atop_126.UDPv4Stats
TCPStats = atop_126.TCPStats
IPv6Stats = atop_126.IPv6Stats
ICMPv6Stats = atop_126.ICMPv6Stats
UDPv6Stats = atop_126.UDPv6Stats
NETStat = atop_126.NETStat


class SStat(ctypes.Structure):
    """Top level struct to describe various subsystems.

    C Name: sstat
    C Location: photosyst.h
    """
    # One snapshot of every system-level subsystem; member order must match
    # the C struct for binary decoding.
    _fields_ = [
        ('cpu', CPUStat),
        ('mem', MemStat),
        ('net', NETStat),
        ('intf', IntfStat),
        ('dsk', DSKStat),
        ('nfs', NFSStat),
        ('cfs', ContStat),
        ('www', WWWStat),
    ]
class GEN(ctypes.Structure):
    """Embedded struct to describe a single process' general information from the 'GEN' parseable.

    C Name: gen
    C Location: photoproc.h
    C Parent: tstat
    """
    # Identity (pid/uids/gids), state flags and the command name/line of one task.
    _fields_ = [
        ('tgid', ctypes.c_int),
        ('pid', ctypes.c_int),
        ('ppid', ctypes.c_int),
        ('ruid', ctypes.c_int),
        ('euid', ctypes.c_int),
        ('suid', ctypes.c_int),
        ('fsuid', ctypes.c_int),
        ('rgid', ctypes.c_int),
        ('egid', ctypes.c_int),
        ('sgid', ctypes.c_int),
        ('fsgid', ctypes.c_int),
        ('nthr', ctypes.c_int),
        ('name', ctypes.c_char * (PNAMLEN + 1)),
        ('isproc', ctypes.c_char),
        ('state', ctypes.c_char),
        ('excode', ctypes.c_int),
        ('btime', time_t),
        ('elaps', time_t),
        ('cmdline', ctypes.c_char * (CMDLEN + 1)),
        ('nthrslpi', ctypes.c_int),
        ('nthrslpu', ctypes.c_int),
        ('nthrrun', ctypes.c_int),
        ('ctid', ctypes.c_int),
        ('vpid', ctypes.c_int),
        ('wasinactive', ctypes.c_int),
        ('container', ctypes.c_char * 16),
    ]


# Per-task CPU and disk structs are unchanged since atop 1.26; reused via alias.
CPU = atop_126.CPU
DSK = atop_126.DSK
class MEM(ctypes.Structure):
    """Embedded struct to describe a single process' memory usage from the 'MEM' parseable.

    C Name: mem
    C Location: photoproc.h
    C Parent: tstat
    """
    # Per-task page-fault counters and virtual/resident memory sizes.
    _fields_ = [
        ('minflt', count_t),
        ('majflt', count_t),
        ('vexec', count_t),
        ('vmem', count_t),
        ('rmem', count_t),
        ('pmem', count_t),
        ('vgrow', count_t),
        ('rgrow', count_t),
        ('vdata', count_t),
        ('vstack', count_t),
        ('vlibs', count_t),
        ('vswap', count_t),
        ('cfuture', count_t * 4),
    ]


class NET(ctypes.Structure):
    """Embedded struct to describe a single process' network usage from the 'NET' parseable.

    C Name: net
    C Location: photoproc.h
    C Parent: tstat
    """
    # Per-task TCP/UDP packet and size counters.
    _fields_ = [
        ('tcpsnd', count_t),
        ('tcpssz', count_t),
        ('tcprcv', count_t),
        ('tcprsz', count_t),
        ('udpsnd', count_t),
        ('udpssz', count_t),
        ('udprcv', count_t),
        ('udprsz', count_t),
        ('avail1', count_t),
        ('avail2', count_t),
        ('cfuture', count_t * 4),
    ]


class TStat(ctypes.Structure):
    """Top level struct to describe multiple statistics categories per task/process.

    C Name: tstat
    C Location: photoproc.h
    """
    # One record per task; member order must match the C struct.
    _fields_ = [
        ('gen', GEN),
        ('cpu', CPU),
        ('dsk', DSK),
        ('mem', MEM),
        ('net', NET),
    ]
ca7e9067092edcad5d977e36e23658fb3ef43ff7 | 553 | py | Python | load_word_2vec_model.py | rathee/Siamese_Text_Similaity | 11dfdb620770c9e43ca85af8a5788852fa2920ae | [
"MIT"
] | 2 | 2019-09-02T23:04:04.000Z | 2021-08-19T13:22:05.000Z | load_word_2vec_model.py | rathee/Siamese_Text_Similaity | 11dfdb620770c9e43ca85af8a5788852fa2920ae | [
"MIT"
] | null | null | null | load_word_2vec_model.py | rathee/Siamese_Text_Similaity | 11dfdb620770c9e43ca85af8a5788852fa2920ae | [
"MIT"
] | null | null | null | from gensim.models import Word2Vec
import numpy as np
# NOTE(review): hard-coded absolute path to a pre-trained Word2Vec model
# (300 features, min word count 40, context window 10 per the filename) —
# consider parameterizing for portability.
model = Word2Vec.load('/home/rathee/projects/quora_similar_questions/data/300features_40minwords_10context_new')
def get_model_embeddings():
    """Build an embedding matrix and word<->index lookup tables from the model.

    Row 0 of the matrix is reserved as an all-zero vector (padding/unknown
    token), so word indices start at 1.

    Returns:
        tuple: (w2v, model_dict, index_to_word) where
            w2v (np.ndarray): (vocab_size + 1, dim) embedding matrix,
            model_dict (dict): word -> 1-based row index,
            index_to_word (dict): 1-based row index -> word.
    """
    vocab = model.wv.index2word
    # Derive the dimensionality from the model instead of hard-coding 300,
    # so the function works with models trained at any size.
    dim = model.wv.vector_size
    # np.zeros already leaves row 0 as the zero vector; no separate assignment needed.
    w2v = np.zeros((len(vocab) + 1, dim))
    model_dict = {}
    index_to_word = {}
    for i, word in enumerate(vocab, start=1):
        # model.wv[word] — direct __getitem__ on the model is deprecated in newer gensim.
        w2v[i] = model.wv[word]
        model_dict[word] = i
        index_to_word[i] = word
    return w2v, model_dict, index_to_word
#w2v,model_dict, index_to_word = get_model_embeddings()
#print len(w2v)
| 27.65 | 112 | 0.743219 |
5d2d148a8b2bcf3629704a0a60ca1f3aed7b342f | 51 | py | Python | symmetric_case/full_sym_base/base_flow.py | aero-cfd/Cylinder2DFlowControlDRL | fd174085fb8621d6906bfdf394c49776dc5ce515 | [
"MIT"
] | 112 | 2019-02-20T15:24:16.000Z | 2022-03-21T02:17:56.000Z | symmetric_case/full_sym_base/base_flow.py | aero-cfd/Cylinder2DFlowControlDRL | fd174085fb8621d6906bfdf394c49776dc5ce515 | [
"MIT"
] | 24 | 2019-03-12T14:12:18.000Z | 2022-03-11T11:11:37.000Z | symmetric_case/full_sym_base/base_flow.py | aero-cfd/Cylinder2DFlowControlDRL | fd174085fb8621d6906bfdf394c49776dc5ce515 | [
"MIT"
] | 61 | 2019-02-20T13:54:49.000Z | 2022-03-28T12:29:02.000Z | import env
env.resume_env(plot=False, remesh=True)
| 17 | 39 | 0.803922 |
30bf2d4381b50d69090f0c3eca16b39b5be6ee00 | 920 | py | Python | io_base/pickle_io.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 14 | 2020-02-07T21:36:39.000Z | 2022-03-12T22:37:04.000Z | io_base/pickle_io.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 19 | 2019-05-18T23:58:30.000Z | 2022-01-09T16:45:35.000Z | io_base/pickle_io.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 5 | 2020-10-06T06:10:27.000Z | 2021-07-08T12:58:46.000Z | import pickle
def save_file(data, filename):
"""Save data as pickle. The standard pickle file name is ``*.pk``.
See a `benchmark on IO performance <http://stackoverflow.com/a/41425878/5620182>`_
Args:
data (np.array or dict): Data to save.
filename (str): Name of the file.
Examples:
>>> data = np.ones(5)
>>> save_file(data, 'file.pk')
>>> os.path.isfile('file.pk')
True
>>> os.remove('file.pk')
>>> os.path.isfile('file.pk')
False
"""
pickle.dump(data, open(filename, "wb"))
def read_file(filename):
    """Read a pickle file.

    Args:
        filename (str): Name of the file.

    Returns:
        np.array or dict: Data to read.

    Examples:
        >>> read_file('share/data.pk')
        array([1., 1., 1., 1., 1.])
    """
    # Use a context manager so the handle is closed deterministically;
    # the previous version left closing to the garbage collector.
    with open(filename, "rb") as handle:
        return pickle.load(handle)
| 21.904762 | 86 | 0.538043 |
43e27e375f6d2119eecbba569490eea643b1bd45 | 3,387 | py | Python | sdk/python/pulumi_aws/swf/domain.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/swf/domain.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/swf/domain.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Domain(pulumi.CustomResource):
    # NOTE: this class is generated by the Pulumi Terraform Bridge (see file
    # header); edit with care, regeneration will overwrite manual changes.
    description: pulumi.Output[str]
    """
    The domain description.
    """
    name: pulumi.Output[str]
    """
    The name of the domain. If omitted, Terraform will assign a random, unique name.
    """
    name_prefix: pulumi.Output[str]
    """
    Creates a unique name beginning with the specified prefix. Conflicts with `name`.
    """
    workflow_execution_retention_period_in_days: pulumi.Output[str]
    """
    Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
    """
    def __init__(__self__, resource_name, opts=None, description=None, name=None, name_prefix=None, workflow_execution_retention_period_in_days=None, __name__=None, __opts__=None):
        """
        Provides an SWF Domain resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The domain description.
        :param pulumi.Input[str] name: The name of the domain. If omitted, Terraform will assign a random, unique name.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified prefix. Conflicts with `name`.
        :param pulumi.Input[str] workflow_execution_retention_period_in_days: Length of time that SWF will continue to retain information about the workflow execution after the workflow execution is complete, must be between 0 and 90 days.
        """
        # `__name__`/`__opts__` are deprecated aliases of `resource_name`/`opts`;
        # accepted (with a warning) for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        __props__['description'] = description
        __props__['name'] = name
        __props__['name_prefix'] = name_prefix
        # The retention period is the only required input of this resource.
        if workflow_execution_retention_period_in_days is None:
            raise TypeError("Missing required property 'workflow_execution_retention_period_in_days'")
        __props__['workflow_execution_retention_period_in_days'] = workflow_execution_retention_period_in_days
        super(Domain, __self__).__init__(
            'aws:swf/domain:Domain',
            resource_name,
            __props__,
            opts)

    # Map between the provider's camelCase property names and Python snake_case.
    def translate_output_property(self, prop):
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 43.987013 | 239 | 0.702096 |
b1b493e4782c793974ca0eafb212437dcd4e1748 | 10,312 | py | Python | kolibri/core/analytics/utils.py | nucleogenesis/kolibri | 7b653a28f014ed9d0f29d116e120d1a02eb62b4c | [
"MIT"
] | null | null | null | kolibri/core/analytics/utils.py | nucleogenesis/kolibri | 7b653a28f014ed9d0f29d116e120d1a02eb62b4c | [
"MIT"
] | null | null | null | kolibri/core/analytics/utils.py | nucleogenesis/kolibri | 7b653a28f014ed9d0f29d116e120d1a02eb62b4c | [
"MIT"
] | null | null | null | import base64
import datetime
import hashlib
import json
import re
import semver
from django.core.serializers.json import DjangoJSONEncoder
from django.db import transaction
from django.db.models import Count
from django.db.models import Max
from django.db.models import Min
from django.db.models import Sum
from .models import PingbackNotification
from kolibri.core.auth.constants import role_kinds
from kolibri.core.auth.models import FacilityUser
from kolibri.core.content.models import LocalFile
from kolibri.core.exams.models import Exam
from kolibri.core.lessons.models import Lesson
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.logger.models import UserSessionLog
# Facility-dataset setting names included verbatim in the telemetry payload;
# only those actually present on the dataset model are sent (see
# extract_facility_statistics).
facility_settings = [
    "preset",
    "learner_can_edit_username",
    "learner_can_edit_name",
    "learner_can_edit_password",
    "learner_can_sign_up",
    "learner_can_delete_account",
    "learner_can_login_with_no_password",
    "show_download_button_in_learn",
    "allow_guest_access",
]
def dump_zipped_json(data):
    """Serialize ``data`` to canonical JSON and zlib-compress it when possible.

    Returns:
        bytes: zlib-compressed UTF-8 JSON when zlib is usable, otherwise the
        uncompressed JSON string (best-effort fallback, as before).
    """
    jsondata = json.dumps(data, sort_keys=True, cls=DjangoJSONEncoder)
    try:
        # perform the import in here as zlib isn't available on some platforms
        import zlib

        # Fix: zlib.compress requires bytes; passing the str raised TypeError
        # on Python 3, which the broad except silently swallowed, so the
        # payload was never actually compressed. Encode first.
        jsondata = zlib.compress(jsondata.encode("utf-8"))
    except: # noqa
        pass
    return jsondata
# Copied from https://github.com/learningequality/nutritionfacts/commit/b33e19400ae639cbcf2b2e9b312d37493eb1e566#diff-5b7513e7bc7d64d348fd8d3f2222b573
# TODO: move to le-utils package
def version_matches_range(version, version_range):
    """Return True if ``version`` satisfies ``version_range``.

    ``version_range`` is a comma-separated list of criteria such as
    ``">=0.12.0,<1.0"``; an empty range or ``"*"`` matches everything.
    Both sides are normalized to semver before comparison.
    """
    # if no version range is provided, assume we don't have opinions about the version
    if not version_range or version_range == "*":
        return True

    # support having multiple comma-delimited version criteria
    if "," in version_range:
        return all(
            [
                version_matches_range(version, vrange)
                for vrange in version_range.split(",")
            ]
        )

    # extract and normalize version strings
    operator, range_version = re.match(r"([<>=!]*)(\d.*)", version_range).groups()
    range_version = normalize_version_to_semver(range_version)
    version = normalize_version_to_semver(version)

    # check whether the version is in the range
    return semver.match(version, operator + range_version)
def normalize_version_to_semver(version):
    """Rewrite a PEP-440-style version string into a semver-comparable form.

    Pre-releases ("1.2.3a1") become "1.2.3-a.1.c", final releases get a
    trailing ".c" marker so they sort between alpha/beta releases and dev
    builds, and ".dev" suffixes are appended as dot-separated components.
    """
    # Capture any ".dev..." suffix; the leading part is discarded because the
    # numeric portion is re-extracted from the full string below.
    dev = re.match(r"(.*?)(\.dev.*)?$", version).group(2)

    # Split the strict X.Y.Z prefix from whatever trails it.
    numeric, trailer = re.match(r"(\d+\.\d+\.\d+)([^\d].*)?", version).groups()

    # Normalize the trailing component: drop separators and any local build id.
    trailer = (trailer or "").strip("-").strip("+").strip(".").split("+")[0]

    # Turn "a1"/"b2" into "a.1"/"b.2" so ordering is numeric, not lexicographic.
    letter_number = re.match(r"([a-z])(\d+)", trailer)
    if letter_number:
        trailer = ".".join(part for part in letter_number.groups() if part)

    # Final (non-dev) releases get a ".c" marker to slot after alphas/betas.
    if not dev:
        trailer = (trailer + ".c").strip(".")

    # Normalize dev-suffix separators to dots for consistent comparison.
    dev = (dev or "").replace("+", ".").replace("-", ".")

    return "{}-{}{}".format(numeric, trailer, dev).strip("-")
def extract_facility_statistics(facility):
    """Build the (key-abbreviated) telemetry payload for one facility.

    Aggregates user counts, session logs and content logs for the facility's
    dataset into a dict of short keys (documented inline below).
    """
    dataset_id = facility.dataset_id

    settings = {
        name: getattr(facility.dataset, name)
        for name in facility_settings
        if hasattr(facility.dataset, name)
    }

    learners = FacilityUser.objects.filter(dataset_id=dataset_id).exclude(
        roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]
    )
    coaches = FacilityUser.objects.filter(
        dataset_id=dataset_id, roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]
    )

    usersessions = UserSessionLog.objects.filter(dataset_id=dataset_id)
    # Cap time_spent at 2h per session to exclude obviously bogus durations.
    contsessions = ContentSessionLog.objects.filter(
        dataset_id=dataset_id, time_spent__lt=3600 * 2
    )

    # the aggregates below are used to calculate the first and most recent times this device was used
    usersess_agg = usersessions.filter(
        start_timestamp__gt=datetime.datetime(2016, 1, 1)
    ).aggregate(first=Min("start_timestamp"), last=Max("last_interaction_timestamp"))
    contsess_agg = contsessions.filter(
        start_timestamp__gt=datetime.datetime(2016, 1, 1)
    ).aggregate(first=Min("start_timestamp"), last=Max("end_timestamp"))

    # extract the first and last times we've seen logs, ignoring any that are None
    first_times = [d["first"] for d in [usersess_agg, contsess_agg] if d["first"]]
    last_times = [d["last"] for d in [usersess_agg, contsess_agg] if d["last"]]

    # since newly provisioned devices won't have logs, we don't know whether we have an available datetime object
    # (these are the bound `strftime` methods of the min/max datetimes, or None)
    first_interaction_timestamp = (
        getattr(min(first_times), "strftime", None) if first_times else None
    )
    last_interaction_timestamp = (
        getattr(max(last_times), "strftime", None) if last_times else None
    )

    sesslogs_by_kind = (
        contsessions.order_by("kind").values("kind").annotate(count=Count("kind"))
    )
    sesslogs_by_kind = {log["kind"]: log["count"] for log in sesslogs_by_kind}

    summarylogs = ContentSummaryLog.objects.filter(dataset_id=dataset_id)

    contsessions_user = contsessions.exclude(user=None)
    contsessions_anon = contsessions.filter(user=None)

    # fmt: off
    return {
        # facility_id (one-way hashed; b64encode of the 16-byte MD5 digest is
        # identical in its first 10 chars to the removed base64.encodestring)
        "fi": base64.b64encode(hashlib.md5(facility.id.encode()).digest())[:10].decode(),
        # settings
        "s": settings,
        # learners_count
        "lc": learners.count(),
        # learner_login_count
        "llc": usersessions.exclude(user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]).distinct().count(),
        # coaches_count
        "cc": coaches.count(),
        # coach_login_count
        "clc": usersessions.filter(user__roles__kind__in=[role_kinds.ADMIN, role_kinds.COACH]).distinct().count(),
        # first
        "f" : first_interaction_timestamp("%Y-%m-%d") if first_interaction_timestamp else None,
        # last
        "l": last_interaction_timestamp("%Y-%m-%d") if last_interaction_timestamp else None,
        # summ_started
        "ss": summarylogs.count(),
        # summ_complete
        "sc": summarylogs.exclude(completion_timestamp=None).count(),
        # sess_kinds
        "sk": sesslogs_by_kind,
        # lesson_count
        "lec": Lesson.objects.filter(dataset_id=dataset_id).count(),
        # exam_count
        "ec": Exam.objects.filter(dataset_id=dataset_id).count(),
        # exam_log_count
        "elc": ExamLog.objects.filter(dataset_id=dataset_id).count(),
        # att_log_count
        "alc": AttemptLog.objects.filter(dataset_id=dataset_id).count(),
        # exam_att_log_count
        "ealc": ExamAttemptLog.objects.filter(dataset_id=dataset_id).count(),
        # sess_user_count
        "suc": contsessions_user.count(),
        # sess_anon_count
        "sac": contsessions_anon.count(),
        # sess_user_time (minutes)
        "sut": int((contsessions_user.aggregate(total_time=Sum("time_spent"))["total_time"] or 0) / 60),
        # sess_anon_time (minutes)
        "sat": int((contsessions_anon.aggregate(total_time=Sum("time_spent"))["total_time"] or 0) / 60),
    }
    # fmt: on
def extract_channel_statistics(channel):
    """Build a compact, anonymised usage summary for a single channel.

    Keys are deliberately terse (single characters) to minimise pingback
    payload size; each key is annotated inline in the returned dict below.

    :param channel: channel model exposing ``id``, ``version``,
        ``last_updated`` and ``root.tree_id`` -- TODO confirm exact model.
    :return: dict of abbreviated usage statistics for this channel.
    """
    channel_id = channel.id
    tree_id = channel.root.tree_id
    # Sessions of 2 hours or more are excluded -- presumably to discard
    # idle/forgotten sessions that would skew the totals (verify).
    sessionlogs = ContentSessionLog.objects.filter(
        channel_id=channel_id, time_spent__lt=3600 * 2
    )
    summarylogs = ContentSummaryLog.objects.filter(channel_id=channel_id)
    # Session counts per content kind, collapsed into {kind: count}.
    sesslogs_by_kind = (
        sessionlogs.order_by("kind").values("kind").annotate(count=Count("kind"))
    )
    sesslogs_by_kind = {log["kind"]: log["count"] for log in sesslogs_by_kind}
    # The 50 most-viewed content items, ranked by number of sessions.
    pop = list(
        sessionlogs.values("content_id")
        .annotate(count=Count("id"))
        .order_by("-count")[:50]
    )
    # All locally-available files belonging to this channel's content tree.
    localfiles = LocalFile.objects.filter(
        available=True, files__contentnode__tree_id=tree_id
    ).distinct()
    contsessions_user = sessionlogs.exclude(user=None)
    contsessions_anon = sessionlogs.filter(user=None)
    # fmt: off
    return {
        # channel_id
        "ci": channel_id[:10],
        # version
        "v": channel.version,
        # updated
        "u": channel.last_updated.strftime("%Y-%m-%d") if channel.last_updated else None,
        # popular_ids
        "pi": [item["content_id"][:10] for item in pop],
        # popular_counts
        "pc": [item["count"] for item in pop],
        # storage calculated by the MB
        "s": (localfiles.aggregate(Sum("file_size"))["file_size__sum"] or 0) / (2 ** 20),
        # summ_started
        "ss": summarylogs.count(),
        # summ_complete
        "sc": summarylogs.exclude(completion_timestamp=None).count(),
        # sess_kinds
        "sk": sesslogs_by_kind,
        # sess_user_count
        "suc": contsessions_user.count(),
        # sess_anon_count
        "sac": contsessions_anon.count(),
        # sess_user_time
        "sut": int((contsessions_user.aggregate(total_time=Sum("time_spent"))["total_time"] or 0) / 60),
        # sess_anon_time
        "sat": int((contsessions_anon.aggregate(total_time=Sum("time_spent"))["total_time"] or 0) / 60),
    }
    # fmt: on
@transaction.atomic
def create_and_update_notifications(data, source):
    """Synchronise stored pingback notifications with an incoming payload.

    Messages without a ``msg_id`` are ignored. Any previously-stored
    notification from the same source that is absent from the payload is
    deactivated; every message present is created or updated and marked
    active. Runs in a single database transaction.
    """
    incoming = [m for m in data.get("messages", []) if m.get("msg_id")]
    keep_ids = [m.get("msg_id") for m in incoming]
    # Deactivate this source's notifications that no longer appear upstream.
    stale = PingbackNotification.objects.filter(source=source).exclude(
        id__in=keep_ids
    )
    stale.update(active=False)
    for message in incoming:
        record = {"id": message["msg_id"], "source": source, "active": True}
        for key in ("version_range", "link_url", "i18n", "timestamp"):
            record[key] = message.get(key)
        PingbackNotification.objects.update_or_create(
            id=record["id"], defaults=record
        )
| 36.567376 | 151 | 0.671936 |
66f44cad288925ab97681b6a789b4d3eab1246c5 | 1,277 | py | Python | test/functional/p2p-mempool.py | Frusop/copper | a5e9d15ef68231bc28ce0a394b1e967a6cf0b1dd | [
"MIT"
] | null | null | null | test/functional/p2p-mempool.py | Frusop/copper | a5e9d15ef68231bc28ce0a394b1e967a6cf0b1dd | [
"MIT"
] | 1 | 2019-01-11T07:01:14.000Z | 2019-01-12T18:14:49.000Z | test/functional/p2p-mempool.py | Frusop/copper | a5e9d15ef68231bc28ce0a394b1e967a6cf0b1dd | [
"MIT"
] | 1 | 2019-01-11T06:00:53.000Z | 2019-01-11T06:00:53.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Copper Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import CopperTestFramework
from test_framework.util import *
class P2PMempoolTests(CopperTestFramework):
    """Check that a peer sending a ``mempool`` message is disconnected when
    the node runs with bloom filters disabled (-peerbloomfilters=0)."""
    def set_test_params(self):
        # Single node on a fresh chain, with bloom filters explicitly
        # disabled so that mempool requests are not permitted.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [["-peerbloomfilters=0"]]
    def run_test(self):
        #connect a mininode
        aTestNode = NodeConnCB()
        node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
        aTestNode.add_connection(node)
        NetworkThread().start()
        aTestNode.wait_for_verack()
        #request mempool
        aTestNode.send_message(msg_mempool())
        aTestNode.wait_for_disconnect()
        #mininode must be disconnected at this point
        assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
    P2PMempoolTests().main()
| 32.74359 | 75 | 0.708692 |
80e134ea6bd5e7757e2ec4c69f2ccf4a41d36682 | 3,367 | py | Python | ckan/ckanext-dcatapit/ckanext/dcatapit/tests/test_dcatapit_profile_serialize.py | lorenzoeusepi77/ckan | 3a620e9c81ba1750d12941c02184092d507f71df | [
"Apache-2.0"
] | null | null | null | ckan/ckanext-dcatapit/ckanext/dcatapit/tests/test_dcatapit_profile_serialize.py | lorenzoeusepi77/ckan | 3a620e9c81ba1750d12941c02184092d507f71df | [
"Apache-2.0"
] | null | null | null | ckan/ckanext-dcatapit/ckanext/dcatapit/tests/test_dcatapit_profile_serialize.py | lorenzoeusepi77/ckan | 3a620e9c81ba1750d12941c02184092d507f71df | [
"Apache-2.0"
] | null | null | null | import json
import nose
from pylons import config
from dateutil.parser import parse as parse_date
from rdflib import URIRef, BNode, Literal
from rdflib.namespace import RDF
from geomet import wkt
try:
from ckan.tests import helpers, factories
except ImportError:
from ckan.new_tests import helpers, factories
from ckanext.dcat import utils
from ckanext.dcat.processors import RDFSerializer
from ckanext.dcat.profiles import (DCAT, DCT, ADMS, XSD, VCARD, FOAF, SCHEMA,
SKOS, LOCN, GSP, OWL, SPDX, GEOJSON_IMT)
from ckanext.dcatapit.dcat.profiles import (DCATAPIT)
eq_ = nose.tools.eq_
assert_true = nose.tools.assert_true
class BaseSerializeTest(object):
    """Shared helpers for asserting on triples in an rdflib graph."""
    def _triples(self, graph, subject, predicate, _object, data_type=None):
        # Wrap plain Python values as rdflib Literals; URIRef, BNode and
        # None pass through unchanged (None acts as a wildcard for
        # graph.triples).
        needs_wrapping = not (
            _object is None or isinstance(_object, (URIRef, BNode))
        )
        if needs_wrapping:
            if data_type:
                _object = Literal(_object, datatype=data_type)
            else:
                _object = Literal(_object)
        return list(graph.triples((subject, predicate, _object)))
    def _triple(self, graph, subject, predicate, _object, data_type=None):
        # First matching triple, or None when nothing matches.
        matches = self._triples(graph, subject, predicate, _object, data_type)
        if not matches:
            return None
        return matches[0]
class TestDCATAPITProfileSerializeDataset(BaseSerializeTest):
    def test_graph_from_dataset(self):
        """Serialize a representative CKAN dataset dict and verify that the
        core DCAT_AP-IT triples appear in the resulting RDF graph.

        NOTE(review): uses the ``unicode`` builtin, so this test is
        Python 2 only.
        """
        dataset = {
            'id': '4b6fe9ca-dc77-4cec-92a4-55c6624a5bd6',
            'name': 'test-dataset',
            'title': 'Dataset di test DCAT_AP-IT',
            'notes': 'dcatapit dataset di test',
            'metadata_created': '2015-06-26T15:21:09.034694',
            'metadata_modified': '2015-06-26T15:21:09.075774',
            'tags': [{'name': 'Tag 1'}, {'name': 'Tag 2'}],
            'issued':'2016-11-29',
            'modified':'2016-11-29',
            'identifier':'ISBN',
            'temporal_start':'2016-11-01',
            'temporal_end':'2016-11-30',
            'frequency':'UPDATE_CONT',
            'publisher_name':'bolzano',
            'publisher_identifier':'234234234',
            'creator_name':'test',
            'creator_identifier':'412946129',
            'holder_name':'bolzano',
            'holder_identifier':'234234234',
            'alternate_identifier':'ISBN,TEST',
            'theme':'{ECON,ENVI}',
            'geographical_geonames_url':'http://www.geonames.org/3181913',
            'language':'{DEU,ENG,ITA}',
            'is_version_of':'http://dcat.geo-solutions.it/dataset/energia-da-fonti-rinnovabili2',
            'conforms_to':'{CONF1,CONF2,CONF3}'
        }
        s = RDFSerializer()
        g = s.g
        dataset_ref = s.graph_from_dataset(dataset)
        # The dataset node URI must be derived from the dataset dict.
        eq_(unicode(dataset_ref), utils.dataset_uri(dataset))
        # Basic fields
        assert self._triple(g, dataset_ref, RDF.type, DCATAPIT.Dataset)
        assert self._triple(g, dataset_ref, DCT.title, dataset['title'])
        assert self._triple(g, dataset_ref, DCT.description, dataset['notes'])
        assert self._triple(g, dataset_ref, DCT.identifier, dataset['identifier'])
        # Tags: exactly one keyword triple per tag.
        eq_(len([t for t in g.triples((dataset_ref, DCAT.keyword, None))]), 2)
        for tag in dataset['tags']:
            assert self._triple(g, dataset_ref, DCAT.keyword, tag['name'])
3a7871bc9df3ea2e7acecedc5a1ca39ac31c5cd8 | 1,511 | py | Python | ML/website-scraping-w-python-master/Chapter 4/06_csv_pipeline/06_csv_pipeline/sainsburys/sainsburys/pipelines.py | praveenpmin/Python | 513fcde7430b03a187e2c7e58302b88645388eed | [
"MIT"
] | null | null | null | ML/website-scraping-w-python-master/Chapter 4/06_csv_pipeline/06_csv_pipeline/sainsburys/sainsburys/pipelines.py | praveenpmin/Python | 513fcde7430b03a187e2c7e58302b88645388eed | [
"MIT"
] | null | null | null | ML/website-scraping-w-python-master/Chapter 4/06_csv_pipeline/06_csv_pipeline/sainsburys/sainsburys/pipelines.py | praveenpmin/Python | 513fcde7430b03a187e2c7e58302b88645388eed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from sainsburys.items import SainsburysItem
class CsvItemPipeline:
fieldnames_standard = ['item_code', 'product_name', 'url', 'price_per_unit', 'unit', 'rating', 'product_reviews',
'product_origin', 'product_image']
def __init__(self, csv_filename):
self.items = []
self.csv_filename = csv_filename
@classmethod
def from_crawler(cls, crawler):
return cls(
csv_filename=crawler.settings.get('CSV_FILENAME', 'sainsburys.csv'),
)
def open_spider(self, spider):
pass
def close_spider(self, spider):
import csv
with open(self.csv_filename, 'w', encoding='utf-8') as outfile:
spamwriter = csv.DictWriter(outfile, fieldnames=self.get_fieldnames(), lineterminator='\n')
spamwriter.writeheader()
for item in self.items:
spamwriter.writerow(item)
def process_item(self, item, spider):
if type(item) == SainsburysItem:
new_item = dict(item)
new_item.pop('nutritions')
new_item.pop('image_urls')
self.items.append({**new_item, **item['nutritions']})
return item
def get_fieldnames(self):
field_names = set()
for product in self.items:
for key in product.keys():
if key not in self.fieldnames_standard:
field_names.add(key)
return self.fieldnames_standard + list(field_names)
| 32.847826 | 117 | 0.602912 |
ba79e19f7fe8301911524075af6d302bf55ceab7 | 21,523 | py | Python | synapse/storage/databases/main/transactions.py | mlakkadshaw/synapse | 74a2365bd5066955567cc551e72632d6cece94b9 | [
"Apache-2.0"
] | 1 | 2022-02-22T21:40:29.000Z | 2022-02-22T21:40:29.000Z | synapse/storage/databases/main/transactions.py | mlakkadshaw/synapse | 74a2365bd5066955567cc551e72632d6cece94b9 | [
"Apache-2.0"
] | 2 | 2022-03-01T08:22:45.000Z | 2022-03-11T08:13:55.000Z | synapse/storage/databases/main/transactions.py | mlakkadshaw/synapse | 74a2365bd5066955567cc551e72632d6cece94b9 | [
"Apache-2.0"
] | 1 | 2022-03-31T09:03:27.000Z | 2022-03-31T09:03:27.000Z | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, cast
import attr
from canonicaljson import encode_canonical_json
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import db_to_json
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.types import JsonDict
from synapse.util.caches.descriptors import cached
if TYPE_CHECKING:
from synapse.server import HomeServer
db_binary_type = memoryview
logger = logging.getLogger(__name__)
class DestinationSortOrder(Enum):
    """Enum to define the sorting method used when returning destinations."""

    DESTINATION = "destination"
    RETRY_LAST_TS = "retry_last_ts"
    # Historical misspelling, kept so existing references keep working.
    RETTRY_INTERVAL = "retry_interval"
    # Correctly-spelled alias (same value, so Enum treats it as an alias of
    # RETTRY_INTERVAL); prefer this spelling in new code.
    RETRY_INTERVAL = "retry_interval"
    FAILURE_TS = "failure_ts"
    LAST_SUCCESSFUL_STREAM_ORDERING = "last_successful_stream_ordering"
# Immutable value object (frozen + slots) describing the backoff state for
# one remote homeserver, as read from/written to the `destinations` table.
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DestinationRetryTimings:
    """The current destination retry timing info for a remote server."""
    # The first time we tried and failed to reach the remote server, in ms.
    failure_ts: int
    # The last time we tried and failed to reach the remote server, in ms.
    retry_last_ts: int
    # How long since the last time we tried to reach the remote server before
    # trying again, in ms.
    retry_interval: int
class TransactionWorkerStore(CacheInvalidationWorkerStore):
    """Storage for received federation transactions and per-destination
    retry/backoff and catch-up bookkeeping."""
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)
        # Only the worker designated for background tasks prunes old
        # transactions; the cleanup loop runs every 30 minutes.
        if hs.config.worker.run_background_tasks:
            self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
    @wrap_as_background_process("cleanup_transactions")
    async def _cleanup_transactions(self) -> None:
        """Delete received-transaction rows older than 30 days."""
        now = self._clock.time_msec()
        month_ago = now - 30 * 24 * 60 * 60 * 1000
        def _cleanup_transactions_txn(txn: LoggingTransaction) -> None:
            txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,))
        await self.db_pool.runInteraction(
            "_cleanup_transactions", _cleanup_transactions_txn
        )
    async def get_received_txn_response(
        self, transaction_id: str, origin: str
    ) -> Optional[Tuple[int, JsonDict]]:
        """For an incoming transaction from a given origin, check if we have
        already responded to it. If so, return the response code and response
        body (as a dict).
        Args:
            transaction_id
            origin
        Returns:
            None if we have not previously responded to this transaction or a
            2-tuple of (int, dict)
        """
        return await self.db_pool.runInteraction(
            "get_received_txn_response",
            self._get_received_txn_response,
            transaction_id,
            origin,
        )
    def _get_received_txn_response(
        self, txn: LoggingTransaction, transaction_id: str, origin: str
    ) -> Optional[Tuple[int, JsonDict]]:
        result = self.db_pool.simple_select_one_txn(
            txn,
            table="received_transactions",
            keyvalues={"transaction_id": transaction_id, "origin": origin},
            retcols=(
                "transaction_id",
                "origin",
                "ts",
                "response_code",
                "response_json",
                "has_been_referenced",
            ),
            allow_none=True,
        )
        # A missing row, or a row whose response_code is NULL/zero, means we
        # never recorded a response -- report "no previous response".
        if result and result["response_code"]:
            return result["response_code"], db_to_json(result["response_json"])
        else:
            return None
    async def set_received_txn_response(
        self, transaction_id: str, origin: str, code: int, response_dict: JsonDict
    ) -> None:
        """Persist the response we returned for an incoming transaction, and
        should return for subsequent transactions with the same transaction_id
        and origin.
        Args:
            transaction_id: The incoming transaction ID.
            origin: The origin server.
            code: The response code.
            response_dict: The response, to be encoded into JSON.
        """
        # NOTE(review): values={} with insertion_values means an existing row
        # is left untouched on conflict -- the first recorded response wins.
        await self.db_pool.simple_upsert(
            table="received_transactions",
            keyvalues={
                "transaction_id": transaction_id,
                "origin": origin,
            },
            values={},
            insertion_values={
                "response_code": code,
                # Stored as canonical JSON bytes.
                "response_json": db_binary_type(encode_canonical_json(response_dict)),
                "ts": self._clock.time_msec(),
            },
            desc="set_received_txn_response",
        )
    # Cached; the setters below invalidate this cache (and stream the
    # invalidation to other workers) via _invalidate_cache_and_stream.
    @cached(max_entries=10000)
    async def get_destination_retry_timings(
        self,
        destination: str,
    ) -> Optional[DestinationRetryTimings]:
        """Gets the current retry timings (if any) for a given destination.
        Args:
            destination (str)
        Returns:
            None if not retrying
            Otherwise a dict for the retry scheme
        """
        result = await self.db_pool.runInteraction(
            "get_destination_retry_timings",
            self._get_destination_retry_timings,
            destination,
        )
        return result
    def _get_destination_retry_timings(
        self, txn: LoggingTransaction, destination: str
    ) -> Optional[DestinationRetryTimings]:
        result = self.db_pool.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )
        # check we have a row and retry_last_ts is not null or zero
        # (retry_last_ts can't be negative)
        if result and result["retry_last_ts"]:
            return DestinationRetryTimings(**result)
        else:
            return None
    async def set_destination_retry_timings(
        self,
        destination: str,
        failure_ts: Optional[int],
        retry_last_ts: int,
        retry_interval: int,
    ) -> None:
        """Sets the current retry timings for a given destination.
        Both timings should be zero if retrying is no longer occurring.
        Args:
            destination
            failure_ts: when the server started failing (ms since epoch)
            retry_last_ts: time of last retry attempt in unix epoch ms
            retry_interval: how long until next retry in ms
        """
        # Two implementations: a single native upsert where the database
        # engine supports it, otherwise an emulated select-then-write path.
        if self.database_engine.can_native_upsert:
            await self.db_pool.runInteraction(
                "set_destination_retry_timings",
                self._set_destination_retry_timings_native,
                destination,
                failure_ts,
                retry_last_ts,
                retry_interval,
                db_autocommit=True,  # Safe as its a single upsert
            )
        else:
            await self.db_pool.runInteraction(
                "set_destination_retry_timings",
                self._set_destination_retry_timings_emulated,
                destination,
                failure_ts,
                retry_last_ts,
                retry_interval,
            )
    def _set_destination_retry_timings_native(
        self,
        txn: LoggingTransaction,
        destination: str,
        failure_ts: Optional[int],
        retry_last_ts: int,
        retry_interval: int,
    ) -> None:
        assert self.database_engine.can_native_upsert
        # Upsert retry time interval if retry_interval is zero (i.e. we're
        # resetting it) or greater than the existing retry interval.
        #
        # WARNING: This is executed in autocommit, so we shouldn't add any more
        # SQL calls in here (without being very careful).
        sql = """
            INSERT INTO destinations (
                destination, failure_ts, retry_last_ts, retry_interval
            )
            VALUES (?, ?, ?, ?)
            ON CONFLICT (destination) DO UPDATE SET
                failure_ts = EXCLUDED.failure_ts,
                retry_last_ts = EXCLUDED.retry_last_ts,
                retry_interval = EXCLUDED.retry_interval
            WHERE
                EXCLUDED.retry_interval = 0
                OR destinations.retry_interval IS NULL
                OR destinations.retry_interval < EXCLUDED.retry_interval
        """
        txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval))
        self._invalidate_cache_and_stream(
            txn, self.get_destination_retry_timings, (destination,)
        )
    def _set_destination_retry_timings_emulated(
        self,
        txn: LoggingTransaction,
        destination: str,
        failure_ts: Optional[int],
        retry_last_ts: int,
        retry_interval: int,
    ) -> None:
        # Lock the table to emulate the atomicity of the native upsert above.
        self.database_engine.lock_table(txn, "destinations")
        # We need to be careful here as the data may have changed from under us
        # due to a worker setting the timings.
        prev_row = self.db_pool.simple_select_one_txn(
            txn,
            table="destinations",
            keyvalues={"destination": destination},
            retcols=("failure_ts", "retry_last_ts", "retry_interval"),
            allow_none=True,
        )
        if not prev_row:
            self.db_pool.simple_insert_txn(
                txn,
                table="destinations",
                values={
                    "destination": destination,
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )
        elif (
            retry_interval == 0
            or prev_row["retry_interval"] is None
            or prev_row["retry_interval"] < retry_interval
        ):
            self.db_pool.simple_update_one_txn(
                txn,
                "destinations",
                keyvalues={"destination": destination},
                updatevalues={
                    "failure_ts": failure_ts,
                    "retry_last_ts": retry_last_ts,
                    "retry_interval": retry_interval,
                },
            )
        self._invalidate_cache_and_stream(
            txn, self.get_destination_retry_timings, (destination,)
        )
    async def store_destination_rooms_entries(
        self,
        destinations: Iterable[str],
        room_id: str,
        stream_ordering: int,
    ) -> None:
        """
        Updates or creates `destination_rooms` entries in batch for a single event.
        Args:
            destinations: list of destinations
            room_id: the room_id of the event
            stream_ordering: the stream_ordering of the event
        """
        # First make sure a `destinations` row exists for every destination
        # (NOTE(review): presumably because destination_rooms references it);
        # the empty value lists mean existing rows are left untouched.
        await self.db_pool.simple_upsert_many(
            table="destinations",
            key_names=("destination",),
            key_values=[(d,) for d in destinations],
            value_names=[],
            value_values=[],
            desc="store_destination_rooms_entries_dests",
        )
        rows = [(destination, room_id) for destination in destinations]
        await self.db_pool.simple_upsert_many(
            table="destination_rooms",
            key_names=("destination", "room_id"),
            key_values=rows,
            value_names=["stream_ordering"],
            value_values=[(stream_ordering,)] * len(rows),
            desc="store_destination_rooms_entries_rooms",
        )
    async def get_destination_last_successful_stream_ordering(
        self, destination: str
    ) -> Optional[int]:
        """
        Gets the stream ordering of the PDU most-recently successfully sent
        to the specified destination, or None if this information has not been
        tracked yet.
        Args:
            destination: the destination to query
        """
        return await self.db_pool.simple_select_one_onecol(
            "destinations",
            {"destination": destination},
            "last_successful_stream_ordering",
            allow_none=True,
            desc="get_last_successful_stream_ordering",
        )
    async def set_destination_last_successful_stream_ordering(
        self, destination: str, last_successful_stream_ordering: int
    ) -> None:
        """
        Marks that we have successfully sent the PDUs up to and including the
        one specified.
        Args:
            destination: the destination we have successfully sent to
            last_successful_stream_ordering: the stream_ordering of the most
            recent successfully-sent PDU
        """
        await self.db_pool.simple_upsert(
            "destinations",
            keyvalues={"destination": destination},
            values={"last_successful_stream_ordering": last_successful_stream_ordering},
            desc="set_last_successful_stream_ordering",
        )
    async def get_catch_up_room_event_ids(
        self,
        destination: str,
        last_successful_stream_ordering: int,
    ) -> List[str]:
        """
        Returns at most 50 event IDs and their corresponding stream_orderings
        that correspond to the oldest events that have not yet been sent to
        the destination.
        Args:
            destination: the destination in question
            last_successful_stream_ordering: the stream_ordering of the
            most-recently successfully-transmitted event to the destination
        Returns:
            list of event_ids
        """
        return await self.db_pool.runInteraction(
            "get_catch_up_room_event_ids",
            self._get_catch_up_room_event_ids_txn,
            destination,
            last_successful_stream_ordering,
        )
    @staticmethod
    def _get_catch_up_room_event_ids_txn(
        txn: LoggingTransaction,
        destination: str,
        last_successful_stream_ordering: int,
    ) -> List[str]:
        # Oldest-first, capped at 50 so catch-up proceeds in bounded batches.
        q = """
            SELECT event_id FROM destination_rooms
            JOIN events USING (stream_ordering)
            WHERE destination = ?
            AND stream_ordering > ?
            ORDER BY stream_ordering
            LIMIT 50
        """
        txn.execute(
            q,
            (destination, last_successful_stream_ordering),
        )
        event_ids = [row[0] for row in txn]
        return event_ids
    async def get_catch_up_outstanding_destinations(
        self, after_destination: Optional[str]
    ) -> List[str]:
        """
        Gets at most 25 destinations which have outstanding PDUs to be caught up,
        and are not being backed off from
        Args:
            after_destination:
                If provided, all destinations must be lexicographically greater
                than this one.
        Returns:
            list of up to 25 destinations with outstanding catch-up.
            These are the lexicographically first destinations which are
            lexicographically greater than after_destination (if provided).
        """
        time = self.hs.get_clock().time_msec()
        return await self.db_pool.runInteraction(
            "get_catch_up_outstanding_destinations",
            self._get_catch_up_outstanding_destinations_txn,
            time,
            after_destination,
        )
    @staticmethod
    def _get_catch_up_outstanding_destinations_txn(
        txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str]
    ) -> List[str]:
        # Destinations with unsent events whose backoff window (if any) has
        # already elapsed by now_time_ms.
        q = """
            SELECT DISTINCT destination FROM destinations
            INNER JOIN destination_rooms USING (destination)
            WHERE
                stream_ordering > last_successful_stream_ordering
                AND destination > ?
                AND (
                    retry_last_ts IS NULL OR
                    retry_last_ts + retry_interval < ?
                )
            ORDER BY destination
            LIMIT 25
        """
        txn.execute(
            q,
            (
                # everything is lexicographically greater than "" so this gives
                # us the first batch of up to 25.
                after_destination or "",
                now_time_ms,
            ),
        )
        destinations = [row[0] for row in txn]
        return destinations
    async def get_destinations_paginate(
        self,
        start: int,
        limit: int,
        destination: Optional[str] = None,
        order_by: str = DestinationSortOrder.DESTINATION.value,
        direction: str = "f",
    ) -> Tuple[List[JsonDict], int]:
        """Function to retrieve a paginated list of destinations.
        This will return a json list of destinations and the
        total number of destinations matching the filter criteria.
        Args:
            start: start number to begin the query from
            limit: number of rows to retrieve
            destination: search string in destination
            order_by: the sort order of the returned list
            direction: sort ascending or descending
        Returns:
            A tuple of a list of mappings from destination to information
            and a count of total destinations.
        """
        def get_destinations_paginate_txn(
            txn: LoggingTransaction,
        ) -> Tuple[List[JsonDict], int]:
            # Validating through the enum means interpolating the column name
            # into the SQL below is safe (raises on an unknown order_by).
            order_by_column = DestinationSortOrder(order_by).value
            if direction == "b":
                order = "DESC"
            else:
                order = "ASC"
            args: List[object] = []
            where_statement = ""
            if destination:
                args.extend(["%" + destination.lower() + "%"])
                where_statement = "WHERE LOWER(destination) LIKE ?"
            sql_base = f"FROM destinations {where_statement} "
            sql = f"SELECT COUNT(*) as total_destinations {sql_base}"
            txn.execute(sql, args)
            count = cast(Tuple[int], txn.fetchone())[0]
            sql = f"""
                SELECT destination, retry_last_ts, retry_interval, failure_ts,
                last_successful_stream_ordering
                {sql_base}
                ORDER BY {order_by_column} {order}, destination ASC
                LIMIT ? OFFSET ?
            """
            txn.execute(sql, args + [limit, start])
            destinations = self.db_pool.cursor_to_dict(txn)
            return destinations, count
        return await self.db_pool.runInteraction(
            "get_destinations_paginate_txn", get_destinations_paginate_txn
        )
    async def get_destination_rooms_paginate(
        self, destination: str, start: int, limit: int, direction: str = "f"
    ) -> Tuple[List[JsonDict], int]:
        """Function to retrieve a paginated list of destination's rooms.
        This will return a json list of rooms and the
        total number of rooms.
        Args:
            destination: the destination to query
            start: start number to begin the query from
            limit: number of rows to retrieve
            direction: sort ascending or descending by room_id
        Returns:
            A tuple of a dict of rooms and a count of total rooms.
        """
        def get_destination_rooms_paginate_txn(
            txn: LoggingTransaction,
        ) -> Tuple[List[JsonDict], int]:
            if direction == "b":
                order = "DESC"
            else:
                order = "ASC"
            sql = """
                SELECT COUNT(*) as total_rooms
                FROM destination_rooms
                WHERE destination = ?
                """
            txn.execute(sql, [destination])
            count = cast(Tuple[int], txn.fetchone())[0]
            rooms = self.db_pool.simple_select_list_paginate_txn(
                txn=txn,
                table="destination_rooms",
                orderby="room_id",
                start=start,
                limit=limit,
                retcols=("room_id", "stream_ordering"),
                order_direction=order,
            )
            return rooms, count
        return await self.db_pool.runInteraction(
            "get_destination_rooms_paginate_txn", get_destination_rooms_paginate_txn
        )
    async def is_destination_known(self, destination: str) -> bool:
        """Check if a destination is known to the server."""
        result = await self.db_pool.simple_select_one_onecol(
            table="destinations",
            keyvalues={"destination": destination},
            retcol="1",
            allow_none=True,
            desc="is_destination_known",
        )
        # allow_none=True returns None when no row exists; coerce to bool.
        return bool(result)
d8452eee9313a1431fb76970bf74e1d7445fda57 | 23,774 | py | Python | nova/objects/migrate_data.py | sapcc/nova | ad71af7307365d6aabd122e140f56df4db1e6182 | [
"Apache-2.0"
] | 2 | 2021-10-11T04:56:25.000Z | 2022-02-16T08:49:29.000Z | nova/objects/migrate_data.py | sapcc/nova | ad71af7307365d6aabd122e140f56df4db1e6182 | [
"Apache-2.0"
] | 132 | 2017-03-27T11:31:52.000Z | 2022-03-30T08:45:02.000Z | nova/objects/migrate_data.py | sapcc/nova | ad71af7307365d6aabd122e140f56df4db1e6182 | [
"Apache-2.0"
] | 8 | 2017-03-27T07:50:38.000Z | 2020-02-14T16:55:56.000Z | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
LOG = log.getLogger(__name__)
@obj_base.NovaObjectRegistry.register
class VIFMigrateData(obj_base.NovaObject):
    """Versioned object carrying per-VIF (port) binding information for the
    destination host of a live migration."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    # The majority of the fields here represent a port binding on the
    # **destination** host during a live migration. The vif_type, among
    # other fields, could be different from the existing binding on the
    # source host, which is represented by the "source_vif" field.
    fields = {
        'port_id': fields.StringField(),
        'vnic_type': fields.StringField(),  # TODO(sean-k-mooney): make enum?
        'vif_type': fields.StringField(),
        # vif_details is a dict whose contents are dependent on the vif_type
        # and can be any number of types for the values, so we just store it
        # as a serialized dict
        'vif_details_json': fields.StringField(),
        # profile is in the same random dict of terrible boat as vif_details
        # so it's stored as a serialized json string
        'profile_json': fields.StringField(),
        'host': fields.StringField(),
        # The source_vif attribute is a copy of the VIF network model
        # representation of the port on the source host which can be used
        # for filling in blanks about the VIF (port) when building a
        # configuration reference for the destination host.
        # NOTE(mriedem): This might not be sufficient based on how the
        # destination host is configured for all vif types. See the note in
        # the libvirt driver here: https://review.openstack.org/#/c/551370/
        # 29/nova/virt/libvirt/driver.py@7036
        'source_vif': fields.Field(fields.NetworkVIFModel()),
    }
    # vif_details/profile are persisted as JSON strings (see fields above);
    # these properties (de)serialize on access, so every read re-parses the
    # stored JSON and every write re-serializes it.
    @property
    def vif_details(self):
        return jsonutils.loads(self.vif_details_json)
    @vif_details.setter
    def vif_details(self, vif_details_dict):
        self.vif_details_json = jsonutils.dumps(vif_details_dict)
    @property
    def profile(self):
        return jsonutils.loads(self.profile_json)
    @profile.setter
    def profile(self, profile_dict):
        self.profile_json = jsonutils.dumps(profile_dict)
    def get_dest_vif(self):
        """Get a destination VIF representation of this object.
        This method takes the source_vif and updates it to include the
        destination host port binding information using the other fields
        on this object.
        :return: nova.network.model.VIF object
        """
        if 'source_vif' not in self:
            raise exception.ObjectActionError(
                action='get_dest_vif', reason='source_vif is not set')
        # Deep copy so the stored source_vif is never mutated via the result.
        vif = copy.deepcopy(self.source_vif)
        vif['type'] = self.vif_type
        vif['vnic_type'] = self.vnic_type
        vif['profile'] = self.profile
        vif['details'] = self.vif_details
        return vif
@obj_base.NovaObjectRegistry.register_if(False)
class LiveMigrateData(obj_base.NovaObject):
    """Base class for driver-specific live migration data objects.

    Not registered itself (register_if(False)); driver-specific subclasses
    such as LibvirtLiveMigrateData carry the concrete data.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added old_vol_attachment_ids field.
    # Version 1.2: Added wait_for_vif_plugged
    # Version 1.3: Added vifs field.
    VERSION = '1.3'
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # old_vol_attachment_ids is a dict used to store the old attachment_ids
        # for each volume so they can be restored on a migration rollback. The
        # key is the volume_id, and the value is the attachment_id.
        # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
        # should eventually remove it.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
        # wait_for_vif_plugged is set in pre_live_migration on the destination
        # compute host based on the [compute]/live_migration_wait_for_vif_plug
        # config option value; a default value is not set here since the
        # default for the config option may change in the future
        'wait_for_vif_plugged': fields.BooleanField(),
        'vifs': fields.ListOfObjectsField('VIFMigrateData'),
    }
    def to_legacy_dict(self, pre_migration_result=False):
        # Serialize only the fields that are actually set; optionally include
        # an empty pre_live_migration_result placeholder.
        legacy = {}
        if self.obj_attr_is_set('is_volume_backed'):
            legacy['is_volume_backed'] = self.is_volume_backed
        if self.obj_attr_is_set('migration'):
            legacy['migration'] = self.migration
        if pre_migration_result:
            legacy['pre_live_migration_result'] = {}
        return legacy
    def from_legacy_dict(self, legacy):
        # Populate from a legacy dict, ignoring absent keys.
        if 'is_volume_backed' in legacy:
            self.is_volume_backed = legacy['is_volume_backed']
        if 'migration' in legacy:
            self.migration = legacy['migration']
    @classmethod
    def detect_implementation(cls, legacy_dict):
        # Infer the driver-specific subclass from marker keys present in the
        # legacy dict; fall back to the generic base object.
        if 'instance_relative_path' in legacy_dict:
            obj = LibvirtLiveMigrateData()
        elif 'image_type' in legacy_dict:
            obj = LibvirtLiveMigrateData()
        elif 'migrate_data' in legacy_dict:
            obj = XenapiLiveMigrateData()
        else:
            obj = LiveMigrateData()
        obj.from_legacy_dict(legacy_dict)
        return obj
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
    """Versioned object describing one block device mapping for a libvirt
    live migration."""
    # VERSION 1.0 : Initial version
    # VERSION 1.1 : Added encryption_secret_uuid for tracking volume secret
    # uuid created on dest during migration with encrypted vols.
    VERSION = '1.1'
    fields = {
        # FIXME(danms): some of these can be enums?
        'serial': fields.StringField(),
        'bus': fields.StringField(),
        'dev': fields.StringField(),
        'type': fields.StringField(),
        'format': fields.StringField(nullable=True),
        'boot_index': fields.IntegerField(nullable=True),
        'connection_info_json': fields.StringField(),
        'encryption_secret_uuid': fields.UUIDField(nullable=True),
    }
    def obj_make_compatible(self, primitive, target_version):
        # Drop fields that consumers older than 1.1 do not know about.
        super(LibvirtLiveMigrateBDMInfo, self).obj_make_compatible(
            primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1) and 'encryption_secret_uuid' in primitive:
            del primitive['encryption_secret_uuid']
    # NOTE(danms): We don't have a connection_info object right
    # now, and instead mostly store/pass it as JSON that we're
    # careful with. When we get a connection_info object in the
    # future, we should use it here, so make this easy to convert
    # for later.
    @property
    def connection_info(self):
        return jsonutils.loads(self.connection_info_json)
    @connection_info.setter
    def connection_info(self, info):
        self.connection_info_json = jsonutils.dumps(info)
    def as_disk_info(self):
        """Return a disk-info dict (dev/bus/type, plus format and boot_index
        when set). NOTE(review): presumably consumed by the libvirt guest
        config builder -- confirm the expected keys there."""
        info_dict = {
            'dev': self.dev,
            'bus': self.bus,
            'type': self.type,
        }
        if self.obj_attr_is_set('format') and self.format:
            info_dict['format'] = self.format
        if self.obj_attr_is_set('boot_index') and self.boot_index is not None:
            # boot_index is serialized as a string in the disk-info dict.
            info_dict['boot_index'] = str(self.boot_index)
        return info_dict
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateData(LiveMigrateData):
    """Libvirt driver payload for live migration."""
    # Version 1.0: Initial version
    # Version 1.1: Added target_connect_addr
    # Version 1.2: Added 'serial_listen_ports' to allow live migration with
    #              serial console.
    # Version 1.3: Added 'supported_perf_events'
    # Version 1.4: Added old_vol_attachment_ids
    # Version 1.5: Added src_supports_native_luks
    # Version 1.6: Added wait_for_vif_plugged
    # Version 1.7: Added dst_wants_file_backed_memory
    # Version 1.8: Added file_backed_memory_discard
    # Version 1.9: Inherited vifs from LiveMigrateData
    VERSION = '1.9'

    fields = {
        'filename': fields.StringField(),
        # FIXME: image_type should be enum?
        'image_type': fields.StringField(),
        'block_migration': fields.BooleanField(),
        'disk_over_commit': fields.BooleanField(),
        'disk_available_mb': fields.IntegerField(nullable=True),
        'is_shared_instance_path': fields.BooleanField(),
        'is_shared_block_storage': fields.BooleanField(),
        'instance_relative_path': fields.StringField(),
        'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
        'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
        'serial_listen_addr': fields.StringField(nullable=True),
        'serial_listen_ports': fields.ListOfIntegersField(),
        'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
        'target_connect_addr': fields.StringField(nullable=True),
        'supported_perf_events': fields.ListOfStringsField(),
        'src_supports_native_luks': fields.BooleanField(),
        'dst_wants_file_backed_memory': fields.BooleanField(),
        # file_backed_memory_discard is ignored unless
        # dst_wants_file_backed_memory is set
        'file_backed_memory_discard': fields.BooleanField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Drop fields newer than the requested target version."""
        super(LibvirtLiveMigrateData, self).obj_make_compatible(
            primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 9) and 'vifs' in primitive:
            del primitive['vifs']
        if target_version < (1, 8):
            if 'file_backed_memory_discard' in primitive:
                del primitive['file_backed_memory_discard']
        if target_version < (1, 7):
            if 'dst_wants_file_backed_memory' in primitive:
                del primitive['dst_wants_file_backed_memory']
        if target_version < (1, 6) and 'wait_for_vif_plugged' in primitive:
            del primitive['wait_for_vif_plugged']
        if target_version < (1, 5):
            if 'src_supports_native_luks' in primitive:
                del primitive['src_supports_native_luks']
        if target_version < (1, 4):
            if 'old_vol_attachment_ids' in primitive:
                del primitive['old_vol_attachment_ids']
        if target_version < (1, 3):
            if 'supported_perf_events' in primitive:
                del primitive['supported_perf_events']
        if target_version < (1, 2):
            if 'serial_listen_ports' in primitive:
                del primitive['serial_listen_ports']
        if target_version < (1, 1) and 'target_connect_addr' in primitive:
            del primitive['target_connect_addr']

    def _bdms_to_legacy(self, legacy):
        """Write the bdms list into *legacy* keyed by volume serial."""
        if not self.obj_attr_is_set('bdms'):
            return
        legacy['volume'] = {}
        for bdmi in self.bdms:
            legacy['volume'][bdmi.serial] = {
                'disk_info': bdmi.as_disk_info(),
                'connection_info': bdmi.connection_info}

    def _bdms_from_legacy(self, legacy_pre_result):
        """Rebuild the bdms list from a legacy pre-migration dict."""
        self.bdms = []
        volume = legacy_pre_result.get('volume', {})
        for serial in volume:
            vol = volume[serial]
            bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
            bdmi.connection_info = vol['connection_info']
            bdmi.bus = vol['disk_info']['bus']
            bdmi.dev = vol['disk_info']['dev']
            bdmi.type = vol['disk_info']['type']
            # NOTE(review): the membership checks below test 'format' /
            # 'boot_index' in *vol* but read them from vol['disk_info'];
            # looks inconsistent — confirm whether the legacy format ever
            # puts these keys at the top level.
            if 'format' in vol:
                bdmi.format = vol['disk_info']['format']
            if 'boot_index' in vol:
                bdmi.boot_index = int(vol['disk_info']['boot_index'])
            self.bdms.append(bdmi)

    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize to the pre-objects dict format.

        Graphics/serial/target-address fields are moved under the
        'pre_live_migration_result' sub-dict when requested.
        """
        LOG.debug('Converting to legacy: %s', self)
        legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
        keys = (set(self.fields.keys()) -
                set(LiveMigrateData.fields.keys()) - {'bdms'})
        legacy.update({k: getattr(self, k) for k in keys
                       if self.obj_attr_is_set(k)})

        graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
        graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
        transport_target = legacy.pop('target_connect_addr', None)
        live_result = {
            'graphics_listen_addrs': {
                'vnc': graphics_vnc and str(graphics_vnc),
                'spice': graphics_spice and str(graphics_spice),
            },
            'serial_listen_addr': legacy.pop('serial_listen_addr', None),
            'target_connect_addr': transport_target,
        }

        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result
            self._bdms_to_legacy(live_result)

        LOG.debug('Legacy result: %s', legacy)
        return legacy

    def from_legacy_dict(self, legacy):
        """Populate fields from the pre-objects dict format."""
        LOG.debug('Converting legacy dict to obj: %s', legacy)
        super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
        keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
        for k in keys - {'bdms'}:
            if k in legacy:
                setattr(self, k, legacy[k])
        if 'pre_live_migration_result' in legacy:
            pre_result = legacy['pre_live_migration_result']
            self.graphics_listen_addr_vnc = \
                pre_result['graphics_listen_addrs'].get('vnc')
            self.graphics_listen_addr_spice = \
                pre_result['graphics_listen_addrs'].get('spice')
            self.target_connect_addr = pre_result.get('target_connect_addr')
            if 'serial_listen_addr' in pre_result:
                self.serial_listen_addr = pre_result['serial_listen_addr']
            self._bdms_from_legacy(pre_result)
        LOG.debug('Converted object: %s', self)

    def is_on_shared_storage(self):
        """True when either the instance path or block storage is shared."""
        return self.is_shared_block_storage or self.is_shared_instance_path
@obj_base.NovaObjectRegistry.register
class XenapiLiveMigrateData(LiveMigrateData):
    """XenAPI driver payload for live migration."""
    # Version 1.0: Initial version
    # Version 1.1: Added vif_uuid_map
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    # Version 1.4: Inherited vifs from LiveMigrateData
    VERSION = '1.4'

    fields = {
        'block_migration': fields.BooleanField(nullable=True),
        'destination_sr_ref': fields.StringField(nullable=True),
        'migrate_send_data': fields.DictOfStringsField(nullable=True),
        'sr_uuid_map': fields.DictOfStringsField(),
        'kernel_file': fields.StringField(),
        'ramdisk_file': fields.StringField(),
        'vif_uuid_map': fields.DictOfStringsField(),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize to the pre-objects dict format."""
        legacy = super(XenapiLiveMigrateData, self).to_legacy_dict()
        if self.obj_attr_is_set('block_migration'):
            legacy['block_migration'] = self.block_migration
        if self.obj_attr_is_set('migrate_send_data'):
            legacy['migrate_data'] = {
                'migrate_send_data': self.migrate_send_data,
                'destination_sr_ref': self.destination_sr_ref,
            }
        # NOTE(review): `'sr_uuid_map' in self` presumably tests whether
        # the field is set on the object (NovaObject membership) — confirm.
        live_result = {
            'sr_uuid_map': ('sr_uuid_map' in self and self.sr_uuid_map
                            or {}),
            'vif_uuid_map': ('vif_uuid_map' in self and self.vif_uuid_map
                             or {}),
        }
        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result

        return legacy

    def from_legacy_dict(self, legacy):
        """Populate fields from the pre-objects dict format."""
        super(XenapiLiveMigrateData, self).from_legacy_dict(legacy)
        if 'block_migration' in legacy:
            self.block_migration = legacy['block_migration']
        else:
            self.block_migration = False
        if 'migrate_data' in legacy:
            self.migrate_send_data = \
                legacy['migrate_data']['migrate_send_data']
            self.destination_sr_ref = \
                legacy['migrate_data']['destination_sr_ref']
        if 'pre_live_migration_result' in legacy:
            self.sr_uuid_map = \
                legacy['pre_live_migration_result']['sr_uuid_map']
            self.vif_uuid_map = \
                legacy['pre_live_migration_result'].get('vif_uuid_map', {})

    def obj_make_compatible(self, primitive, target_version):
        """Drop fields newer than the requested target version."""
        super(XenapiLiveMigrateData, self).obj_make_compatible(
            primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 4) and 'vifs' in primitive:
            del primitive['vifs']
        if target_version < (1, 3) and 'wait_for_vif_plugged' in primitive:
            del primitive['wait_for_vif_plugged']
        if target_version < (1, 2):
            if 'old_vol_attachment_ids' in primitive:
                del primitive['old_vol_attachment_ids']
        if target_version < (1, 1):
            if 'vif_uuid_map' in primitive:
                del primitive['vif_uuid_map']
@obj_base.NovaObjectRegistry.register
class HyperVLiveMigrateData(LiveMigrateData):
    """Hyper-V driver payload for live migration."""
    # Version 1.0: Initial version
    # Version 1.1: Added is_shared_instance_path
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    # Version 1.4: Inherited vifs from LiveMigrateData
    VERSION = '1.4'

    fields = {'is_shared_instance_path': fields.BooleanField()}

    def obj_make_compatible(self, primitive, target_version):
        """Drop fields unknown to versions older than the target."""
        super(HyperVLiveMigrateData, self).obj_make_compatible(
            primitive, target_version)
        version = versionutils.convert_version_to_tuple(target_version)
        # Each field is removed when the target predates the version
        # that introduced it.
        introduced_in = (
            ((1, 4), 'vifs'),
            ((1, 3), 'wait_for_vif_plugged'),
            ((1, 2), 'old_vol_attachment_ids'),
            ((1, 1), 'is_shared_instance_path'),
        )
        for min_version, name in introduced_in:
            if version < min_version:
                primitive.pop(name, None)

    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize to the pre-objects dict format."""
        legacy = super(HyperVLiveMigrateData, self).to_legacy_dict()
        if self.obj_attr_is_set('is_shared_instance_path'):
            legacy['is_shared_instance_path'] = self.is_shared_instance_path
        return legacy

    def from_legacy_dict(self, legacy):
        """Populate fields from the pre-objects dict format."""
        super(HyperVLiveMigrateData, self).from_legacy_dict(legacy)
        if 'is_shared_instance_path' in legacy:
            self.is_shared_instance_path = legacy['is_shared_instance_path']
@obj_base.NovaObjectRegistry.register
class PowerVMLiveMigrateData(LiveMigrateData):
    """PowerVM driver payload for live migration."""
    # Version 1.0: Initial version
    # Version 1.1: Added the Virtual Ethernet Adapter VLAN mappings.
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    # Version 1.4: Inherited vifs from LiveMigrateData
    VERSION = '1.4'

    fields = {
        'host_mig_data': fields.DictOfNullableStringsField(),
        'dest_ip': fields.StringField(),
        'dest_user_id': fields.StringField(),
        'dest_sys_name': fields.StringField(),
        'public_key': fields.StringField(),
        'dest_proc_compat': fields.StringField(),
        'vol_data': fields.DictOfNullableStringsField(),
        'vea_vlan_mappings': fields.DictOfNullableStringsField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Drop fields unknown to versions older than the target."""
        super(PowerVMLiveMigrateData, self).obj_make_compatible(
            primitive, target_version)
        version = versionutils.convert_version_to_tuple(target_version)
        # Each field is removed when the target predates the version
        # that introduced it.
        for min_version, name in (
                ((1, 4), 'vifs'),
                ((1, 3), 'wait_for_vif_plugged'),
                ((1, 2), 'old_vol_attachment_ids'),
                ((1, 1), 'vea_vlan_mappings')):
            if version < min_version:
                primitive.pop(name, None)

    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize every set field on top of the base legacy dict."""
        legacy = super(PowerVMLiveMigrateData, self).to_legacy_dict()
        legacy.update({name: getattr(self, name)
                       for name in self.fields
                       if self.obj_attr_is_set(name)})
        return legacy

    def from_legacy_dict(self, legacy):
        """Restore every known field present in the legacy dict."""
        super(PowerVMLiveMigrateData, self).from_legacy_dict(legacy)
        for name in self.fields:
            if name in legacy:
                setattr(self, name, legacy[name])
@obj_base.NovaObjectRegistry.register
class VMWareLiveMigrateData(LiveMigrateData):
    """VMware driver payload for live migration."""
    # Version 1.0: Initial version
    # Version 1.1: Added dest_cluster_ref, is_same_vcenter,
    #              instance_already_migrated, relocate_defaults_json,
    #              vif_infos_json for cross-vcenter migration
    VERSION = '1.1'

    fields = {
        'cluster_name': fields.StringField(nullable=False),
        'datastore_regex': fields.StringField(nullable=False),
        'dest_cluster_ref': fields.StringField(nullable=False),
        'is_same_vcenter': fields.BooleanField(default=True),
        'instance_already_migrated': fields.BooleanField(default=False),
        'relocate_defaults_json': fields.SensitiveStringField(default="[]"),
        'vif_infos_json': fields.StringField(default="[]"),
    }

    # relocate_defaults / vif_infos are persisted as JSON strings so the
    # object stays serializable; the properties below give structured
    # (de)serialized access.
    @property
    def relocate_defaults(self):
        """Deserialized view of relocate_defaults_json."""
        return jsonutils.loads(self.relocate_defaults_json)

    @relocate_defaults.setter
    def relocate_defaults(self, relocate_defaults_dict):
        self.relocate_defaults_json = jsonutils.dumps(relocate_defaults_dict)

    @property
    def vif_infos(self):
        """Deserialized view of vif_infos_json."""
        return jsonutils.loads(self.vif_infos_json)

    @vif_infos.setter
    def vif_infos(self, vif_infos):
        self.vif_infos_json = jsonutils.dumps(vif_infos)

    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize every set field on top of the base legacy dict."""
        legacy = super(VMWareLiveMigrateData, self).to_legacy_dict()
        for field in self.fields:
            if self.obj_attr_is_set(field):
                legacy[field] = getattr(self, field)
        return legacy

    def from_legacy_dict(self, legacy):
        """Restore every known field present in the legacy dict."""
        super(VMWareLiveMigrateData, self).from_legacy_dict(legacy)
        for field in self.fields:
            if field in legacy:
                setattr(self, field, legacy[field])

    def obj_make_compatible(self, primitive, target_version):
        """Drop the 1.1 cross-vcenter fields for pre-1.1 consumers."""
        super(VMWareLiveMigrateData, self).obj_make_compatible(
            primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1):
            for k in ('is_same_vcenter', 'instance_already_migrated',
                      'relocate_defaults_json', 'vif_infos_json'):
                primitive.pop(k, None)
| 42.227353 | 79 | 0.662446 |
1b71a65f398c9feea9a469bb862a7252339134b1 | 1,578 | py | Python | utilities/testWriteSummaryTopology.py | LLNL/hdtopology | 303a0740d59831073b2af69be698dbe19932ec7b | [
"BSD-3-Clause"
] | null | null | null | utilities/testWriteSummaryTopology.py | LLNL/hdtopology | 303a0740d59831073b2af69be698dbe19932ec7b | [
"BSD-3-Clause"
] | null | null | null | utilities/testWriteSummaryTopology.py | LLNL/hdtopology | 303a0740d59831073b2af69be698dbe19932ec7b | [
"BSD-3-Clause"
] | null | null | null | import ngl
import numpy as np
from hdff import *
import hdtopology as hdt
# Draw n points uniformly from [-1, 1]^d as float32, shaped (n, d).
n = 1000
d = 3
sample = np.random.uniform(-1.0,1.0,n*d).astype('f')
sample = sample.reshape(n, d)
###### test function ######
def ackley(domain, d=3):
    """Evaluate an Ackley-like test function row-wise on *domain*.

    For each row, sums over consecutive column pairs (i, i+1) of the
    first *d* columns: each column is rescaled from [0, 1] to [-3, 3]
    and the pair contributes
    ``3*(cos(2*t1) + sin(2*t2)) - exp(-0.2)*sqrt(t1**2 + t2**2)``.

    :param domain: array of shape (n_rows, >= d) of sample points.
    :param d: number of columns to use (d-1 consecutive pairs).
    :return: squeezed 1-D array of n_rows values (0-d array if n_rows == 1).
    """
    decay = np.exp(-0.2)  # loop-invariant; hoisted out of the pair loop
    total = np.zeros(domain.shape[0], dtype=float)
    for i in range(d - 1):
        theta1 = 6 * domain[:, i] - 3
        theta2 = 6 * domain[:, i + 1] - 3
        total += (3 * (np.cos(2 * theta1) + np.sin(2 * theta2))
                  - decay * np.sqrt(np.power(theta1, 2) + np.power(theta2, 2)))
    return np.squeeze(np.asarray(total))
# Evaluate the test function on the sample.
f = ackley(sample)

# Neighborhood-graph parameters for ngl.
method = "RelaxedGabriel"
max_neighbors = 500
beta = 1.0

### provide recarray for data input ###
# Append f as the last column and view the result as a named recarray
# (X1..X3 plus the function value), all float32.
data = np.concatenate((sample, np.matrix(f).T), axis=1).astype('f')
names = ['X1', 'X2', 'X3', 'f']
types = ['<f4']*(d+1)
data = data.view(dtype=list(zip(names,types)) ).view(np.recarray)
print(data.dtype)

### provide array of unint32 for the edges
edges = ngl.getSymmetricNeighborGraph(method, sample, max_neighbors,beta)
print(edges, type(edges), edges.dtype)

### compute topology
eg = hdt.ExtremumGraphExt()
flag_array = np.array([0],dtype=np.uint8)
eg.initialize(data, flag_array, edges, True ,10, 1)

# Persist the extremum graph into an HDFF collection on disk.
mc = DataBlockHandle()
mc.idString("TDA");
eg.save(mc)
dataset = DatasetHandle()
dataset.add(mc)
group = DataCollectionHandle("summaryTopologyTest.hdff")
group.add(dataset)
group.write()
4078619c49b423dbc777f0408855acca87b00d7a | 1,230 | py | Python | pyzoo/zoo/chronos/detector/anomaly/abstract.py | limn2o4/analytics-zoo | 78d6ce10976a7e1320ff5ebdf431db93a439ec56 | [
"Apache-2.0"
] | 2,970 | 2017-06-08T00:24:43.000Z | 2022-03-30T12:14:55.000Z | pyzoo/zoo/chronos/detector/anomaly/abstract.py | limn2o4/analytics-zoo | 78d6ce10976a7e1320ff5ebdf431db93a439ec56 | [
"Apache-2.0"
] | 3,530 | 2017-05-09T08:29:10.000Z | 2022-03-21T02:11:45.000Z | pyzoo/zoo/chronos/detector/anomaly/abstract.py | limn2o4/analytics-zoo | 78d6ce10976a7e1320ff5ebdf431db93a439ec56 | [
"Apache-2.0"
] | 972 | 2017-05-09T07:03:50.000Z | 2022-03-23T07:48:48.000Z | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
class AnomalyDetector(ABC):
    """Contract for time-series anomaly detectors.

    Concrete detectors must be fitted on a series before scores or
    anomaly indexes can be queried.
    """

    @abstractmethod
    def fit(self, y):
        """Fit the detector to the input time series.

        :param y: input time series
        """

    @abstractmethod
    def score(self):
        """Calculate anomaly scores for each sample in the input series.

        :return: anomaly scores
        """

    @abstractmethod
    def anomaly_indexes(self):
        """Get the indexes of the anomalies.

        :return: the indexes of the anomalies.
        """
| 26.170213 | 74 | 0.656911 |
c3cb4534025fff01366dcd89ca3d438827539225 | 13,054 | py | Python | workers/base/wait_jobs.py | dperezrada/poliglo-base | 7460cf1a16e66d155baa8aff93b4a540360e1328 | [
"MIT"
] | null | null | null | workers/base/wait_jobs.py | dperezrada/poliglo-base | 7460cf1a16e66d155baa8aff93b4a540360e1328 | [
"MIT"
] | null | null | null | workers/base/wait_jobs.py | dperezrada/poliglo-base | 7460cf1a16e66d155baa8aff93b4a540360e1328 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#req:
#end req
import os
import uuid
from datetime import datetime
import json
import hashlib
from time import time
import poliglo
from poliglo.utils import to_json
POLIGLO_SERVER_URL = os.environ.get('POLIGLO_SERVER_URL')
META_WORKER = 'wait_jobs'
def check_if_waiting_is_done(connection, workflow_id, workflow_instance_id, waiting_workers_ids):
    """Return (done, signature) for the watched workers' jobs.

    *done* is True when the union of every watched worker's 'total'
    job-id set minus the union of their 'done' sets is empty, i.e. no
    job is still pending.  *signature* is a sha1 digest derived from the
    pipeline results so callers can deduplicate repeated notifications
    for the same completion state.
    """
    total_jobs_keys = [
        poliglo.REDIS_KEY_INSTANCE_WORKER_JOBS % (
            workflow_id, workflow_instance_id, wait_jobs_from, 'total'
        )
        for wait_jobs_from in waiting_workers_ids
    ]
    done_jobs_keys = [
        poliglo.REDIS_KEY_INSTANCE_WORKER_JOBS % (
            workflow_id, workflow_instance_id, wait_jobs_from, 'done'
        )
        for wait_jobs_from in waiting_workers_ids
    ]
    pipe = connection.pipeline()
    # Scratch keys are unique per call (date + uuid) and are deleted in
    # the same pipeline once the diff cardinality has been computed.
    temp_union = 'temp:%s:%s' % (datetime.now().isoformat().split('T')[0], str(uuid.uuid4()))
    pipe.sunionstore(temp_union, *total_jobs_keys)
    temp_diff = 'temp:%s:%s' % (datetime.now().isoformat().split('T')[0], str(uuid.uuid4()))
    pipe.sdiffstore(temp_diff, temp_union, *done_jobs_keys)
    pipe.delete(temp_diff)
    pipe.delete(temp_union)
    execute_result = pipe.execute()
    # execute_result[1] is the SDIFFSTORE cardinality: pending job count.
    diff_count = execute_result[1]
    done_signature = hashlib.sha1(workflow_id+"_"+workflow_instance_id+"_".join([str(x) for x in execute_result])).hexdigest()
    if diff_count == 0:
        return (True, done_signature)
    return (False, done_signature)
def get_waiting_queue_name(workflow_instance_id, worker_id, wait_jobs_from):
    """Build the Redis queue name where jobs wait for *worker_id*.

    The waited-on worker ids are deduplicated and sorted so the same set
    always yields the same queue name regardless of input order.

    :param workflow_instance_id: id of the running workflow instance
    :param worker_id: id of the wait_jobs worker
    :param wait_jobs_from: iterable of worker ids being waited on
    :return: deterministic queue name string
    """
    # sorted() accepts any iterable; the redundant list() wrapper around
    # the set was removed.
    wait_jobs_from_text = "_".join(sorted(set(wait_jobs_from)))
    return "wait_jobs:%s_%s_%s" % (
        workflow_instance_id,
        worker_id,
        wait_jobs_from_text
    )
def process(specific_info, data, *args):
    """Worker entry point: park the incoming job on its waiting queue.

    Stores the raw job JSON in a Redis sorted set scored by arrival
    time and emits no output jobs — downstream workers are triggered
    later, once every awaited job is done (see main()).
    """
    inputs = poliglo.get_inputs(data, specific_info)
    connection = args[0].get('connection')
    waiting_queue_name = get_waiting_queue_name(
        data['workflow_instance']['id'], data['workflow_instance']['worker_id'], inputs['wait_jobs_from']
    )
    connection.zadd(waiting_queue_name, time(), to_json(data))
    return []
def get_waiting_workers(worker_workflows):
    """Map each worker to the list of workers whose jobs it waits on.

    :param worker_workflows: {workflow_id: {worker_id: worker_config}}
        where worker_config may carry default_inputs.wait_jobs_from.
    :return: tuple of
        * {workflow_id: {worker_id: [worker_id, waited_on_id, ...]}}
          (each worker's list starts with its own id), and
        * a deduplicated flat list of every waited-on worker id.

    Uses dict.items() instead of the Python-2-only iteritems() so the
    helper works on both Python 2 and 3.
    """
    all_waiting_workers = []
    workflows_workers_waiting = {}
    for workflow_id, workflow_values in worker_workflows.items():
        workers_waiting = workflows_workers_waiting.setdefault(workflow_id, {})
        for worker_id, worker_values in workflow_values.items():
            waiting = workers_waiting.setdefault(worker_id, [worker_id])
            for waited_on_id in worker_values.get('default_inputs', {}).get('wait_jobs_from', []):
                waiting.append(waited_on_id)
                all_waiting_workers.append(waited_on_id)
    return workflows_workers_waiting, list(set(all_waiting_workers))
def wait_is_done(connection, worker_workflows, workflow_id, workflow_instance_id, workflow_instance_name, worker_id, waiting_workers_ids):
    """All awaited jobs are finished: fan out to each configured next worker.

    Every downstream worker is started with the waiting queue name in
    '__read_from_queue' so it can consume the jobs parked by process().
    """
    waiting_queue_name = get_waiting_queue_name(
        workflow_instance_id, worker_id, waiting_workers_ids
    )
    worker = worker_workflows.get(workflow_id, {}).get(worker_id, {})
    for i, output_worker_id in enumerate(worker.get('next_workers', [])):
        # next_workers and __next_workers_types are parallel lists.
        output_worker_type = worker.get('__next_workers_types', [])[i]
        data = {'__read_from_queue': waiting_queue_name}
        poliglo.start_workflow_instance(
            connection, workflow_id, output_worker_type,
            output_worker_id, workflow_instance_name, data
        )
def main():
    """Event loop: alternate between finalized-job events and new wait jobs.

    Polls the global 'finalized' queue to learn when any watched worker
    finishes a job, re-checks completion for every wait_jobs worker of
    that workflow, and triggers the downstream workers exactly once per
    completion state (deduplicated by signature).  In between, drains
    this worker's own queue to park newly arriving jobs.
    """
    worker_workflows, connection = poliglo.prepare_worker(POLIGLO_SERVER_URL, META_WORKER)
    workflow_waiting_workers, all_waiting_workers = get_waiting_workers(worker_workflows)
    # TODO: Move to redis
    already_done_signatures = []
    found_finalized = False
    found_wait = False
    # Short brpop timeouts keep the loop responsive to both queues.
    timeout_wait = 1
    timeout_finalized = 1
    while True:
        if not found_wait:
            queue_message = connection.brpop(
                [poliglo.REDIS_KEY_QUEUE_FINALIZED,], timeout_finalized
            )
            if queue_message is not None:
                found_finalized = True
                finalized_data = json.loads(queue_message[1])
                # Ignore workers nobody is waiting on.
                if finalized_data['worker_id'] not in all_waiting_workers:
                    continue
                workflow_id = finalized_data['workflow']
                workflow_instance_id = finalized_data['workflow_instance_id']
                workflow_instance_name = finalized_data['workflow_instance_name']
                for worker_id, waiting_workers_ids in workflow_waiting_workers[workflow_id].iteritems():
                    status_done, done_signature = check_if_waiting_is_done(
                        connection, workflow_id, workflow_instance_id, waiting_workers_ids
                    )
                    if status_done:
                        # Only fire once per unique completion signature.
                        if done_signature not in already_done_signatures:
                            already_done_signatures.append(done_signature)
                            wait_is_done(
                                connection, worker_workflows, workflow_id, workflow_instance_id,
                                workflow_instance_name, worker_id, waiting_workers_ids
                            )
            else:
                found_finalized = False
        if not found_finalized:
            queue_message = connection.brpop([poliglo.REDIS_KEY_QUEUE % META_WORKER,], timeout_wait)
            if queue_message is not None:
                poliglo.default_main_inside(
                    connection, worker_workflows, queue_message, process, {'connection': connection}
                )
                found_wait = True
            else:
                found_wait = False
                queue_message = None

if __name__ == '__main__':
    main()
# INTEGRATION TEST
import signal
import subprocess
from unittest import TestCase
from shutil import copyfile
from time import sleep
from poliglo import start_workflow_instance
class TestWaitJobs(TestCase):
    """End-to-end integration test for the wait_jobs worker.

    Boots a poliglo server plus the worker processes against a local
    Redis, runs a generate -> filter -> wait -> count workflow and
    asserts the final count job is recorded exactly once.
    """

    @classmethod
    def _setup_config(cls):
        # Shared Redis/server config loaded by every spawned process.
        cls.config = {
            "all": {
                "REDIS_HOST": "127.0.0.1",
                "REDIS_PORT": 6379,
                "REDIS_DB": 5,
                "POLIGLO_SERVER_URL": "http://localhost:9016"
            }
        }
        cls.config_path = "/tmp/config.json"
        open(cls.config_path, 'w').write(to_json(cls.config))

    @classmethod
    def _setup_workflow(cls):
        # Pipeline: generate_numbers -> filter -> wait_jobs -> count_numbers.
        workflow = {
            "id": "test_wait_jobs",
            "name": "test_wait_jobs",
            "start_worker_id": "generate_numbers_1",
            "workers": {
                "generate_numbers_1": {
                    "meta_worker": "generate_numbers",
                    "default_inputs": {
                        "numbers_range": [0, 10],
                        "sleep": 0
                    },
                    "next_workers": ["filter_1"]
                },
                "filter_1": {
                    "meta_worker": "filter",
                    "default_inputs": {
                        "min": 1000
                    },
                    "next_workers": ["wait_jobs_1"]
                },
                "wait_jobs_1": {
                    "meta_worker": "wait_jobs",
                    "default_inputs": {
                        "wait_jobs_from": ["generate_numbers_1", "filter_1", "wait_jobs_1"]
                    },
                    "next_workers": ["count_numbers_1"]
                },
                "count_numbers_1": {
                    "meta_worker": "count_numbers",
                    "next_workers": []
                }
            }
        }
        cls.workflow_path = "/tmp/wait_jobs_test_workflows"
        if not os.path.exists(cls.workflow_path):
            os.makedirs(cls.workflow_path)
        open(cls.workflow_path+"/workflow_test_wait_jobs.json", 'w').write(to_json(workflow))

    @classmethod
    def _setup_master_mind_server(cls):
        # Launch poliglo_server in its own process group so it can be
        # killed as a group in tearDownClass.
        cls._setup_config()
        cls._setup_workflow()
        isolated_env = os.environ.copy()
        isolated_env['CONFIG_PATH'] = cls.config_path
        isolated_env['WORKFLOWS_PATH'] = cls.workflow_path
        isolated_env['POLIGLO_SERVER_PORT'] = "9016"
        cmd = ["poliglo_server",]
        cls.server_process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, shell=False, env=isolated_env, preexec_fn=os.setsid
        )

    @classmethod
    def _setup_workers_files(cls):
        # Write the three helper worker scripts plus a copy of this
        # wait_jobs worker into the directory start_workers.sh scans.
        cls.workers_path = "/tmp/wait_jobs_test_workers"
        if not os.path.exists(cls.workers_path):
            os.makedirs(cls.workers_path)
        #WORKER generate_numbers
        open(cls.workers_path+"/generate_numbers.py", 'w').write("""import time
import poliglo
import os
POLIGLO_SERVER_URL = os.environ.get('POLIGLO_SERVER_URL')
META_WORKER = 'generate_numbers'
def process(specific_info, data, *args):
    inputs = poliglo.get_inputs(data, specific_info)
    numbers_range = inputs.get('numbers_range')
    sleep_time = inputs.get('sleep')
    for i in range(numbers_range[0], numbers_range[1]):
        time.sleep(sleep_time)
        yield {'number': i}
poliglo.default_main(POLIGLO_SERVER_URL, META_WORKER, process)
""")
        #WORKER filter
        open(cls.workers_path+"/filter.py", 'w').write("""import time
import poliglo
import os
POLIGLO_SERVER_URL = os.environ.get('POLIGLO_SERVER_URL')
META_WORKER = 'filter'
def process(specific_info, data, *args):
    inputs = poliglo.get_inputs(data, specific_info)
    min_value = inputs.get("min")
    if inputs['number'] < min_value:
        return [inputs,]
    return []
poliglo.default_main(POLIGLO_SERVER_URL, META_WORKER, process)
""")
        #WORKER wait_jobs
        copyfile(os.path.abspath(__file__), cls.workers_path+'/wait_jobs.py')
        #WORKER count_numbers
        open(cls.workers_path+"/count_numbers.py", 'w').write("""import time
import poliglo
import os
POLIGLO_SERVER_URL = os.environ.get('POLIGLO_SERVER_URL')
META_WORKER = 'count_numbers'
def process(specific_info, data, *args):
    connection = args[0].get('connection')
    inputs = poliglo.get_inputs(data, specific_info)
    queue = inputs.get('__read_from_queue')
    total = 0
    for queue_data in connection.zrange(queue, 0, -1):
        total +=1
    return [{'total': total}]
config = poliglo.get_config(POLIGLO_SERVER_URL, 'all')
connection = poliglo.get_connection(config)
poliglo.default_main(POLIGLO_SERVER_URL, META_WORKER, process, {'connection': connection})
""")

    @classmethod
    def _setup_workers(cls):
        # Launch start_workers.sh (repo root) in its own process group.
        cls._setup_workers_files()
        isolated_env = os.environ.copy()
        isolated_env['WORKERS_PATHS'] = cls.workers_path
        isolated_env['POLIGLO_SERVER_URL'] = cls.config.get('all').get('POLIGLO_SERVER_URL')
        isolated_env['DEPLOY_USER'] = 'test_user'
        project_dir = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..'
            )
        )
        start_workers_path = os.path.join(project_dir, "start_workers.sh")
        cmd = [
            "/bin/bash",
            start_workers_path
        ]
        cls.workers_process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, shell=False, env=isolated_env, preexec_fn=os.setsid
        )

    @classmethod
    def setUpClass(cls):
        cls._setup_master_mind_server()
        sleep(1)  # give the server a moment to come up before the workers
        cls._setup_workers()

    def setUp(self):
        self.connection = poliglo.get_connection(self.config.get('all'))
        self.connection.flushall()

    @classmethod
    def tearDownClass(cls):
        # Kill both process groups spawned with preexec_fn=os.setsid.
        os.killpg(cls.server_process.pid, signal.SIGTERM)
        os.killpg(cls.workers_process.pid, signal.SIGTERM)

    def test_wait_for_all_jobs(self):
        """count_numbers runs exactly once after every job finishes."""
        self.workflow_instance_id = start_workflow_instance(
            self.connection,
            'test_wait_jobs', 'generate_numbers', 'generate_numbers_1', 'instance1', {}
        )
        queues = [None]
        # Busy-wait until every worker queue has drained.
        while len(queues) > 0:
            sleep(1)
            queues = self.connection.keys("queue:*")
        total_finalized = self.connection.zcard(
            "workflows:test_wait_jobs:workflow_instances:%s:workers:count_numbers_1:finalized" % \
            self.workflow_instance_id
        )
        self.assertEqual(1, total_finalized)

    def test_last_message_are_filtered(self):
        """wait still completes when the filter drops the last messages."""
        self.workflow_instance_id = start_workflow_instance(
            self.connection, 'test_wait_jobs',
            'generate_numbers', 'generate_numbers_1', 'instance1', {
                'numbers_range': [995, 1005]
            }
        )
        queues = [None]
        while len(queues) > 0:
            sleep(1)
            queues = self.connection.keys("queue:*")
        total_finalized = self.connection.zcard(
            "workflows:test_wait_jobs:workflow_instances:%s:workers:count_numbers_1:finalized" % \
            self.workflow_instance_id
        )
        self.assertEqual(1, total_finalized)
6494521444d0dd607d00d68b3a7463d9212eca5c | 2,774 | py | Python | tests/model/test_summarizer_transformer.py | audriusuzkuraitis/headliner | f915306fd80218949a6447aa915c93ef4b44fdd0 | [
"MIT"
] | null | null | null | tests/model/test_summarizer_transformer.py | audriusuzkuraitis/headliner | f915306fd80218949a6447aa915c93ef4b44fdd0 | [
"MIT"
] | null | null | null | tests/model/test_summarizer_transformer.py | audriusuzkuraitis/headliner | f915306fd80218949a6447aa915c93ef4b44fdd0 | [
"MIT"
] | null | null | null | import os
import shutil
import tempfile
import unittest
import numpy as np
import tensorflow as tf
from headliner.losses import masked_crossentropy
from headliner.model.transformer_summarizer import TransformerSummarizer
from headliner.preprocessing.keras_tokenizer import KerasTokenizer
from headliner.preprocessing.preprocessor import Preprocessor
from headliner.preprocessing.vectorizer import Vectorizer
class TestSummarizerTransformer(unittest.TestCase):
    """Save/load round-trip tests for TransformerSummarizer."""

    def setUp(self) -> None:
        # Fixed seeds make pre/post-load predictions comparable.
        np.random.seed(42)
        tf.random.set_seed(42)
        self.temp_dir = tempfile.mkdtemp(prefix='TestSummarizerTransformerTmp')

    def tearDown(self) -> None:
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def test_serde_happy_path(self) -> None:
        """Saving then loading preserves config, weights and predictions."""
        preprocessor = Preprocessor()
        tokenizer = KerasTokenizer(oov_token='<unk>')
        tokenizer.fit(['a b c {} {}'.format(
            preprocessor.start_token, preprocessor.end_token)])
        vectorizer = Vectorizer(tokenizer, tokenizer)
        summarizer = TransformerSummarizer(num_layers=1,
                                           num_heads=2,
                                           max_prediction_len=3,
                                           embedding_size=10,
                                           embedding_encoder_trainable=False)
        summarizer.init_model(preprocessor=preprocessor,
                              vectorizer=vectorizer)

        # we need at least a train step to init the weights
        train_step = summarizer.new_train_step(masked_crossentropy, batch_size=1, apply_gradients=True)
        train_seq = tf.convert_to_tensor(np.array([[1, 1, 1]]), dtype=tf.int32)
        train_step(train_seq, train_seq)
        save_dir = os.path.join(self.temp_dir, 'summarizer_serde_happy_path')
        summarizer.save(save_dir)
        summarizer_loaded = TransformerSummarizer.load(save_dir)

        # Hyperparameters and components survive the round trip.
        self.assertEqual(1, summarizer_loaded.num_layers)
        self.assertEqual(2, summarizer_loaded.num_heads)
        self.assertEqual(3, summarizer_loaded.max_prediction_len)
        self.assertEqual(10, summarizer_loaded.embedding_size)
        self.assertIsNotNone(summarizer_loaded.preprocessor)
        self.assertIsNotNone(summarizer_loaded.vectorizer)
        self.assertIsNotNone(summarizer_loaded.transformer)
        # Trainability flags must be restored as configured above.
        self.assertFalse(summarizer_loaded.transformer.encoder.embedding.trainable)
        self.assertTrue(summarizer_loaded.transformer.decoder.embedding.trainable)
        self.assertIsNotNone(summarizer_loaded.optimizer)

        # Predictions before and after the round trip must match.
        pred = summarizer.predict_vectors('a c', '')
        pred_loaded = summarizer_loaded.predict_vectors('a c', '')
        np.testing.assert_almost_equal(pred['logits'], pred_loaded['logits'], decimal=6)
f186a7e6a5ffafca15176c82a51cac5017a40770 | 6,407 | py | Python | zerver/lib/markdown/api_arguments_table_generator.py | savish28/zulip | cf5ededa3566b14c44375786ffeadacaec693e53 | [
"Apache-2.0"
] | 4 | 2020-09-26T17:46:27.000Z | 2021-06-24T16:56:17.000Z | zerver/lib/markdown/api_arguments_table_generator.py | savish28/zulip | cf5ededa3566b14c44375786ffeadacaec693e53 | [
"Apache-2.0"
] | null | null | null | zerver/lib/markdown/api_arguments_table_generator.py | savish28/zulip | cf5ededa3566b14c44375786ffeadacaec693e53 | [
"Apache-2.0"
] | 1 | 2020-11-26T14:09:56.000Z | 2020-11-26T14:09:56.000Z | import json
import os
import re
from typing import Any, Dict, List, Mapping, Sequence
import markdown
from django.utils.html import escape as escape_html
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from zerver.openapi.openapi import get_openapi_parameters, likely_deprecated_parameter
REGEXP = re.compile(r'\{generate_api_arguments_table\|\s*(.+?)\s*\|\s*(.+)\s*\}')
class MarkdownArgumentsTableGenerator(Extension):
    """Markdown extension that installs the API-arguments table preprocessor."""

    def __init__(self, configs: Mapping[str, Any] = {}) -> None:
        self.config = {
            'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],
        }
        for option, value in configs.items():
            self.setConfig(option, value)

    def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
        preprocessor = APIArgumentsTablePreprocessor(md, self.getConfigs())
        md.preprocessors.add('generate_api_arguments', preprocessor, '_begin')
class APIArgumentsTablePreprocessor(Preprocessor):
    """Expand ``{generate_api_arguments_table|file|doc}`` macros into HTML.

    The macro either names an OpenAPI (.yaml) file plus an "endpoint:method"
    pair, or a JSON fixture file plus the key holding the argument list.
    """

    def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
        super().__init__(md)
        self.base_path = config['base_path']

    def run(self, lines: List[str]) -> List[str]:
        done = False
        while not done:
            # BUGFIX: the original used lines.index(line) to find the line
            # being processed; index() returns the FIRST occurrence, so a
            # document containing duplicate lines had the wrong line
            # replaced. enumerate() gives the true position.
            for loc, line in enumerate(lines):
                match = REGEXP.search(line)

                if not match:
                    continue

                filename = match.group(1)
                doc_name = match.group(2)
                filename = os.path.expanduser(filename)

                is_openapi_format = filename.endswith('.yaml')

                if not os.path.isabs(filename):
                    parent_dir = self.base_path
                    filename = os.path.normpath(os.path.join(parent_dir, filename))

                if is_openapi_format:
                    endpoint, method = doc_name.rsplit(':', 1)
                    arguments: List[Dict[str, Any]] = []

                    try:
                        arguments = get_openapi_parameters(endpoint, method)
                    except KeyError as e:
                        # Don't raise an exception if the "parameters"
                        # field is missing; we assume that's because the
                        # endpoint doesn't accept any parameters
                        if e.args != ('parameters',):
                            raise  # bare raise preserves the original traceback
                else:
                    with open(filename) as fp:
                        json_obj = json.load(fp)
                        arguments = json_obj[doc_name]

                if arguments:
                    text = self.render_table(arguments)
                else:
                    text = ['This endpoint does not accept any parameters.']

                # The line that contains the directive to include the macro
                # may be preceded or followed by text or tags, in that case
                # we need to make sure that any preceding or following text
                # stays the same.
                line_split = REGEXP.split(line, maxsplit=0)
                preceding = line_split[0]
                following = line_split[-1]
                text = [preceding, *text, following]
                lines = lines[:loc] + text + lines[loc + 1:]
                break
            else:
                done = True
        return lines

    def render_table(self, arguments: Sequence[Mapping[str, Any]]) -> List[str]:
        """Render one HTML ``<div>`` per argument; deprecated ones sort last.

        TODO: Fix naming now that this no longer renders a table.
        """
        table = []
        argument_template = """
<div class="api-argument" id="parameter-{argument}">
    <p class="api-argument-name"><strong>{argument}</strong> {required} {deprecated} <a href="#parameter-{argument}" class="api-argument-hover-link"><i class="fa fa-chain"></i></a></p>
    <div class="api-example">
        <span class="api-argument-example-label">Example</span>: <code>{example}</code>
    </div>
    <div class="api-description">{description}</div>
    <hr>
</div>"""

        md_engine = markdown.Markdown(extensions=[])
        arguments = sorted(arguments, key=lambda argument: 'deprecated' in argument)
        for argument in arguments:
            description = argument['description']

            oneof = ['`' + str(item) + '`'
                     for item in argument.get('schema', {}).get('enum', [])]
            if oneof:
                description += '\nMust be one of: {}.'.format(', '.join(oneof))

            default = argument.get('schema', {}).get('default')
            if default is not None:
                description += f'\nDefaults to `{json.dumps(default)}`.'

            # TODO: OpenAPI allows indicating where the argument goes
            # (path, querystring, form data...). We should document this detail.
            example = ""
            if 'example' in argument:
                example = argument['example']
            else:
                example = json.dumps(argument['content']['application/json']['example'])

            required_string: str = "required"
            if argument.get('in', '') == 'path':
                # Any path variable is required
                assert argument['required']
                required_string = 'required in path'

            if argument.get('required', False):
                required_block = f'<span class="api-argument-required">{required_string}</span>'
            else:
                required_block = '<span class="api-argument-optional">optional</span>'

            # Test to make sure deprecated parameters are marked so.
            if likely_deprecated_parameter(description):
                assert(argument['deprecated'])
            if argument.get('deprecated', False):
                deprecated_block = '<span class="api-argument-deprecated">Deprecated</span>'
            else:
                deprecated_block = ''

            table.append(argument_template.format(
                argument=argument.get('argument') or argument.get('name'),
                example=escape_html(example),
                required=required_block,
                deprecated=deprecated_block,
                description=md_engine.convert(description),
            ))

        return table
def makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:
    """Entry point used by python-markdown to build this extension."""
    extension = MarkdownArgumentsTableGenerator(kwargs)
    return extension
| 41.335484 | 184 | 0.563758 |
17ae6f187afc4ad9caf92427521287e310f971b3 | 57,036 | py | Python | tests/hub_test.py | stevepbyrne/dbus-systemcalc-py | 4d50ca36af51bbe1e3040cb63f60ef262da5d397 | [
"MIT"
] | null | null | null | tests/hub_test.py | stevepbyrne/dbus-systemcalc-py | 4d50ca36af51bbe1e3040cb63f60ef262da5d397 | [
"MIT"
] | null | null | null | tests/hub_test.py | stevepbyrne/dbus-systemcalc-py | 4d50ca36af51bbe1e3040cb63f60ef262da5d397 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import math
# This adapts sys.path to include all relevant packages
import context
# our own packages
from base import TestSystemCalcBase
# Monkey patching for unit tests
import patches
class TestHubSystem(TestSystemCalcBase):
def __init__(self, methodName='runTest'):
    """Construct the test case via the shared system-calc base class."""
    super(TestHubSystem, self).__init__(methodName)
def setUp(self):
    """Common fixture: a Multi (with the Hub-4 assistant loaded), grid-type
    AC-input settings, and DVCC ('Bol') enabled."""
    TestSystemCalcBase.setUp(self)
    self._add_device('com.victronenergy.vebus.ttyO1',
        product_name='Multi',
        values={
            '/Ac/ActiveIn/L1/P': 123,
            '/Ac/ActiveIn/ActiveInput': 0,
            '/Ac/ActiveIn/Connected': 1,
            '/Ac/Out/L1/P': 100,
            '/Dc/0/Voltage': 12.25,
            '/Dc/0/Current': -8,
            '/Dc/0/Temperature': 24,
            '/DeviceInstance': 0,
            '/Devices/0/Assistants': [0x55, 0x1] + (26 * [0]),  # Hub-4 assistant
            '/Dc/0/MaxChargeCurrent': 999,
            '/ExtraBatteryCurrent': 0,
            '/Soc': 53.2,
            '/State': 3,
            '/BatteryOperationalLimits/MaxChargeVoltage': None,
            '/BatteryOperationalLimits/MaxChargeCurrent': None,
            '/BatteryOperationalLimits/MaxDischargeCurrent': None,
            '/BatteryOperationalLimits/BatteryLowVoltage': None,
            '/BatterySense/Voltage': None,
            '/FirmwareFeatures/BolFrame': 1,
            '/FirmwareFeatures/BolUBatAndTBatSense': 1,
            '/FirmwareVersion': 0x456,
            '/Hub4/L1/DoNotFeedInOvervoltage': 1
        })
    self._add_device('com.victronenergy.settings',
        values={
            '/Settings/SystemSetup/AcInput1': 1,
            '/Settings/SystemSetup/AcInput2': 2,
        })
    # Enable DVCC ("Bol") for all tests in this class.
    self._set_setting('/Settings/Services/Bol', 1)
def test_hub1_control_voltage_with_state(self):
    """The Multi's /Hub/ChargeVoltage is copied to a VE.Direct solar
    charger's /Link/ChargeVoltage, and voltage-sense control is flagged."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.6)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/FirmwareVersion': 0x129},
        connection='VE.Direct')
    self._update_values(3000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO1': {
            '/Link/ChargeVoltage': 12.6
        }})
    self._check_values({
        '/Control/SolarChargeVoltage': 1,
        '/Control/SolarChargerVoltageSense': 1})
def test_hub1_control_voltage_without_state(self):
    """Charge voltage is still distributed to the solar charger and its
    /State stays untouched (0)."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.6)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._update_values(3000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO1': {
            '/Link/ChargeVoltage': 12.6,
            '/State': 0
        }})
    self._check_values({
        '/Control/SolarChargeVoltage': 1,
        '/Control/SolarChargerVoltageSense': 1})
def test_hub1_control_voltage_multiple_solarchargers(self):
    """The same /Hub/ChargeVoltage is written to every VE.Direct solar
    charger in the system."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.5)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._update_values(3000)
    self.assertEqual(12.5, self._monitor.get_value('com.victronenergy.solarcharger.ttyO1',
            '/Link/ChargeVoltage'))
    self.assertEqual(12.5, self._monitor.get_value('com.victronenergy.solarcharger.ttyO2',
            '/Link/ChargeVoltage'))
    self.assertEqual(0, self._monitor.get_value('com.victronenergy.solarcharger.ttyO2', '/State'))
    self._check_values({'/Control/SolarChargeVoltage': 1})
def test_hub1_control_voltage_ve_can_solarchargers(self):
    """A solar charger connected over VE.Can gets no /Link/ChargeVoltage
    written by the hub-1 control loop."""
    # Hub1 control should ignore VE.Can solarchargers
    # self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.5)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7},
        connection='VE.Can')
    self._update_values()
    self.assertEqual(None, self._monitor.get_value('com.victronenergy.solarcharger.ttyO1',
            '/Link/ChargeVoltage'))
    self.assertEqual(0, self._monitor.get_value('com.victronenergy.solarcharger.ttyO1', '/State'))
    self._check_values({'/Control/SolarChargeVoltage': 0})
def test_hub1_control_ve_can_service(self):
    """Charge voltage, network mode, voltage- and temperature-sense are
    written to every com.victronenergy.vecan.* service; a removed service
    keeps its last value while remaining ones follow updates."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.63)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._add_device('com.victronenergy.solarcharger.can0', {
        '/State': 0,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7},
        connection='VE.Can')
    self._add_device('com.victronenergy.vecan.can0', {
        '/Link/ChargeVoltage': None,
        '/Link/NetworkMode': None,
        '/Link/TemperatureSense': None,
        '/Link/VoltageSense': None})
    self._update_values(12000)
    self.assertEqual(12.63, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self.assertEqual(5, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/NetworkMode'))
    # Sense values come from the Multi: /Dc/0/Voltage and /Dc/0/Temperature.
    self.assertEqual(12.25, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/VoltageSense'))
    self.assertEqual(24, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/TemperatureSense'))
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 13.2)
    self._add_device('com.victronenergy.vecan.can1', {
        '/Link/ChargeVoltage': None,
        '/Link/NetworkMode': None,
        '/Link/TemperatureSense': None,
        '/Link/VoltageSense': None})
    self._update_values(9000)
    self.assertEqual(13.2, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self.assertEqual(13.2, self._monitor.get_value('com.victronenergy.vecan.can1', '/Link/ChargeVoltage'))
    self.assertEqual(5, self._monitor.get_value('com.victronenergy.vecan.can1', '/Link/NetworkMode'))
    self.assertEqual(12.25, self._monitor.get_value('com.victronenergy.vecan.can1', '/Link/VoltageSense'))
    self.assertEqual(24, self._monitor.get_value('com.victronenergy.vecan.can1', '/Link/TemperatureSense'))
    self._remove_device('com.victronenergy.vecan.can0')
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 13.1)
    self._update_values(interval=3000)
    self.assertEqual(None, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self.assertEqual(13.1, self._monitor.get_value('com.victronenergy.vecan.can1', '/Link/ChargeVoltage'))
    self._check_values({'/Control/SolarChargeVoltage': 1})
def test_hub1_control_ve_can_service_no_solar_charger(self):
    """Without any solar charger present, the vecan service gets no
    /Link/ChargeVoltage and the control flag stays 0."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.63)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._add_device('com.victronenergy.vecan.can0', {
        '/Link/ChargeVoltage': None})
    self.assertEqual(None, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 13.2)
    self._check_values({'/Control/SolarChargeVoltage': 0})
def test_hub1_control_ve_can_and_solar_charger(self):
    """With both a VE.Can solar charger (via a vecan service) and a
    VE.Direct solar charger, both receive the same charge voltage and
    follow subsequent changes."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.63)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._add_device('com.victronenergy.solarcharger.can0', {
        '/State': 0,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7},
        connection='VE.Can')
    self._add_device('com.victronenergy.vecan.can0', {
        '/Link/ChargeVoltage': 12.3,
        '/Link/NetworkMode': None,
        '/Link/VoltageSense': None,
        '/Link/TemperatureSense': None})
    self._update_values(3000)
    self.assertEqual(12.63, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._update_values(3000)
    self.assertEqual(12.63, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self.assertEqual(12.63, self._monitor.get_value('com.victronenergy.solarcharger.ttyO2',
            '/Link/ChargeVoltage'))
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.53)
    self._update_values(interval=10000)
    self.assertEqual(5, self._monitor.get_value('com.victronenergy.solarcharger.ttyO2', '/Link/NetworkMode'))
    self.assertEqual(12.53, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self.assertEqual(12.53, self._monitor.get_value('com.victronenergy.solarcharger.ttyO2',
            '/Link/ChargeVoltage'))
    self._check_values({'/Control/SolarChargeVoltage': 1})
def test_hub1_control_ve_can_service_no_setpoint(self):
    """A vecan service without a /Link/ChargeVoltage path is left alone;
    both control flags stay 0."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.65)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._add_device('com.victronenergy.vecan.can0', {}, connection='VE.Can')
    self._update_values()
    self.assertEqual(None, self._monitor.get_value('com.victronenergy.vecan.can0', '/Link/ChargeVoltage'))
    self._check_values({'/Control/SolarChargeCurrent': 0})
    self._check_values({'/Control/SolarChargeVoltage': 0})
def test_hub1_control_vedirect_solarcharger_bms_battery(self):
    """BMS charge limits are forwarded: the solar charger gets the BMS
    charge current (plus the Multi's measured 8A discharge as headroom)
    and the Multi gets the BatteryOperationalLimits."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 55.2)
    self._monitor.add_value('com.victronenergy.settings', '/Settings/CGwacs/OvervoltageFeedIn', 0)
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 3,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Settings/ChargeCurrentLimit': 100,
        '/Dc/0/Voltage': 58.0,
        '/Dc/0/Current': 30,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 58.1,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 45,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(interval=60000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO2': {
            '/Link/NetworkMode': 13,
            '/Link/ChargeCurrent': 45 + 8,
            '/Link/ChargeVoltage': 55.2},
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': 47,
            '/BatteryOperationalLimits/MaxChargeCurrent': 15,
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2,
            '/BatteryOperationalLimits/MaxDischargeCurrent': 50,
            '/Dc/0/MaxChargeCurrent': 999}})
    self._check_values({
        '/Control/SolarChargeCurrent': 1,
        '/Control/SolarChargeVoltage': 1,
        '/Control/EffectiveChargeVoltage': 55.2,
        '/Control/BmsParameters': 1})
def test_vedirect_solarcharger_bms_battery_max_charge_current_setting(self):
    """The user's MaxChargeCurrent setting (40A) caps the BMS limit (45A)
    in the values distributed to solar charger and Multi."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 55.2)
    self._monitor.add_value('com.victronenergy.settings', '/Settings/CGwacs/OvervoltageFeedIn', 0)
    self._set_setting('/Settings/SystemSetup/MaxChargeCurrent', 40)
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 3,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Settings/ChargeCurrentLimit': 100,
        '/Dc/0/Voltage': 58.0,
        '/Dc/0/Current': 30,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 45,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(interval=60000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO2': {
            '/Link/NetworkMode': 13,
            '/Link/ChargeCurrent': 40 + 8,
            '/Link/ChargeVoltage': 55.2},
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': 47,
            '/BatteryOperationalLimits/MaxChargeCurrent': 10,
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2,
            '/BatteryOperationalLimits/MaxDischargeCurrent': 50,
            '/Dc/0/MaxChargeCurrent': 999}})
    self._check_values({
        '/Control/SolarChargeCurrent': 1,
        '/Control/SolarChargeVoltage': 1,
        '/Control/EffectiveChargeVoltage': 55.2, # ESS
        '/Control/BmsParameters': 1})
def test_control_vedirect_solarcharger_bms_battery_no_charge_voltage(self):
    """Without a /Hub/ChargeVoltage from the Multi, the BMS charge voltage
    (58.2V) is used as the effective setpoint."""
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 3,
        '/Settings/ChargeCurrentLimit': 100,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 31,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 45,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(interval=60000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO2': {
            '/Link/NetworkMode': 13,
            '/Link/ChargeCurrent': 45 + 8,
            '/Link/ChargeVoltage': 58.2},
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': 47,
            '/BatteryOperationalLimits/MaxChargeCurrent': 14,
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2,
            '/BatteryOperationalLimits/MaxDischargeCurrent': 50,
            '/Dc/0/MaxChargeCurrent': 999}})
    self._check_values({
        '/Control/SolarChargeCurrent': 1,
        '/Control/SolarChargeVoltage': 1,
        '/Control/SolarChargerVoltageSense': 1,
        '/Control/BmsParameters': 1})
def test_control_vedirect_solarcharger_charge_distribution(self):
    """The 25A BMS limit is distributed over two solar chargers plus the
    Multi; after the chargers converge to their setpoints the total DC
    current lands within 5% of the limit and the Multi gets the rest (0)."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Dc/0/MaxChargeCurrent', 0)
    self._update_values()
    self._add_device('com.victronenergy.solarcharger.ttyO0', {
        '/State': 3,
        '/Settings/ChargeCurrentLimit': 100,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': 15,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 14.3,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 3,
        '/Settings/ChargeCurrentLimit': 100,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': 15,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 7,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 25,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    # Simulate the solar charger moving towards the requested limit
    for _ in range(12):
        self._update_values(interval=1000)
        for c in ('com.victronenergy.solarcharger.ttyO0',
                'com.victronenergy.solarcharger.ttyO2'):
            self._monitor.set_value(c, '/Dc/0/Current', min(100.0,
                self._monitor.get_value(c, '/Link/ChargeCurrent')))
    total = self._monitor.get_value('com.victronenergy.vebus.ttyO1',
        '/Dc/0/Current') + \
        self._monitor.get_value('com.victronenergy.solarcharger.ttyO0',
        '/Dc/0/Current') + \
        self._monitor.get_value('com.victronenergy.solarcharger.ttyO2',
        '/Dc/0/Current')
    # Check that total is within 5%
    self.assertTrue(abs(total - 25) <= 1.25)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO0': {
            '/Link/NetworkMode': 13,
            '/Link/ChargeVoltage': 58.2},
        'com.victronenergy.solarcharger.ttyO2': {
            '/Link/NetworkMode': 13,
            '/Link/ChargeVoltage': 58.2},
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': 47,
            '/BatteryOperationalLimits/MaxChargeCurrent': 0,
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2,
            '/BatteryOperationalLimits/MaxDischargeCurrent': 50,
            # Difference goes to the multi
            '/Dc/0/MaxChargeCurrent': 0 }})
    self._check_values({
        '/Control/SolarChargeCurrent': 1,
        '/Control/SolarChargeVoltage': 1,
        '/Control/EffectiveChargeVoltage': 58.2,
        '/Control/BmsParameters': 1})
def test_control_vedirect_solarcharger_bms_ess_feedback(self):
    """ESS with OvervoltageFeedIn enabled: the MPPT is not limited below
    its own ChargeCurrentLimit (35A) because excess can be fed back."""
    # When feedback is allowed we do not limit MPPTs
    # Force system type to ESS
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub4/AssistantId', 5)
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 58.3)
    self._monitor.add_value('com.victronenergy.settings', '/Settings/CGwacs/OvervoltageFeedIn', 1)
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 1,
        '/Settings/ChargeCurrentLimit': 35,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 35,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 58.0,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 45,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(interval=10000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO2': {
            '/Link/NetworkMode': 13,
            '/Link/ChargeCurrent': 35,
            '/Link/ChargeVoltage': 58.3},
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': 47,
            '/BatteryOperationalLimits/MaxChargeCurrent': 10,
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2,
            '/BatteryOperationalLimits/MaxDischargeCurrent': 50,
            '/Dc/0/MaxChargeCurrent': 999}})
    self._check_values({
        '/SystemType': 'ESS',
        '/Control/SolarChargeCurrent': 1,
        '/Control/SolarChargeVoltage': 1,
        '/Control/EffectiveChargeVoltage': 58.3,
        '/Control/BmsParameters': 1})
def test_control_vedirect_solarcharger_bms_ess_feedback_no_ac_in(self):
    """ESS with feed-in enabled but no AC input connected: feedback is
    impossible, so the MPPT is limited to the BMS current plus headroom."""
    # When feedback is allowed we do not limit MPPTs, but in this case there is no AC-in so feedback is
    # not possible.
    # Force system type to ESS
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub4/AssistantId', 5)
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 58.3)
    self._monitor.add_value('com.victronenergy.settings', '/Settings/CGwacs/OvervoltageFeedIn', 1)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/Ac/ActiveIn/Connected', 0)
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 1,
        '/Settings/ChargeCurrentLimit': 100,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': 57.3,
        '/Link/ChargeCurrent': 20,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 31,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 58.0,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 45,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(interval=60000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO2': {
            '/Link/NetworkMode': 13,
            '/Link/ChargeCurrent': 45 + 8,
            '/Link/ChargeVoltage': 58.3},
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': 47,
            '/BatteryOperationalLimits/MaxChargeCurrent': 14,
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2,
            '/BatteryOperationalLimits/MaxDischargeCurrent': 50,
            '/Dc/0/MaxChargeCurrent': 999}})
    self._check_values({
        '/SystemType': 'ESS',
        '/Control/SolarChargeCurrent': 1,
        '/Control/SolarChargeVoltage': 1,
        '/Control/EffectiveChargeVoltage': 58.3,
        '/Control/BmsParameters': 1})
def test_hub1_control_vedirect_solarcharger_bms_battery_no_solarcharger(self):
    """Only a BMS battery present: BOL parameters go to the Multi, solar
    charger control flags stay 0."""
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 25,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(interval=10000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': 47,
            '/BatteryOperationalLimits/MaxChargeCurrent': 25,
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2,
            '/BatteryOperationalLimits/MaxDischargeCurrent': 50,
            '/Dc/0/MaxChargeCurrent': 999}})
    self._check_values({
        '/Control/SolarChargeCurrent': 0,
        '/Control/SolarChargeVoltage': 0,
        '/Control/EffectiveChargeVoltage': 58.2,
        '/Control/BmsParameters': 1})
def test_system_mapping(self):
    """/ServiceMapping entries track service add/remove by device instance."""
    self._update_values()
    self._check_values({
        '/ServiceMapping/com_victronenergy_vebus_0': 'com.victronenergy.vebus.ttyO1',
        '/ServiceMapping/com_victronenergy_settings_0': 'com.victronenergy.settings'})
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery', values={'/DeviceInstance': 3})
    self._check_values({
        '/ServiceMapping/com_victronenergy_vebus_0': 'com.victronenergy.vebus.ttyO1',
        '/ServiceMapping/com_victronenergy_battery_3': 'com.victronenergy.battery.ttyO2'})
    self._remove_device('com.victronenergy.battery.ttyO2')
    # Mapping entry must disappear with the service.
    self.assertFalse('/ServiceMapping/com_victronenergy_battery_3' in self._service)
def test_hub1_extra_current(self):
    """/ExtraBatteryCurrent on the Multi becomes the sum of both solar
    charger currents (9.7 + 9.3)."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent', 0)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.7},
        connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3},
        connection='VE.Direct')
    self._update_values()
    self.assertEqual(9.7 + 9.3, self._monitor.get_value('com.victronenergy.vebus.ttyO1',
            '/ExtraBatteryCurrent'))
def test_hub1_extra_current_no_battery_no_solarcharger(self):
    """Without solar chargers, /ExtraBatteryCurrent is written back to 0
    and only the extra-current control flag is set."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent', 1)
    self._update_values()
    self.assertEqual(0, self._monitor.get_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent'))
    self._check_values({
        '/Control/ExtraBatteryCurrent': 1,
        '/Control/SolarChargeVoltage': 0,
        '/Control/SolarChargeCurrent': 0,
        '/Control/SolarChargerVoltageSense': 0})
def test_hub1_extra_current_hub2_no_battery_monitor(self):
    """Without a dedicated battery monitor, the extra battery current is
    still the sum of the solar charger currents."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent', 0)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.7},
        connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3},
        connection='VE.Direct')
    self._update_values()
    self.assertEqual(9.7 + 9.3, self._monitor.get_value('com.victronenergy.vebus.ttyO1',
            '/ExtraBatteryCurrent'))
    self._check_values({'/Control/ExtraBatteryCurrent': 1})
def test_hub1_no_extra_current(self):
    """A Multi whose /ExtraBatteryCurrent path is absent (None) is left
    alone and the control flag stays 0."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent', None)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.7},
        connection='VE.Direct')
    self._update_values()
    self.assertIsNone(self._monitor.get_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent'))
    self._check_values({'/Control/ExtraBatteryCurrent': 0})
def test_hub1_with_bmv_extra_current_battery(self):
    """With a BMV battery monitor present, extra current is the solar
    charger current and the vebus SOC is not used."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent', 0)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.7},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2})
    self._update_values()
    self.assertEqual(9.7, self._monitor.get_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent'))
    self._check_values({'/Control/ExtraBatteryCurrent': 1})
    self._check_values({'/Control/VebusSoc': 0})
def test_hub2_extra_current_battery(self):
    """With hub-2 & Lynx Ion assistants loaded, extra current and voltage
    sense are still distributed, but charge voltage/current and BMS
    parameters are not."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent', 0)
    # Set hub-2 & Lynx Ion assistant
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/Devices/0/Assistants',
        [0x4D, 0x01, 0x3C, 0x01] + (26 * [0]))
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.7},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2})
    self._update_values(3000)
    self.assertEqual(9.7, self._monitor.get_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent'))
    self._check_values({
        '/Control/ExtraBatteryCurrent': 1,
        '/Control/VebusSoc': 0,
        '/Control/SolarChargerVoltageSense': 1,
        '/Control/SolarChargeVoltage': 0,
        '/Control/SolarChargeCurrent': 0,
        '/Control/BmsParameters': 0})
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO1': {
            '/Link/ChargeVoltage': None,
            '/Link/ChargeCurrent': None}})
def test_hub1_extra_current_no_active_battery(self):
    """With the battery service setting forced to 'nobattery', the extra
    current is still taken from the solar charger."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent', 23)
    self._set_setting('/Settings/SystemSetup/BatteryService', 'nobattery')
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.7},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2})
    self._update_values()
    self.assertEqual(9.7, self._monitor.get_value('com.victronenergy.vebus.ttyO1', '/ExtraBatteryCurrent'))
    self._check_values({'/Control/ExtraBatteryCurrent': 1})
    self._check_values({'/Control/VebusSoc': 0})
def test_multi_class(self):
    """The dvcc.Multi wrapper reflects BatteryOperationalLimits reads and
    writes on the vebus service, and exposes the firmware version."""
    from delegates.dvcc import Multi
    multi = Multi(self._system_calc._dbusmonitor, self._service)
    self.assertIsNone(multi.bol.chargevoltage)
    self.assertIsNone(multi.bol.maxchargecurrent)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/BatteryOperationalLimits/MaxChargeVoltage', 26)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/BatteryOperationalLimits/MaxChargeCurrent', 99)
    self._update_values()
    self.assertEqual(multi.bol.chargevoltage, 26)
    self.assertEqual(multi.bol.maxchargecurrent, 99)
    # Writing through the wrapper must land on the dbus service.
    multi.bol.chargevoltage = 27
    multi.bol.maxchargecurrent = 55
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 27,
            '/BatteryOperationalLimits/MaxChargeCurrent': 55,
        }})
    self.assertEqual(multi.firmwareversion, 0x456)
def test_multi_nobol(self):
    """A Multi that does not publish BatteryOperationalLimits paths must
    yield None BOL values instead of raising.

    Fix: removed the unused ``from dbus.exceptions import DBusException``
    import — nothing in this test references it.
    """
    from delegates.dvcc import Multi
    # Replace the default Multi with one that only publishes /State
    self._remove_device('com.victronenergy.vebus.ttyO1')
    self._add_device('com.victronenergy.vebus.ttyB1',
        product_name='Multi',
        values={
            '/State': 3,
        })
    self._update_values()
    multi = Multi(self._system_calc._dbusmonitor, self._service)
    self.assertIsNone(multi.bol.chargevoltage)
def test_solar_subsys(self):
    """SolarChargerSubsystem container protocol (__contains__, __len__,
    __iter__), VE.Can charger detection and external-control support."""
    from delegates.dvcc import SolarChargerSubsystem
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3
    }, connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3,
        '/FirmwareVersion': 0x129,
    }, connection='VE.Direct')
    # Third charger is on VE.Can; added to the subsystem only later
    self._add_device('com.victronenergy.solarcharger.socketcan_can0_di0_uc30688', {
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3,
        '/FirmwareVersion': 0x102ff,
    }, connection='VE.Can')
    system = SolarChargerSubsystem(self._system_calc._dbusmonitor)
    system.add_charger('com.victronenergy.solarcharger.ttyO1')
    system.add_charger('com.victronenergy.solarcharger.ttyO2')
    # Test __contains__
    self.assertTrue('com.victronenergy.solarcharger.ttyO1' in system)
    self.assertTrue('com.victronenergy.solarcharger.ttyO2' in system)
    self.assertTrue('com.victronenergy.solarcharger.ttyO3' not in system)
    # Test __len__
    self.assertTrue(len(system)==2)
    # test __iter__
    chargers = list(system)
    self.assertTrue(chargers[0].service == 'com.victronenergy.solarcharger.ttyO1')
    self.assertTrue(chargers[1].service == 'com.victronenergy.solarcharger.ttyO2')
    # Add vecan charger
    self.assertFalse(system.has_vecan_chargers)
    system.add_charger('com.victronenergy.solarcharger.socketcan_can0_di0_uc30688')
    self.assertTrue(system.has_vecan_chargers)
    # Check parallel support
    self.assertTrue(system.has_externalcontrol_support)
def test_solar_subsys_distribution(self):
    """Subsystem capacity is the sum of the chargers' configured
    ChargeCurrentLimit settings (70 + 35 + 15 = 120)."""
    from delegates.dvcc import SolarChargerSubsystem
    self._add_device('com.victronenergy.battery.socketcan_can0_di0_uc30688', {
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3,
        '/Info/MaxChargeCurrent': 100
    }, connection='VE.Can')
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': 14.5,
        '/Link/ChargeCurrent': 50,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 40,
        '/Settings/ChargeCurrentLimit': 70,
    }, connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': 14.5,
        '/Link/ChargeCurrent': 32,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 30,
        '/Settings/ChargeCurrentLimit': 35,
    }, connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO3', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': 14.5,
        '/Link/ChargeCurrent': 12,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 10,
        '/Settings/ChargeCurrentLimit': 15,
    }, connection='VE.Direct')
    system = SolarChargerSubsystem(self._system_calc._dbusmonitor)
    system.add_charger('com.victronenergy.solarcharger.ttyO1')
    system.add_charger('com.victronenergy.solarcharger.ttyO2')
    system.add_charger('com.victronenergy.solarcharger.ttyO3')
    self.assertTrue(system.capacity == 120)
    # NOTE(review): this write re-asserts the value already set at device
    # creation and nothing is checked afterwards — looks like a leftover;
    # confirm intent before removing.
    self._monitor.set_value('com.victronenergy.battery.socketcan_can0_di0_uc30688', '/Info/MaxChargeCurrent', 100)
def test_battery_subsys_no_bms(self):
    """A battery without /Info paths yields an empty bmses list, and the
    BatterySubsystem container protocol works."""
    from delegates.dvcc import BatterySubsystem
    self._add_device('com.victronenergy.battery.socketcan_can0_di0_uc30688', {
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3
    }, connection='VE.Can')
    system = BatterySubsystem(self._system_calc._dbusmonitor)
    system.add_battery('com.victronenergy.battery.socketcan_can0_di0_uc30688')
    # No /Info/* paths published, so no BMS is recognised
    self.assertEqual(system.bmses, [])
    # Test magic methods
    self.assertTrue('com.victronenergy.battery.socketcan_can0_di0_uc30688' in system)
    self.assertTrue(len(system)==1)
    batteries = list(system)
    self.assertTrue(batteries[0].service == 'com.victronenergy.battery.socketcan_can0_di0_uc30688')
def test_battery_subsys_bms(self):
    """A battery publishing /Info limits is registered as a BMS and its
    charge limits/voltage are exposed on the battery object."""
    from delegates.dvcc import BatterySubsystem
    self._add_device('com.victronenergy.battery.socketcan_can0_di0_uc30688', {
        '/Dc/0/Voltage': 12.6,
        '/Dc/0/Current': 9.3,
        '/Info/MaxChargeVoltage': 15,
        '/Info/MaxChargeCurrent': 100,
        '/Info/MaxDischargeCurrent': 100
    }, connection='VE.Can')
    system = BatterySubsystem(self._system_calc._dbusmonitor)
    battery = system.add_battery('com.victronenergy.battery.socketcan_can0_di0_uc30688')
    # The same object is returned and listed as a BMS
    self.assertTrue(system.bmses[0] is battery)
    self.assertTrue(battery.maxchargecurrent == 100)
    self.assertTrue(battery.chargevoltage == 15)
    self.assertEqual(battery.voltage, 12.6)
def test_distribute(self):
    """distribute() adjusts per-charger limits by a delta, clamping the
    total to [0, sum(limits)].

    Fixes: the final distribute([2, 2], [2, 2], 20) call computed a result
    but asserted nothing; assertTrue(a == b) replaced with assertEqual so
    failures report the actual values.
    """
    from delegates.dvcc import distribute
    actual = [1, 2, 3, 4, 5]  # 15 amps
    limits = [5, 5, 5, 5, 5]  # 25 amps
    # add 5 amps
    newlimits = distribute(actual, limits, 5)
    self.assertEqual(sum(newlimits), 20)
    # max it out
    newlimits = distribute(actual, limits, 10)
    self.assertEqual(sum(newlimits), 25)
    # overflow it: total stays clamped to sum(limits)
    newlimits = distribute(actual, limits, 11)
    self.assertEqual(sum(newlimits), 25)
    # Drop 5 amps
    newlimits = distribute(actual, limits, -5)
    self.assertEqual(sum(newlimits), 10)
    # Drop 10 amps
    newlimits = distribute(actual, limits, -10)
    self.assertEqual(sum(newlimits), 5)
    # All of it
    newlimits = distribute(actual, limits, -15)
    self.assertEqual(sum(newlimits), 0)
    # Attempt to go negative: total clamps at zero
    newlimits = distribute(actual, limits, -20)
    self.assertEqual(sum(newlimits), 0)
    # Surplus on already-maxed chargers stays clamped to the limits
    # (the original computed this but asserted nothing)
    newlimits = distribute([2, 2], [2, 2], 20)
    self.assertEqual(sum(newlimits), 4)
def test_hub1bridge_distr_1(self):
    """A surplus of 3A is spread over the chargers within their limits."""
    from delegates.dvcc import distribute
    self.assertEqual(distribute([1, 2, 3], [6, 5, 4], 3), [2, 3, 4])
def test_hub1bridge_distr_2(self):
    """A surplus of exactly the remaining headroom maxes every charger."""
    from delegates.dvcc import distribute
    self.assertEqual(distribute([1, 2, 3], [6, 5, 4], 9.0), [6, 5, 4])
def test_hub1bridge_distr_3(self):
    """A surplus beyond the available headroom still caps at the limits."""
    from delegates.dvcc import distribute
    self.assertEqual(distribute([1, 2, 3], [6, 5, 4], 10.0), [6, 5, 4])
def test_hub1bridge_distr_4(self):
    """A partial surplus is split, with saturated chargers pinned at
    their limit and the rest shared among the others."""
    from delegates.dvcc import distribute
    self.assertEqual(distribute([1, 2, 3], [6, 5, 4], 6.0), [3.5, 4.5, 4])
def test_hub1bridge_distr_5(self):
    """Same split as distr_4 but with the charger order reversed."""
    from delegates.dvcc import distribute
    self.assertEqual(distribute([3, 2, 1], [4, 5, 6], 6.0), [4, 4.5, 3.5])
def test_hub1bridge_distr_6(self):
    """With no surplus, actuals above their limits are clamped down."""
    from delegates.dvcc import distribute
    self.assertEqual(distribute([4, 5, 6], [1, 2, 8], 0.0), [1, 2, 8])
def test_hub1bridge_distr_7(self):
    """A single charger simply saturates at its own limit."""
    from delegates.dvcc import distribute
    self.assertEqual(distribute([1], [5], 6.0), [5])
def test_debug_chargeoffsets(self):
    """Debug offsets published on the service shift the targets: the
    solarcharger charge voltage by SolarVoltageOffset, and the Multi's
    BOL voltage/current by VebusVoltageOffset/CurrentOffset."""
    self._update_values()
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 12.6)
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/State', 2)
    self._service.set_value('/Debug/BatteryOperationalLimits/SolarVoltageOffset', 0.4)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/FirmwareVersion': 0x129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.battery.ttyO2', product_name='battery',
        values={
            '/Dc/0/Voltage': 12.5,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 10,
            '/Info/MaxChargeCurrent': 25,
            '/Info/MaxChargeVoltage': 12.6,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(3000)
    # Check that debug voltage works for solar chargers (12.6 + 0.4 = 13)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO1': {
            '/Link/ChargeVoltage': 13
        }})
    # Check that we can also offset the Multi's voltage and current
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 12.6
        }})
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 15
        }})
    self._service.set_value('/Debug/BatteryOperationalLimits/VebusVoltageOffset', 0.2)
    self._service.set_value('/Debug/BatteryOperationalLimits/CurrentOffset', 5)
    self._update_values(3000)
    # 12.6 + 0.2 = 12.8; 15 + 5 = 20
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 12.8
        }})
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 20,
            '/Dc/0/MaxChargeCurrent': 999
        }})
def test_hub1_legacy_voltage_control(self):
    """Legacy (non-DVCC) hub-1 control: only voltage is forwarded to the
    solar charger at first; adding a BMS enables current sharing; the
    BMS charge-current limit takes effect only after DVCC is enabled."""
    # BOL support is off initially
    self._set_setting('/Settings/Services/Bol', 0)
    self._update_values()
    # Start without a BMS. No Current sharing should be done, only
    # voltage.
    self._monitor.add_value('com.victronenergy.vebus.ttyO1',
        '/Hub/ChargeVoltage', 12.6)
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 252,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/Settings/ChargeCurrentLimit': 35,
        '/FirmwareVersion': 0x0139},
        connection='VE.Direct')
    self._update_values(10000)
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO1': {
            '/Link/ChargeVoltage': 12.6,
            '/Link/ChargeCurrent': None,
            '/Link/NetworkMode': 5,
        }})
    self._check_values({'/Control/Dvcc': 0})
    # Add a BMS
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.7,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 67,
            '/Soc': 25.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 10,
            '/Info/MaxChargeCurrent': 10,
            '/Info/MaxChargeVoltage': 15,
            '/Info/MaxDischargeCurrent': 10})
    self._update_values(10000)
    # Current should be shared with solar chargers. Voltage
    # reflects the Multi's /Hub/ChargeVoltage
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO1': {
            '/Link/ChargeVoltage': 12.6,
            '/Link/ChargeCurrent': 35,
            '/Link/NetworkMode': 13,
        }})
    self._check_values({
        '/Control/EffectiveChargeVoltage': None,
        '/Control/Dvcc': 0
    })
    # Switch to DVCC
    self._set_setting('/Settings/Services/Bol', 1)
    self._update_values(10000)
    # Now the charge current of the BMS was used.
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO1': {
            '/Link/ChargeVoltage': 12.6,
            '/Link/ChargeCurrent': 18,  # 10 + 8 for the Multi
            '/Link/NetworkMode': 13,
        }})
    self._check_values({
        '/Control/Dvcc': 1,
        '/Control/EffectiveChargeVoltage': 12.6,
    })
def test_byd_bbox_p_quirks(self):
    """ BYD B-Box-Pro batteries should float at 55V when they send CCL=0. """
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 55.1,
            '/Dc/0/Current': 3,
            '/Dc/0/Power': 165.3,
            '/Soc': 100,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 100,
            '/Info/MaxChargeVoltage': 56.5,
            '/Info/MaxDischargeCurrent': 100,
            '/ProductId': 0xB00A})  # B-Box-Pro product id
    self._update_values(interval=10000)
    # While CCL > 0 the battery's own limits are passed through unchanged
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 100,
            '/BatteryOperationalLimits/MaxChargeVoltage': 56.5
        }
    })
    # CCL=0 triggers the quirk: 40A and a 55V float target are used instead
    self._monitor.set_value('com.victronenergy.battery.ttyO2', '/Info/MaxChargeCurrent', 0)
    self._update_values(interval=3000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 40,
            '/BatteryOperationalLimits/MaxChargeVoltage': 55
        }
    })
    self._check_values({ '/Control/EffectiveChargeVoltage': 55 })
def test_byd_bbox_l_quirks(self):
    """ BYD B-Box-L batteries should float at 55V when they send CCL=0. """
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 55.1,
            '/Dc/0/Current': 3,
            '/Dc/0/Power': 165.3,
            '/Soc': 100,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 100,
            '/Info/MaxChargeVoltage': 56.5,
            '/Info/MaxDischargeCurrent': 100,
            '/ProductId': 0xB015})  # B-Box-L product id
    self._update_values(interval=10000)
    # While CCL > 0 the battery's own limits are passed through unchanged
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 100,
            '/BatteryOperationalLimits/MaxChargeVoltage': 56.5
        }
    })
    # CCL=0 triggers the quirk: 40A and a 55V float target are used instead
    self._monitor.set_value('com.victronenergy.battery.ttyO2', '/Info/MaxChargeCurrent', 0)
    self._update_values(interval=3000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 40,
            '/BatteryOperationalLimits/MaxChargeVoltage': 55
        }
    })
    self._check_values({ '/Control/EffectiveChargeVoltage': 55 })
def test_lg_quirks(self):
    """ LG Batteries run at 57.7V, when we add an 0.4V offset we sometimes
    trip the overvoltage protection at 58.1V. So we attempt to avoid that
    when feed-in is active. """
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 55.1,
            '/Dc/0/Current': 3,
            '/Dc/0/Power': 165.3,
            '/Soc': 100,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 94,
            '/Info/MaxChargeVoltage': 57.7,
            '/Info/MaxDischargeCurrent': 100,
            '/ProductId': 0xB004})  # LG battery product id
    self._update_values(interval=3000)
    # The 57.7V request is capped to 57.3V; the current limit is untouched
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 57.3,
            '/BatteryOperationalLimits/MaxChargeCurrent': 94
        }
    })
    self._check_values({ '/Control/EffectiveChargeVoltage': 57.3 })
def test_pylontech_quirks(self):
    """ Pylontech Batteries run at 53.2V and raise an alarm at 54V.
    We attempt to avoid this with a lower charge voltage. """
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 51.8,
            '/Dc/0/Current': 3,
            '/Dc/0/Power': 155.4,
            '/Soc': 95,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': None,
            '/Info/MaxChargeCurrent': 25,
            '/Info/MaxChargeVoltage': 53.2,
            '/Info/MaxDischargeCurrent': 25,
            '/InstalledCapacity': None,
            '/ProductId': 0xB009})  # Pylontech product id
    self._update_values(interval=3000)
    # The 53.2V request is lowered to 52.4V
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 52.4,
            '/BatteryOperationalLimits/MaxChargeCurrent': 25
        }
    })
    self._check_values({ '/Control/EffectiveChargeVoltage': 52.4 })
    # 24V battery is scaled accordingly
    self._monitor.set_value('com.victronenergy.battery.ttyO2', '/Info/MaxChargeVoltage', 28.4)
    self._monitor.set_value('com.victronenergy.battery.ttyO2', '/Info/MaxChargeCurrent', 55)
    self._update_values(interval=3000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 27.8,
            '/BatteryOperationalLimits/MaxChargeCurrent': 55
        }
    })
    self._check_values({ '/Control/EffectiveChargeVoltage': 27.8 })
    # 24V battery has a CCL=0 quirk, replace with 0.25C charge rate. If charge rate is unknown
    # assume a single module at 55Ah.
    self._monitor.set_value('com.victronenergy.battery.ttyO2', '/Info/MaxChargeCurrent', 0)
    self._update_values(interval=3000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 14
        }
    })
    # With a known capacity of 222Ah, 0.25C rounds to 56A
    self._monitor.set_value('com.victronenergy.battery.ttyO2', '/InstalledCapacity', 222)
    self._update_values(interval=3000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeCurrent': 56
        }
    })
def test_no_bms_max_charge_current_setting(self):
    # Test that with no BMS but a user limit, /Dc/0/MaxChargeCurrent is correctly set.
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 55.2)
    self._monitor.add_value('com.victronenergy.settings', '/Settings/CGwacs/OvervoltageFeedIn', 0)
    self._set_setting('/Settings/SystemSetup/MaxChargeCurrent', 40)
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 3,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Settings/ChargeCurrentLimit': 100,
        '/Dc/0/Voltage': 58.0,
        '/Dc/0/Current': 30,
        '/FirmwareVersion': 0x0129},
        connection='VE.Direct')
    self._update_values(interval=60000)
    # Solar charger receives 48A (user limit 40 plus what appears to be the
    # Multi's 8A DC draw — see the "8A vebus dc current" note in the n2k
    # test); the Multi's DC limit is 10 (presumably 40 minus the 30A of
    # solar production — TODO confirm against the delegate).
    self._check_external_values({
        'com.victronenergy.solarcharger.ttyO2': {
            '/Link/NetworkMode': 5,
            '/Link/ChargeCurrent': 40 + 8,
            '/Link/ChargeVoltage': 55.2},
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/BatteryLowVoltage': None,
            '/BatteryOperationalLimits/MaxChargeCurrent': None,
            '/BatteryOperationalLimits/MaxChargeVoltage': None,
            '/BatteryOperationalLimits/MaxDischargeCurrent': None,
            '/Dc/0/MaxChargeCurrent': 10}})
    self._check_values({
        '/Control/SolarChargeCurrent': 1,
        '/Control/SolarChargeVoltage': 1,
        '/Control/BmsParameters': 0})
def test_battery_properties(self):
    """ Test the properties of battery objects. """
    from delegates.dvcc import Dvcc
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 51.8,
            '/Dc/0/Current': 3,
            '/Dc/0/Power': 155.4,
            '/Soc': 95,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': None,
            '/Info/MaxChargeCurrent': 25,
            '/Info/MaxChargeVoltage': 53.2,
            '/Info/MaxDischargeCurrent': 25,
            '/ProductId': 0xB009})
    self._update_values(interval=3000)
    batteries = list(Dvcc.instance._batterysystem)
    self.assertEqual(batteries[0].device_instance, 2)
    # A battery publishing /Info limits counts as a BMS
    self.assertTrue(batteries[0].is_bms)
def test_bms_selection(self):
    """ Test that if there is more than one BMS in the system,
    the active battery service is preferred. """
    from delegates.dvcc import Dvcc
    # Select device instance 1 as the active battery service
    self._set_setting('/Settings/SystemSetup/BatteryService', 'com.victronenergy.battery/1')
    self._check_values({'/ActiveBatteryService': None})
    self._add_device('com.victronenergy.battery.ttyO1',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 51.8,
            '/Dc/0/Current': 3,
            '/Dc/0/Power': 155.4,
            '/Soc': 95,
            '/DeviceInstance': 0,
            '/Info/BatteryLowVoltage': None,
            '/Info/MaxChargeCurrent': 25,
            '/Info/MaxChargeVoltage': 53.2,
            '/Info/MaxDischargeCurrent': 25,
            '/ProductId': 0xB009})
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 52.8,
            '/Dc/0/Current': 4,
            '/Dc/0/Power': 152.4,
            '/Soc': 95,
            '/DeviceInstance': 1,
            '/Info/BatteryLowVoltage': None,
            '/Info/MaxChargeCurrent': 25,
            '/Info/MaxChargeVoltage': 53.2,
            '/Info/MaxDischargeCurrent': 25,
            '/ProductId': 0xB009})
    self._check_values({'/ActiveBatteryService': 'com.victronenergy.battery/1'})
    self.assertEqual(len(Dvcc.instance._batterysystem.bmses), 2)
    # Check that the selected battery is chosen, as both here have BMSes
    self.assertEqual(Dvcc.instance.bms.service, 'com.victronenergy.battery.ttyO2')
def test_bms_selection_lowest_deviceinstance(self):
    """ Test that if there is more than one BMS in the system and none is
    the active battery service, the one with the lowest device instance
    is used. """
    from delegates.dvcc import Dvcc
    # Select a non-existent battery service to ensure that none is active
    self._set_setting('/Settings/SystemSetup/BatteryService', 'com.victronenergy.battery/111')
    # Deliberately added out of order to prove sorting by device instance
    for did in (1, 0, 2):
        self._add_device('com.victronenergy.battery.ttyO{}'.format(did),
            product_name='battery',
            values={
                '/Dc/0/Voltage': 51.8,
                '/Dc/0/Current': 3,
                '/Dc/0/Power': 155.4,
                '/Soc': 95,
                '/DeviceInstance': did,
                '/Info/BatteryLowVoltage': None,
                '/Info/MaxChargeCurrent': 25,
                '/Info/MaxChargeVoltage': 53.2,
                '/Info/MaxDischargeCurrent': 25,
                '/ProductId': 0xB009})
    self._check_values({'/ActiveBatteryService': None})
    self.assertEqual(len(Dvcc.instance._batterysystem.bmses), 3)
    # Check that the lowest deviceinstance is chosen, as all here have BMSes
    self.assertEqual(Dvcc.instance.bms.service, 'com.victronenergy.battery.ttyO0')
def test_bms_selection_no_bms(self):
    """ Test that delegate shows no BMS if none is available. """
    from delegates.dvcc import Dvcc
    # Battery publishes no /Info paths, so it does not qualify as a BMS
    self._add_device('com.victronenergy.battery.ttyO1',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 51.8,
            '/Dc/0/Current': 3,
            '/Dc/0/Power': 155.4,
            '/Soc': 95,
            '/DeviceInstance': 0})
    self.assertEqual(Dvcc.instance.bms, None)
def test_firmware_warning(self):
    """FirmwareInsufficient alarm: raised while any solar charger or the
    Multi runs too-old firmware, cleared on upgrade; the RS MPPT
    (product id 0xA102) is exempt despite its low version numbers."""
    self._add_device('com.victronenergy.solarcharger.ttyO1', {
        '/State': 0,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/FirmwareVersion': 0x129},
        connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.ttyO2', {
        '/State': 0,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/FirmwareVersion': 0x117},
        connection='VE.Direct')
    self._update_values(3000)
    # 0x117 is too old: alarm raised
    self._check_values({'/Dvcc/Alarms/FirmwareInsufficient': 1})
    # Upgrade ttyO2
    self._monitor.add_value('com.victronenergy.solarcharger.ttyO2', '/FirmwareVersion', 0x129)
    self._update_values(3000)
    self._check_values({'/Dvcc/Alarms/FirmwareInsufficient': 0})
    # Give it a 24-bit version that is too old
    self._monitor.add_value('com.victronenergy.solarcharger.ttyO2', '/FirmwareVersion', 0x101ff)
    self._update_values(3000)
    self._check_values({'/Dvcc/Alarms/FirmwareInsufficient': 1})
    # Give it a 24-bit version that is new
    self._monitor.add_value('com.victronenergy.solarcharger.ttyO2', '/FirmwareVersion', 0x102ff)
    self._update_values(3000)
    self._check_values({'/Dvcc/Alarms/FirmwareInsufficient': 0})
    # Downgrade the Multi
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/FirmwareVersion', 0x418)
    self._update_values(3000)
    self._check_values({'/Dvcc/Alarms/FirmwareInsufficient': 1})
    self._monitor.set_value('com.victronenergy.vebus.ttyO1', '/FirmwareVersion', 0x456)
    # Add an RS MPPT, give it "old" firmware, ensure it does not raise
    # the alarm. The RS MPPT has support despite the low number.
    self._add_device('com.victronenergy.solarcharger.ttyO3', {
        '/State': 0,
        '/Dc/0/Voltage': 12.4,
        '/Dc/0/Current': 9.7,
        '/ProductId': 0xA102,
        '/FirmwareVersion': 0x100ff},
        connection='VE.Direct')
    self._update_values(3000)
    self._check_values({'/Dvcc/Alarms/FirmwareInsufficient': 0})
def test_multiple_battery_warning(self):
    """MultipleBatteries alarm fires only once more than one battery
    publishes /Info/MaxChargeVoltage; plain batteries don't count."""
    self._check_values({'/Dvcc/Alarms/MultipleBatteries': 0})
    self._add_device('com.victronenergy.battery.ttyO1',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 58.1,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/MaxChargeVoltage': 55})
    self._update_values(3000)
    # One BMS-type battery: no alarm
    self._check_values({'/Dvcc/Alarms/MultipleBatteries': 0})
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 58.1,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 3})
    self._update_values(3000)
    # Second battery has no /Info/MaxChargeVoltage: still no alarm
    self._check_values({'/Dvcc/Alarms/MultipleBatteries': 0})
    self._add_device('com.victronenergy.battery.ttyO3',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 58.1,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 4,
            '/Info/MaxChargeVoltage': 54})
    self._update_values(3000)
    # Two BMS-type batteries present: alarm is raised
    self._check_values({'/Dvcc/Alarms/MultipleBatteries': 1})
def test_only_forward_charge_current_to_n2k_zero(self):
    """The charge-current limit is only written to the charger whose
    /N2kDeviceInstance is 0; the instance-1 unit is left alone."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 55.2)
    self._monitor.add_value('com.victronenergy.settings', '/Settings/CGwacs/OvervoltageFeedIn', 0)
    self._set_setting('/Settings/SystemSetup/MaxChargeCurrent', 10)
    self._add_device('com.victronenergy.solarcharger.socketcan_can0_vi0_B00B135', {
        '/State': 3,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Settings/ChargeCurrentLimit': 100,
        '/Dc/0/Voltage': 58.0,
        '/Dc/0/Current': 30,
        '/FirmwareVersion': 0x0129,
        '/N2kDeviceInstance': 0},
        connection='VE.Direct')
    self._add_device('com.victronenergy.solarcharger.socketcan_can0_vi0_B00B136', {
        '/State': 3,
        '/Link/NetworkMode': 0,
        '/Link/ChargeVoltage': None,
        '/Link/ChargeCurrent': None,
        '/Link/VoltageSense': None,
        '/Settings/ChargeCurrentLimit': 100,
        '/Dc/0/Voltage': 58.0,
        '/Dc/0/Current': 30,
        '/FirmwareVersion': 0x0129,
        '/N2kDeviceInstance': 1},
        connection='VE.Direct')
    self._update_values(60000)
    # Check that charge current limit is only forwarded to N2kDeviceInstance == 0
    self._check_external_values({
        'com.victronenergy.solarcharger.socketcan_can0_vi0_B00B135': {
            '/Link/ChargeCurrent': 10 + 8},  # 8A vebus dc current
        'com.victronenergy.solarcharger.socketcan_can0_vi0_B00B136': {
            '/Link/ChargeCurrent': None},
        })
def test_charge_voltage_override(self):
    """User MaxChargeVoltage setting: 0.0 disables it; otherwise the
    lower of the user setting and the BMS charge voltage wins."""
    self._monitor.add_value('com.victronenergy.vebus.ttyO1', '/Hub/ChargeVoltage', 55.2)
    self._set_setting('/Settings/SystemSetup/MaxChargeVoltage', 0.0)
    self._add_device('com.victronenergy.battery.ttyO2',
        product_name='battery',
        values={
            '/Dc/0/Voltage': 12.3,
            '/Dc/0/Current': 5.3,
            '/Dc/0/Power': 65,
            '/Soc': 15.3,
            '/DeviceInstance': 2,
            '/Info/BatteryLowVoltage': 47,
            '/Info/MaxChargeCurrent': 45,
            '/Info/MaxChargeVoltage': 58.2,
            '/Info/MaxDischargeCurrent': 50})
    self._update_values(interval=3000)
    # Following the battery
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2}})
    # Following lower of the two
    self._set_setting('/Settings/SystemSetup/MaxChargeVoltage', 59)
    self._update_values(interval=3000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 58.2}})
    # Following user limit
    self._set_setting('/Settings/SystemSetup/MaxChargeVoltage', 54.5)
    self._update_values(interval=3000)
    self._check_external_values({
        'com.victronenergy.vebus.ttyO1': {
            '/BatteryOperationalLimits/MaxChargeVoltage': 54.5}})
| 36.58499 | 112 | 0.705572 |
d01a67fe9e7ca5a044bf90cdc4ed6d374afe27ef | 2,446 | py | Python | sktime/dists_kernels/compose_from_align.py | mikofski/sktime | 87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7 | [
"BSD-3-Clause"
] | 2 | 2021-12-28T10:48:11.000Z | 2022-03-06T18:08:01.000Z | sktime/dists_kernels/compose_from_align.py | mikofski/sktime | 87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7 | [
"BSD-3-Clause"
] | null | null | null | sktime/dists_kernels/compose_from_align.py | mikofski/sktime | 87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Composer that creates distance from aligner."""
__author__ = ["fkiraly"]
import numpy as np
from sklearn import clone
from sktime.dists_kernels._base import BasePairwiseTransformerPanel
class DistFromAligner(BasePairwiseTransformerPanel):
    """Distance transformer from aligner.

    Behaviour: uses aligner.get_distance on pairs to obtain distance matrix.

    Components
    ----------
    aligner: BaseAligner, must implement get_distance method
    """

    _tags = {
        "symmetric": True,  # all the distances are symmetric
    }

    def __init__(self, aligner):
        self.aligner = aligner
        super(DistFromAligner, self).__init__()

    def _transform(self, X, X2=None):
        """Compute distance/kernel matrix.

        Core logic

        Behaviour: returns pairwise distance/kernel matrix
            between samples in X and X2
            if X2 is not passed, is equal to X
            if X/X2 is a pd.DataFrame and contains non-numeric columns,
                these are removed before computation

        Parameters
        ----------
        X: pd.DataFrame of length n, or 2D np.array with n rows
        X2: pd.DataFrame of length m, or 2D np.array with m rows, optional
            default X2 = X

        Returns
        -------
        distmat: np.array of shape [n, m]
            (i,j)-th entry contains distance/kernel between X.iloc[i] and X2.iloc[j]
        """
        # clone so repeated fits on pairs do not mutate the blueprint aligner
        self.aligner_ = clone(self.aligner)
        aligner = self.aligner_

        # find out whether we know that the resulting matrix is symmetric
        # since aligner distances are always symmetric,
        # we know it's the case for sure if X equals X2
        if X2 is None:
            # BUGFIX: original assigned ``X = X2`` here, setting X to None
            # and crashing at len(X) below; the docstring says X2 defaults
            # to X, so default it accordingly.
            X2 = X
            symm = True
        else:
            symm = False

        n = len(X)
        m = len(X2)

        distmat = np.zeros((n, m), dtype="float")

        for i in range(n):
            for j in range(m):
                if symm and j < i:
                    # mirror the already-computed lower/upper triangle
                    distmat[i, j] = distmat[j, i]
                else:
                    distmat[i, j] = aligner.fit([X[i], X2[j]]).get_distance()

        return distmat

    @classmethod
    def get_test_params(cls):
        """Test parameters for DistFromAligner."""
        # importing inside to avoid circular dependencies
        from sktime.alignment.dtw_python import AlignerDTW

        return {"aligner": AlignerDTW()}
| 28.114943 | 84 | 0.584219 |
5b2ad0ef35346a3aa7e1feaa7b79c5e92869b7ae | 4,707 | py | Python | Cap12/modulos/utils.py | CezarPoeta/Python-Fundamentos | 53972d21bea86fdba90a3fafa487be6959ccebb8 | [
"MIT"
] | 1 | 2019-02-03T10:53:55.000Z | 2019-02-03T10:53:55.000Z | 12-Introducao_a_Deep_Learning/modulos/utils.py | alineAssuncao/Python_Fundamentos_Analise_Dados | 872781f2cec24487b0f29f62afeb60650a451bfd | [
"MIT"
] | null | null | null | 12-Introducao_a_Deep_Learning/modulos/utils.py | alineAssuncao/Python_Fundamentos_Analise_Dados | 872781f2cec24487b0f29f62afeb60650a451bfd | [
"MIT"
] | null | null | null | # Imports
import pandas as pd
import numpy as np
import os, sys, inspect
from six.moves import cPickle as pickle
import scipy.misc as misc
# Parameters
IMAGE_SIZE = 48   # input images are IMAGE_SIZE x IMAGE_SIZE pixels
NUM_LABELS = 7    # one label per emotion class in the `emotion` map below

# Using 10% of the data for validation
VALIDATION_PERCENT = 0.1

# Normalization constant (half the image size); usage not shown in this chunk
IMAGE_LOCATION_NORM = IMAGE_SIZE // 2

# Seed for reproducible shuffling
np.random.seed(0)

# For training
train_error_list = []
train_step_list = []

# For validation
valid_error_list = []
valid_step_list = []

# Emotions: numeric label -> emotion name
emotion = {0:'anger',
           1:'disgust',
           2:'fear',
           3:'happy',
           4:'sad',
           5:'surprise',
           6:'neutral'}
# Class for the test result
class testResult:
    """Tallies emotion predictions by numeric label and reports the
    distribution of each emotion as a percentage of all evaluations."""

    def __init__(self):
        # One counter per emotion class, all starting at zero.
        self.anger = 0
        self.disgust = 0
        self.fear = 0
        self.happy = 0
        self.sad = 0
        self.surprise = 0
        self.neutral = 0

    def evaluate(self, label):
        """Increment the counter whose numeric code equals `label` (0-6)."""
        counters = ((0, 'anger'), (1, 'disgust'), (2, 'fear'), (3, 'happy'),
                    (4, 'sad'), (5, 'surprise'), (6, 'neutral'))
        for code, name in counters:
            if code == label:
                setattr(self, name, getattr(self, name) + 1)

    def display_result(self, evaluations):
        """Print each emotion's share of `evaluations` as a percentage."""
        for name in ('anger', 'disgust', 'fear', 'happy',
                     'sad', 'surprise', 'neutral'):
            share = (getattr(self, name) / float(evaluations)) * 100
            print(name + " = " + str(share) + "%")
# Reads train/test CSVs, caches them in a pickle, and returns the data splits.
def read_data(data_dir, force=False):
    """Load the emotion dataset from ``data_dir``.

    On the first call (or when ``force`` is True) train.csv/test.csv are
    parsed, pixel values are normalized to [0, 1], the training rows are
    shuffled and split into train/validation, and everything is cached in
    EmotionDetectorData.pickle. Later calls just reload that pickle.

    Returns:
        (train_images, train_labels, validation_images, validation_labels,
        test_images) — images shaped (-1, IMAGE_SIZE, IMAGE_SIZE, 1), labels
        one-hot shaped (-1, NUM_LABELS).
    """
    def create_onehot_label(x):
        # One row with NUM_LABELS columns and a single 1 at the class index.
        label = np.zeros((1, NUM_LABELS), dtype=np.float32)
        label[:, int(x)] = 1
        return label
    pickle_file = os.path.join(data_dir, "EmotionDetectorData.pickle")
    if force or not os.path.exists(pickle_file):
        train_filename = os.path.join(data_dir, "train.csv")
        data_frame = pd.read_csv(train_filename)
        # 'Pixels' holds space-separated grayscale values; scale to [0, 1].
        data_frame['Pixels'] = data_frame['Pixels'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        print("Lendo train.csv ...")
        train_images = np.vstack(data_frame['Pixels']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        print(train_images.shape)
        train_labels = np.array(list(map(create_onehot_label, data_frame['Emotion'].values))).reshape(-1, NUM_LABELS)
        print(train_labels.shape)
        # Shuffle images and labels with the same permutation before splitting.
        permutations = np.random.permutation(train_images.shape[0])
        train_images = train_images[permutations]
        train_labels = train_labels[permutations]
        # First VALIDATION_PERCENT of the shuffled rows become validation data.
        validation_percent = int(train_images.shape[0] * VALIDATION_PERCENT)
        validation_images = train_images[:validation_percent]
        validation_labels = train_labels[:validation_percent]
        train_images = train_images[validation_percent:]
        train_labels = train_labels[validation_percent:]
        print("Lendo test.csv ...")
        test_filename = os.path.join(data_dir, "test.csv")
        data_frame = pd.read_csv(test_filename)
        data_frame['Pixels'] = data_frame['Pixels'].apply(lambda x: np.fromstring(x, sep=" ") / 255.0)
        data_frame = data_frame.dropna()
        test_images = np.vstack(data_frame['Pixels']).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
        with open(pickle_file, "wb") as file:
            try:
                print('\nSalvando ...')
                save = {
                    "train_images": train_images,
                    "train_labels": train_labels,
                    "validation_images": validation_images,
                    "validation_labels": validation_labels,
                    "test_images": test_images,
                }
                pickle.dump(save, file, pickle.HIGHEST_PROTOCOL)
            # NOTE(review): bare except silently swallows any pickling error and
            # can leave a truncated pickle behind — consider narrowing it.
            except:
                print("Não foi possível salvar :/")
    # Always reload from the pickle so every call returns the cached split.
    with open(pickle_file, "rb") as file:
        save = pickle.load(file)
        train_images = save["train_images"]
        train_labels = save["train_labels"]
        validation_images = save["validation_images"]
        validation_labels = save["validation_labels"]
        test_images = save["test_images"]
    return train_images, train_labels, validation_images, validation_labels, test_images
| 34.610294 | 117 | 0.596346 |
af8d93cb519cac4ca1bc71007767f74c93b1ddf7 | 6,362 | py | Python | catalyst/dl/metrics.py | stalkermustang/catalyst | 687bc6c31dfdc44ae3ff62938e11e69ce1999cd4 | [
"MIT"
] | null | null | null | catalyst/dl/metrics.py | stalkermustang/catalyst | 687bc6c31dfdc44ae3ff62938e11e69ce1999cd4 | [
"MIT"
] | null | null | null | catalyst/dl/metrics.py | stalkermustang/catalyst | 687bc6c31dfdc44ae3ff62938e11e69ce1999cd4 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from .utils import get_activation_by_name
def accuracy(outputs, targets, topk=(1, )):
"""
Computes the accuracy@k for the specified values of k
"""
max_k = max(topk)
batch_size = targets.size(0)
_, pred = outputs.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(targets.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def average_accuracy(outputs, targets, k=10):
    """
    Computes the average accuracy at k.
    This function computes the average accuracy at k
    between two lists of items.
    Parameters
    ----------
    outputs : list
        A list of predicted elements
    targets : list
        A list of elements that are to be predicted
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The average accuracy at k over the input lists
    """
    # No relevant items: the score is 0 by convention. Guard early so the
    # scoring loop below is skipped entirely (the original checked this only
    # after iterating over every prediction).
    if not targets:
        return 0.0
    # Only the first k predictions are considered.
    if len(outputs) > k:
        outputs = outputs[:k]
    score = 0.0
    num_hits = 0.0
    for i, predict in enumerate(outputs):
        # Count a hit only the first time a relevant item appears.
        if predict in targets and predict not in outputs[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    return score / min(len(targets), k)
def mean_average_accuracy(outputs, targets, topk=(1, )):
    """
    Mean average accuracy at k.

    Averages :func:`average_accuracy` over every sample in the batch, once
    per requested cutoff.

    Parameters
    ----------
    outputs : torch.Tensor
        Model scores, one row of class scores per sample
    targets : torch.Tensor
        Ground-truth class index per sample
    topk : tuple, optional
        Cutoffs ``k`` to evaluate
    Returns
    -------
    list
        The mean average accuracy for each ``k`` in ``topk``
    """
    largest_k = max(topk)
    _, pred = outputs.topk(largest_k, 1, True, True)
    predicted_lists = pred.tolist()
    # Each ground-truth label becomes a one-element list of relevant items.
    actual_lists = [[label] for label in targets.data.cpu().numpy().tolist()]
    res = []
    for k in topk:
        per_sample = [average_accuracy(p, a, k) for a, p in zip(actual_lists, predicted_lists)]
        res.append(np.mean(per_sample))
    return res
def dice(outputs, targets, eps: float = 1e-7, activation: str = "sigmoid"):
    """
    Sørensen–Dice coefficient between predictions and targets.

    Args:
        outputs (list): A list of predicted elements
        targets (list): A list of elements that are to be predicted
        eps (float): epsilon added to numerator and denominator for stability
        activation (str): An torch.nn activation applied to the outputs.
            Must be one of ['none', 'sigmoid', 'softmax2d']
    Returns:
        double: Dice score
    """
    probabilities = get_activation_by_name(activation)(outputs)
    overlap = torch.sum(targets * probabilities)
    denominator = torch.sum(targets) + torch.sum(probabilities) + eps
    return (2 * overlap + eps) / denominator
def iou(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    eps: float = 1e-7,
    threshold: float = 0.5,
    activation: str = "sigmoid"
):
    """
    Intersection-over-union (Jaccard) score.

    Args:
        outputs (torch.Tensor): A list of predicted elements
        targets (torch.Tensor): A list of elements that are to be predicted
        eps (float): epsilon to avoid zero division
        threshold (float): threshold for outputs binarization; ``None`` keeps
            the soft probabilities
        activation (str): An torch.nn activation applied to the outputs.
            Must be one of ['none', 'sigmoid', 'softmax2d']
    Returns:
        float: IoU (Jaccard) score
    """
    probabilities = get_activation_by_name(activation)(outputs)
    if threshold is not None:
        probabilities = (probabilities > threshold).float()
    overlap = torch.sum(targets * probabilities)
    union = torch.sum(targets) + torch.sum(probabilities) - overlap + eps
    return (overlap + eps) / union
# The Jaccard index is simply another name for IoU.
jaccard = iou
def soft_iou(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    eps: float = 1e-7,
    threshold: float = 0.5,
    activation: str = "sigmoid"
):
    """
    Channel-averaged IoU ("soft" Jaccard) for multi-class segmentation.

    Computes :func:`iou` independently for every class channel (dim 1) and
    returns the mean over channels.

    Args:
        outputs (torch.Tensor): A list of predicted elements
        targets (torch.Tensor): A list of elements that are to be predicted
        eps (float): epsilon to avoid zero division
        threshold (float): threshold for outputs binarization
        activation (str): An torch.nn activation applied to the outputs.
            Must be one of ['none', 'sigmoid', 'softmax2d']
    Returns:
        float: SoftIoU (SoftJaccard) score
    """
    per_class_scores = [
        iou(
            outputs[:, class_i, :, :],
            targets[:, class_i, :, :],
            eps=eps,
            threshold=threshold,
            activation=activation,
        )
        for class_i in range(outputs.shape[1])
    ]
    return torch.mean(torch.stack(per_class_scores))
def f_score(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    beta: float = 1,
    eps: float = 1e-7,
    threshold: float = 0.5,
    activation: str = "sigmoid"
):
    """
    F-beta score (F1 with the default ``beta=1``).

    Source:
        https://github.com/qubvel/segmentation_models.pytorch
    Args:
        outputs (torch.Tensor): A list of predicted elements
        targets (torch.Tensor): A list of elements that are to be predicted
        eps (float): epsilon to avoid zero division
        beta (float): beta param for f_score
        threshold (float): threshold for outputs binarization
        activation (str): An torch.nn activation applied to the outputs.
            Must be one of ['none', 'sigmoid', 'softmax2d']
    Returns:
        float: F_1 score
    """
    probabilities = get_activation_by_name(activation)(outputs)
    if threshold is not None:
        probabilities = (probabilities > threshold).float()
    # Confusion-matrix terms derived from elementwise sums.
    tp = torch.sum(targets * probabilities)
    fp = torch.sum(probabilities) - tp
    fn = torch.sum(targets) - tp
    denominator = (1 + beta ** 2) * tp + beta ** 2 * fn + fp + eps
    return ((1 + beta ** 2) * tp + eps) / denominator
| 28.657658 | 76 | 0.615687 |
e54b23d8b5ab770cf680fd278a771bfcf0ea612d | 4,227 | py | Python | HeavyFlavorAnalysis/Onia2MuMu/python/oniaPATMuonsWithTrigger_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | HeavyFlavorAnalysis/Onia2MuMu/python/oniaPATMuonsWithTrigger_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | HeavyFlavorAnalysis/Onia2MuMu/python/oniaPATMuonsWithTrigger_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
#this is our version of the patMuonsWithTrigger from MuonAnalysis, we have rename all methods to avoid any clash, and remove
#all dependencies othen than to PatAlgos.
### ==== Make PAT Muons ====
import PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi
# Clone the standard PAT muon producer: embed the tracks we need and switch
# off MC matching / isolation features this analysis does not use.
oniaPATMuonsWithoutTrigger = PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi.patMuons.clone(
    muonSource = 'muons',
    # embed the tracks, so we don't have to carry them around
    embedTrack = True,
    embedCombinedMuon = True,
    embedStandAloneMuon = True,
    embedPFCandidate = False,
    embedCaloMETMuonCorrs = cms.bool(False),
    embedTcMETMuonCorrs = cms.bool(False),
    embedPfEcalEnergy = cms.bool(False),
    # then switch off some features we don't need
    embedPickyMuon = False,
    embedTpfmsMuon = False,
    userIsolation = cms.PSet(), # no extra isolation beyond what's in reco::Muon itself
    isoDeposits = cms.PSet(), # no heavy isodeposits
    addGenMatch = False, # no mc: T&P doesn't take it from here anyway.
)
# Reset all these; the default in muonProducer_cfi is not empty, but wrong
oniaPATMuonsWithoutTrigger.userData.userInts.src = []
oniaPATMuonsWithoutTrigger.userData.userFloats.src = []
oniaPATMuonsWithoutTrigger.userData.userCands.src = []
oniaPATMuonsWithoutTrigger.userData.userClasses.src = []
### ==== Unpack trigger, and match ====
from PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cfi import patTrigger as oniaPATTriggerTMP
oniaPATTriggerTMP.onlyStandAlone = True
# Keep only the trigger objects from the muon HLT collections listed below.
oniaPATTrigger = cms.EDProducer("TriggerObjectFilterByCollection",
    src = cms.InputTag("oniaPATTriggerTMP"),
    collections = cms.vstring("hltL2MuonCandidates", "hltL3MuonCandidates", "hltHighPtTkMuonCands", "hltGlbTrkMuonCands")
)
### ==== Then perform a match for all HLT triggers of interest
# Base matcher template; the per-collection matchers below are clones of it
# with the matchedCuts / maxDeltaR tightened per trigger level.
PATmuonTriggerMatchHLT = cms.EDProducer( "PATTriggerMatcherDRDPtLessByR",
    src = cms.InputTag( "oniaPATMuonsWithoutTrigger" ),
    matched = cms.InputTag( "oniaPATTrigger" ),
    matchedCuts = cms.string(""),
    maxDPtRel = cms.double( 0.5 ),
    maxDeltaR = cms.double( 0.5 ),
    resolveAmbiguities = cms.bool( True ),
    resolveByMatchQuality = cms.bool( True ) #change with respect to previous tag
)
PATmuonMatchHLTL2 = PATmuonTriggerMatchHLT.clone(matchedCuts = cms.string('coll("hltL2MuonCandidates")'),
    maxDeltaR = 0.3, maxDPtRel = 10.0) #maxDeltaR Changed accordingly to Zoltan tuning. It was: 1.2
PATmuonMatchHLTL3 = PATmuonTriggerMatchHLT.clone(matchedCuts = cms.string('coll("hltL3MuonCandidates")'),
    maxDeltaR = 0.1, maxDPtRel = 10.0) #maxDeltaR Changed accordingly to Zoltan tuning. It was: 0.5
PATmuonMatchHLTL3T = PATmuonTriggerMatchHLT.clone(matchedCuts = cms.string('coll("hltGlbTrkMuonCands")'),
    maxDeltaR = 0.1, maxDPtRel = 10.0) #maxDeltaR Changed accordingly to Zoltan tuning. It was: 0.5
PATmuonMatchHLTTkMu = PATmuonTriggerMatchHLT.clone(matchedCuts = cms.string('coll("hltHighPtTkMuonCands")'),
    maxDeltaR = 0.1, maxDPtRel = 10.0) #maxDeltaR Changed accordingly to Zoltan tuning. It was: 0.5
# Run every single-muon matcher.
oniaPATTriggerMatchers1Mu = cms.Sequence(
    PATmuonMatchHLTL2 +
    PATmuonMatchHLTL3 +
    PATmuonMatchHLTL3T +
    PATmuonMatchHLTTkMu
)
oniaPATTriggerMatchers1MuInputTags = [
    cms.InputTag('PATmuonMatchHLTL2'),
    cms.InputTag('PATmuonMatchHLTL3'),
    cms.InputTag('PATmuonMatchHLTL3T'),
    cms.InputTag('PATmuonMatchHLTTkMu'),
]
## ==== Embed ====
# Embed the trigger matches computed above into the PAT muons.
oniaPATMuonsWithTrigger = cms.EDProducer( "PATTriggerMatchMuonEmbedder",
    src = cms.InputTag( "oniaPATMuonsWithoutTrigger" ),
    matches = cms.VInputTag()
)
oniaPATMuonsWithTrigger.matches += oniaPATTriggerMatchers1MuInputTags
## ==== Trigger Sequence ====
oniaPATTriggerMatching = cms.Sequence(
    oniaPATTriggerTMP * oniaPATTrigger *
    oniaPATTriggerMatchers1Mu *
    oniaPATMuonsWithTrigger
)
# Full sequence exported to client configurations.
oniaPATMuonsWithTriggerSequence = cms.Sequence(
    oniaPATMuonsWithoutTrigger *
    oniaPATTriggerMatching
)
| 45.945652 | 152 | 0.706884 |
a8783f17bd6ea2b8dae7aa735e20eaedb9bf483c | 983 | py | Python | vessels_info/baltic/baltic/spiders/Baltic_Only_IMO.py | ParveshDhawan/Scrapping_Spiders | 7139ed60af9a2982d4ac085a98fa648b796bdadd | [
"MIT"
] | null | null | null | vessels_info/baltic/baltic/spiders/Baltic_Only_IMO.py | ParveshDhawan/Scrapping_Spiders | 7139ed60af9a2982d4ac085a98fa648b796bdadd | [
"MIT"
] | null | null | null | vessels_info/baltic/baltic/spiders/Baltic_Only_IMO.py | ParveshDhawan/Scrapping_Spiders | 7139ed60af9a2982d4ac085a98fa648b796bdadd | [
"MIT"
] | null | null | null | import scrapy
from scrapy_selenium import SeleniumRequest
from selenium.common.exceptions import NoSuchElementException
class BalticOnlyImoSpider(scrapy.Spider):
    """Crawl balticshipping.com's vessel listing and yield each vessel's
    detail-page URL (presumably consumed later to scrape IMO data)."""
    name = 'Baltic_Only_IMO'
    def start_requests(self):
        # Render the listing with Selenium: the vessel grid and the "Next"
        # pager are built by JavaScript, so a plain HTTP fetch is not enough.
        yield SeleniumRequest(
            url='http://www.balticshipping.com/vessels/',
            screenshot=True,
            callback = self.parse
        )
    def parse(self, response):
        # Reuse the Selenium driver that scrapy-selenium attached to the response.
        driver = response.meta['driver']
        while True:
            try:
                # Collect every vessel card link on the current page.
                page_links = [h.get_attribute('href') for h in driver.find_elements_by_xpath("//a[@class='col-sm-4 vessel-list-item']")]
                for i in page_links:
                    yield {
                        'IMO_Links':i,
                    }
                # Advance the pager; once there is no "Next" anchor,
                # find_element_by_xpath raises and the loop ends below.
                next_page = driver.find_element_by_xpath("//a[(text()= 'Next')]")
                next_page.click()
                driver.implicitly_wait(10)
            except NoSuchElementException:
                # Last page reached (or selector missing): stop crawling.
                break
e10d2f466158feb49f4440d571199f99be62a94c | 524 | py | Python | app/core/management/utils/xse_client.py | OpenLXP/openlxp-xis | b15244d5885c4367d661247462d865e7a30d8996 | [
"Apache-2.0"
] | null | null | null | app/core/management/utils/xse_client.py | OpenLXP/openlxp-xis | b15244d5885c4367d661247462d865e7a30d8996 | [
"Apache-2.0"
] | 17 | 2020-12-15T18:33:30.000Z | 2022-03-18T02:46:15.000Z | app/core/management/utils/xse_client.py | OpenLXP/openlxp-xis | b15244d5885c4367d661247462d865e7a30d8996 | [
"Apache-2.0"
] | null | null | null | import logging
from core.models import XISConfiguration
logger = logging.getLogger('dict_config_logger')
def get_elasticsearch_endpoint():
    """Return the XSE host used for XIS <-> XSE communication."""
    return XISConfiguration.objects.first().xse_host
def get_elasticsearch_index():
    """Return the Elasticsearch index name from the stored XIS configuration."""
    return XISConfiguration.objects.first().xse_index
| 26.2 | 62 | 0.769084 |
d9e42f0d1fe751179b5d4fe38c7115b0282c6154 | 305 | py | Python | 2016/program-stream-schedule/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2016/program-stream-schedule/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2016/program-stream-schedule/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z | #!/usr/bin/env python
import base_filters
# Google Doc key that holds this graphic's copy text.
COPY_GOOGLE_DOC_KEY = '1b2Km3mfrn5IEXr2RbRJ1WUarzzYSAJosdRcNdXLnjqk'
# This graphic ships no separate assets rig.
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
# Jinja filters made available to this graphic's templates.
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.819672 |
b52cb6675b2aef569dd31f5896ce310a7749af0d | 7,367 | py | Python | composer/utils/reproducibility.py | growlix/composer | 27418a3c65dca26d90ac09c6ae67cbd5d0202ccf | [
"Apache-2.0"
] | 945 | 2021-10-13T16:24:20.000Z | 2022-03-31T21:21:54.000Z | composer/utils/reproducibility.py | growlix/composer | 27418a3c65dca26d90ac09c6ae67cbd5d0202ccf | [
"Apache-2.0"
] | 544 | 2021-10-13T20:23:27.000Z | 2022-03-31T02:47:54.000Z | composer/utils/reproducibility.py | growlix/composer | 27418a3c65dca26d90ac09c6ae67cbd5d0202ccf | [
"Apache-2.0"
] | 39 | 2021-10-13T14:33:33.000Z | 2022-03-31T11:13:19.000Z | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper utilities for configuring deterministic training to ensure reproducibility.
.. note::
For deterministic model initialization, :func:`~.seed_all` and/or
:func:`~.configure_deterministic_mode` should be
invoked before creating and initializing a model, before creating the :class:`~.Trainer`.
For example:
.. testsetup::
import functools
import torch.nn
import warnings
warnings.filterwarnings(action="ignore", message="Deterministic mode is activated.")
MyModel = Model
.. doctest::
>>> import torch.nn
>>> from composer.utils import reproducibility
>>> reproducibility.configure_deterministic_mode()
>>> reproducibility.seed_all(42)
>>> model = MyModel()
>>> def init_weights(m):
... if isinstance(m, torch.nn.Linear):
... torch.nn.init.xavier_uniform(m.weight)
>>> # model will now be deterministically initialized, since the seed is set.
>>> init_weights(model)
>>> trainer = Trainer(model=model, seed=42)
Note that the seed must also be passed to the Trainer, otherwise the Trainer
would generate a random seed based on the timestamp (see :func:`~.get_random_seed`).
.. testcleanup::
warnings.resetwarnings()
Attributes:
MAX_SEED (int): The maximum allowed seed, which is :math:`2^{32} - 1`.
"""
from __future__ import annotations
import os
import random
import textwrap
import time
import warnings
from typing import Any, Dict, List
import numpy as np
import torch
import torch.backends.cudnn
from composer.utils import dist
__all__ = [
"configure_deterministic_mode",
"get_random_seed",
"seed_all",
"get_rng_state",
"load_rng_state",
"MAX_SEED",
]
# seeds must be 32-bit unsigned integers
MAX_SEED = 2**32 - 1
def configure_deterministic_mode():
    """Put PyTorch into fully deterministic mode.

    Enables deterministic algorithms, forces deterministic (non-benchmarked)
    cuDNN kernels, and sets ``CUBLAS_WORKSPACE_CONFIG`` so cuBLAS results are
    reproducible. When training on a GPU, call this before any CUDA operation.

    .. note::

        When using the :class:`~composer.trainer.trainer.Trainer`, prefer its
        ``deterministic_mode`` flag, which invokes this function for you. Call
        it manually only for operations that run before the trainer exists.

    .. note::

        Deterministic mode degrades performance. Do not use outside of
        testing and debugging.
    """
    # cuBLAS needs a fixed workspace size to be reproducible; see
    # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
    # and https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    torch.use_deterministic_algorithms(True)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    warnings.warn("Deterministic mode is activated. This will negatively impact performance.", category=UserWarning)
def get_random_seed() -> int:
    """Return a fresh random seed in ``[0, MAX_SEED]``.

    The seed comes from a dedicated :class:`random.Random` seeded with the
    current wall-clock time, so it ignores any previously set global seed.

    .. warning::

        This random seed is NOT cryptographically secure.

    Returns:
        int: A random seed.
    """
    clock_rng = random.Random(int(time.time_ns()))
    seed = clock_rng.randint(0, MAX_SEED)
    assert 0 <= seed <= MAX_SEED, "seed should be on this range"
    return seed
def seed_all(seed: int):
    """Seed every RNG used during training (python, numpy, torch, torch.cuda).

    .. note::

        When using the :class:`~composer.trainer.trainer.Trainer`, pass the
        ``seed`` parameter instead of invoking this function directly. Call it
        manually only to seed operations that happen before the trainer is
        created.

    Args:
        seed (int): The random seed

    Raises:
        ValueError: If ``seed`` is outside ``[0, MAX_SEED]``.
    """
    if not 0 <= seed <= MAX_SEED:
        raise ValueError(f"Seed {seed} is invalid. It must be on [0; 2^32 - 1]")
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # torch.manual_seed may already seed CUDA, but call manual_seed_all
    # explicitly so every device is covered at least once.
    torch.cuda.manual_seed_all(seed)
def get_rng_state() -> List[Dict[str, Any]]:
    """Capture the RNG state of every rank.

    Returns:
        List[Dict[str, Any]]: A list of RNG State Dicts, indexed by global rank.
    """
    state = {
        "python": random.getstate(),
        "numpy": np.random.get_state(),
        "torch": torch.random.get_rng_state(),
    }
    # CUDA state is captured only when a device has been initialized.
    # This will not be compatible with model parallelism.
    if torch.cuda.is_available() and torch.cuda.is_initialized():
        state['cuda'] = torch.cuda.get_rng_state()
    return dist.all_gather_object(state)
def load_rng_state(rng_state_dicts: List[Dict[str, Any]]):
    """Restore the RNG state.

    Mismatches between the current world size and the number of serialized
    states are tolerated with a warning rather than an error, so checkpoints
    remain portable across differently-sized runs and CPU/GPU machines.

    Args:
        rng_state_dicts (List[Dict[str, Any]]): The list of RNG state dicts to restore,
            as returned by :func:`get_rng_state`.
    """
    # More ranks than serialized states: extra ranks keep their current RNG.
    # (Fixed: the message previously lacked the ")" after get_world_size().)
    if dist.get_world_size() > len(rng_state_dicts):
        warnings.warn(
            textwrap.dedent(f"""\
                The current world size ({dist.get_world_size()}) is greater than the number of RNG state(s) serialized
                ({len(rng_state_dicts)}). Only the first {len(rng_state_dicts)} rank(s) will have their RNG restored.
                """))
    # Fewer ranks than states: trailing serialized states are ignored.
    if dist.get_world_size() < len(rng_state_dicts):
        warnings.warn(
            textwrap.dedent(f"""\
                The current world size ({dist.get_world_size()}) is less than the number of RNG state(s) serialized
                ({len(rng_state_dicts)}). Only the first {dist.get_world_size()} RNG state(s) will be consumed;
                the remaining will be ignored."""))
    if dist.get_global_rank() < len(rng_state_dicts):
        rng_state_dict = rng_state_dicts[dist.get_global_rank()]
        torch.set_rng_state(rng_state_dict['torch'])
        random.setstate(rng_state_dict['python'])
        np.random.set_state(rng_state_dict['numpy'])
        is_cuda_available = torch.cuda.is_available() and torch.cuda.is_initialized()
        has_cuda_rng_state = "cuda" in rng_state_dict
        if is_cuda_available and has_cuda_rng_state:
            torch.cuda.set_rng_state(rng_state_dict['cuda'])
        # Warn (don't fail) on CUDA/state mismatches so checkpoints stay
        # portable between CPU-only and GPU machines.
        if is_cuda_available and not has_cuda_rng_state:
            warnings.warn(
                textwrap.dedent(f"""\
                    The checkpoint did not include the CUDA RNG state. The CUDA RNG will have a
                    non-deterministic state."""))
        if not is_cuda_available and has_cuda_rng_state:
            warnings.warn(
                textwrap.dedent(f"""\
                    The checkpoint included CUDA RNG state, but CUDA is not being used.
                    As such, the CUDA RNG state will be ignored."""))
89faba85a27852d9fd81aa541f88d5aec23ef96a | 326 | py | Python | mikhailova_daria_dz_2/task_4.py | DariaShidova/1824_GB_Python_1 | 23ab1829260caa092cbf10b06f766fe95937cecf | [
"MIT"
] | null | null | null | mikhailova_daria_dz_2/task_4.py | DariaShidova/1824_GB_Python_1 | 23ab1829260caa092cbf10b06f766fe95937cecf | [
"MIT"
] | null | null | null | mikhailova_daria_dz_2/task_4.py | DariaShidova/1824_GB_Python_1 | 23ab1829260caa092cbf10b06f766fe95937cecf | [
"MIT"
] | null | null | null | worker_list = ['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА', 'токарь высшего разряда нИКОЛАй', 'директор '
'аэлита']
for i in worker_list:
name = (i.split()[-1]).capitalize()
print(f'Привет,{name:}!')
| 54.333333 | 117 | 0.46319 |
8482ecbe8e1310e26dd0dbda55a2ed57d4edef69 | 5,355 | py | Python | bin/find_basename_res.py | cirosantilli/python-utils | 3854d2c7973c6382f76e311423c219bccacb8c1d | [
"MIT"
] | 1 | 2018-10-04T15:29:04.000Z | 2018-10-04T15:29:04.000Z | bin/find_basename_res.py | cirosantilli/python-utils | 3854d2c7973c6382f76e311423c219bccacb8c1d | [
"MIT"
] | null | null | null | bin/find_basename_res.py | cirosantilli/python-utils | 3854d2c7973c6382f76e311423c219bccacb8c1d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re
import sys
import os
import itertools
import termcolor
from cirosantilli import files
from cirosantilli import argparse_extras
# NOTE(review): Python 2 script — relies on `unicode`, `itertools.izip` and
# byte-oriented stdout writes; it will not run unmodified under Python 3.
if __name__ == '__main__':
    parser = argparse_extras.ArgumentParser(
        description="find recursivelly paths whose basenames match all given regexes",
        epilog="""matches don't need to begin at start of basename.
    EXAMPLES
        %(f)s ab cd
            finds all files containing both "as" and "df" on their basenames
            sample finds:
                0aB1cD.txt
        %(f)s -0I ab cd
            I: case is taken into consideration
            0: output is null terminated
            sample *not* finds:
                0aB1cD.txt
        %(f)s -n ab cd
            -n: negates ab, but not cd
            sample finds:
                0cD1.txt
            sample *not* finds:
                0aB1cD.txt
                > this contains ab
        (%(f)s -n ab;%(f)s cd)
            this is how you can do a OR operation at bash command line!
            using and, not and or, you can get any expression.
            sample finds:
                0ab1cd.txt
                01.txt
                0cd1.txt
            sample *not* finds:
                0ab1.txt
    TODO
        add option to output full path
    """)
    #optional
    # Presumably adds -I/--ignorecase style flags; defined in the project's
    # argparse_extras module — verify there.
    argparse_extras.add_not_ignorecase(parser)
    parser.add_argument('-m','--min-depth',
        default=0,
        action='store',
        type=int,
        help="min search depth. 1 makes search start from current dir",
    )
    parser.add_argument('-M','--max-depth',
        default=float('inf'),
        action='store',
        type=int,
        help="max search depth. 1 limits search to current dir",
    )
    parser.add_argument('-n','--negated',
        default=[],
        action='append',
        help="if the following regex matches, exclude from output",
    )
    parser.add_argument('-t','--type',
        default='a',
        choices='adf',
        help="type of files to select. a: all, d: dirs only, f: files only",
    )
    argparse_extras.add_null_separated_output(parser)
    #positional
    parser.add_argument('find',
        nargs='*',
        help="regexes to use to filter, prints output iff given strings match all regexes"
    )
    args = parser.parse_args()
    #adapter: translate parsed CLI args into compiled regexes and flags
    re_args = re.UNICODE
    if args.ignorecase:
        re_args = re_args | re.IGNORECASE
    negated = args.negated
    min_depth = args.min_depth
    max_depth = args.max_depth
    # Under Python 2, map() returns a list, so the compiled patterns are
    # reused across every visited path.
    res = map(lambda r: re.compile( unicode(r, sys.stdin.encoding), re_args), args.find)
    negated_res = map(lambda r: re.compile( unicode(r, sys.stdin.encoding), re_args), args.negated)
    if args.null_separated_output:
        output_separator = u"\0"
    else:
        output_separator = u"\n"
    select_files = True
    select_dirs = True
    if args.type == 'f':
        select_dirs = False
    elif args.type == 'd':
        select_files = False
    encoding = 'utf-8' #TODO make encoding option
    #act
    stdout_isatty = sys.stdout.isatty()
    for path in files.find(
        u".",
        min_depth=min_depth,
        max_depth=max_depth,
    ):
        isfile = os.path.isfile(path)
        if ( isfile and select_files ) or ( not isfile and select_dirs ):
            #initialize
            head, bname = os.path.split(path)
            accept=True
            color_spans = [] #start end pairs span pairs to color in between
            #find those that match: every positive regex must match the basename
            for reg in res:
                if stdout_isatty: #must find all matches to color them later
                    matches = list(reg.finditer(bname))
                    if matches:
                        color_spans.extend(m.span() for m in matches)
                    else:
                        accept = False
                        break
                else: #pipe: no coloring, so only find one match
                    if not reg.search(bname):
                        accept = False
                        break
            #don't take if a negation matches
            if accept:
                for reg in negated_res:
                    if reg.search(bname):
                        accept = False
                        break
            #print
            if accept:
                sys.stdout.write( (head + os.path.sep).encode(encoding) )
                if stdout_isatty: #color
                    # Emit the basename char by char, highlighting characters
                    # that fall inside any recorded match span.
                    for i,c in itertools.izip(itertools.count(),bname):
                        printed = False
                        for color_span in color_spans:
                            if i >= color_span[0] and i < color_span[1]:
                                termcolor.cprint(
                                    c,
                                    'red',
                                    'on_blue',
                                    attrs=['bold'],
                                    end=''
                                )
                                printed = True
                                break;
                        if not printed:
                            sys.stdout.write( c.encode(encoding) )
                else: #don't color: may break grep, etc, since terminal color means extra control chars
                    sys.stdout.write( bname.encode(encoding) )
                sys.stdout.write( output_separator )
| 27.603093 | 103 | 0.513352 |
c1720e86042a0d253d0197cb8adba45ac4fe395e | 8,661 | py | Python | kubernetes_asyncio/client/models/v1_persistent_volume_claim_condition.py | weltonrodrigo/kubernetes_asyncio | b793f3e9ea43cbd0f4ff40ace1b0b677682f4042 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_persistent_volume_claim_condition.py | weltonrodrigo/kubernetes_asyncio | b793f3e9ea43cbd0f4ff40ace1b0b677682f4042 | [
"Apache-2.0"
] | 13 | 2021-04-12T02:03:48.000Z | 2022-03-28T02:08:46.000Z | kubernetes_asyncio/client/models/v1_persistent_volume_claim_condition.py | weltonrodrigo/kubernetes_asyncio | b793f3e9ea43cbd0f4ff40ace1b0b677682f4042 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.16.14
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1PersistentVolumeClaimCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_probe_time': 'datetime',
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_probe_time': 'lastProbeTime',
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
    def __init__(self, last_probe_time=None, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1PersistentVolumeClaimCondition - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to the default client configuration when none is given;
        # it controls the client-side validation performed by the setters.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Private backing fields for the generated properties below.
        self._last_probe_time = None
        self._last_transition_time = None
        self._message = None
        self._reason = None
        self._status = None
        self._type = None
        self.discriminator = None
        # Optional fields are assigned only when provided, so an unset
        # attribute stays None without tripping setter validation.
        if last_probe_time is not None:
            self.last_probe_time = last_probe_time
        if last_transition_time is not None:
            self.last_transition_time = last_transition_time
        if message is not None:
            self.message = message
        if reason is not None:
            self.reason = reason
        # status and type are assigned unconditionally (the status setter
        # rejects None; type presumably likewise — see its setter).
        self.status = status
        self.type = type
@property
def last_probe_time(self):
"""Gets the last_probe_time of this V1PersistentVolumeClaimCondition. # noqa: E501
Last time we probed the condition. # noqa: E501
:return: The last_probe_time of this V1PersistentVolumeClaimCondition. # noqa: E501
:rtype: datetime
"""
return self._last_probe_time
@last_probe_time.setter
def last_probe_time(self, last_probe_time):
"""Sets the last_probe_time of this V1PersistentVolumeClaimCondition.
Last time we probed the condition. # noqa: E501
:param last_probe_time: The last_probe_time of this V1PersistentVolumeClaimCondition. # noqa: E501
:type: datetime
"""
self._last_probe_time = last_probe_time
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1PersistentVolumeClaimCondition. # noqa: E501
Last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1PersistentVolumeClaimCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1PersistentVolumeClaimCondition.
Last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1PersistentVolumeClaimCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1PersistentVolumeClaimCondition. # noqa: E501
Human-readable message indicating details about last transition. # noqa: E501
:return: The message of this V1PersistentVolumeClaimCondition. # noqa: E501
:rtype: str
"""
return self._message
    @message.setter
    def message(self, message):
        """Set the human-readable message describing the last transition.

        :param message: The message of this V1PersistentVolumeClaimCondition.
        :type: str
        """
        self._message = message
    @property
    def reason(self):
        """Short machine-understandable reason for the last transition.

        For example, \"ResizeStarted\" means the underlying persistent volume is
        being resized.

        :return: The reason of this V1PersistentVolumeClaimCondition.
        :rtype: str
        """
        return self._reason
    @reason.setter
    def reason(self, reason):
        """Set the short machine-understandable transition reason.

        For example, \"ResizeStarted\" means the underlying persistent volume is
        being resized.

        :param reason: The reason of this V1PersistentVolumeClaimCondition.
        :type: str
        """
        self._reason = reason
    @property
    def status(self):
        """Status of the condition.

        :return: The status of this V1PersistentVolumeClaimCondition.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Set the condition status.

        The generated client-side validation treats this field as required:
        assigning None raises ValueError when validation is enabled.

        :param status: The status of this V1PersistentVolumeClaimCondition.
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        self._status = status
    @property
    def type(self):
        """Type of the condition.

        :return: The type of this V1PersistentVolumeClaimCondition.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Set the condition type.

        The generated client-side validation treats this field as required:
        assigning None raises ValueError when validation is enabled.

        :param type: The type of this V1PersistentVolumeClaimCondition.
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type
    def to_dict(self):
        """Return the model's properties as a dict.

        Nested generated models (anything exposing ``to_dict``) are serialised
        recursively, including models stored inside lists and dict values.
        """
        result = {}
        # openapi_types maps attribute name -> declared type; iterate it so
        # only schema-declared attributes are serialised.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model: delegate to its own serialiser.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise model values inside dicts (keys kept).
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeClaimCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeClaimCondition):
return True
return self.to_dict() != other.to_dict()
| 33.183908 | 232 | 0.637802 |
dfd9d4a7f2a59de9253d22d1fd14126cb7c5a7ae | 3,991 | py | Python | corridor_experiments/resqbot_2d_simulator.py | roni-permana-saputra/HiDO-MPC-ResQbot | eb6dfabf348bed412bdb0212c8cda032f4c184d4 | [
"MIT"
] | 1 | 2021-03-01T09:27:52.000Z | 2021-03-01T09:27:52.000Z | corridor_experiments/resqbot_2d_simulator.py | roni-permana-saputra/HiDO-MPC-ResQbot | eb6dfabf348bed412bdb0212c8cda032f4c184d4 | [
"MIT"
] | null | null | null | corridor_experiments/resqbot_2d_simulator.py | roni-permana-saputra/HiDO-MPC-ResQbot | eb6dfabf348bed412bdb0212c8cda032f4c184d4 | [
"MIT"
] | null | null | null | """
ResQbot 2D Simulator model
- Robot/Vehicle model 2D model
- Casualty 2D model
author: R Saputra
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import random
class Vehicle():
    """2-D stretcher-robot model: a pose plus a body outline polygon."""

    def __init__(self, ix, iy, iyaw):
        # World-frame pose of the vehicle.
        self.x = ix
        self.y = iy
        self.yaw = iyaw
        # Build the local outline, then project it into world coordinates.
        self._calc_contour()
        self.calc_global_vehicle_contour()

    def calc_global_vehicle_contour(self):
        """Transform the local contour into world coordinates (gvx/gvy).

        NOTE(review): the y row uses (sin, -cos) — a reflection, not a pure
        rotation. The outline is symmetric about its x-axis so plots look
        identical either way, but confirm this convention is intentional.
        """
        cos_t = np.cos(self.yaw)
        sin_t = np.sin(self.yaw)
        self.gvx = [px * cos_t + py * sin_t + self.x
                    for px, py in zip(self.c_x, self.c_y)]
        self.gvy = [px * sin_t - py * cos_t + self.y
                    for px, py in zip(self.c_x, self.c_y)]

    def _calc_contour(self):
        # Local-frame outline vertices (metres); 28 points tracing the chassis.
        self.c_x = [-0.3, -0.15, 0.15, 0.15,
                    -0.15, -0.15, 0.15, 0.3,
                    0.3, 1.35, 1.35, 0.3,
                    0.3, 1.35, 1.35, 0.3,
                    0.3, 1.35, 1.35, 0.3,
                    0.3, 0.15, 0.15, -0.15,
                    -0.15, 0.15, -0.3, -0.3]
        self.c_y = [-0.3, -0.3, -0.3, -0.35,
                    -0.35, -0.3, -0.3, -0.3,
                    -0.2, -0.2, -0.15, -0.15,
                    -0.2, -0.2, 0.15, 0.15,
                    0.2, 0.2, -0.2, -0.2,
                    0.3, 0.3, 0.35, 0.35,
                    0.3, 0.3, 0.3, -0.3]
class Casualty():
    """2-D casualty model, defined by a head point and a body point."""

    def __init__(self, hx, hy, bx, by):
        # The head point anchors the casualty's pose.
        self.x = hx
        self.y = hy
        # Orientation points from the head towards the body.
        self.yaw = math.atan2((by - hy), (bx - hx))
        # Build the local outline, then project it into world coordinates.
        self._calc_casualty_contour()
        self.calc_casualty_global_contour()

    def calc_casualty_global_contour(self):
        """Transform the local body outline into world coordinates (gcx/gcy).

        NOTE(review): same (sin, -cos) y-row as Vehicle — a reflection rather
        than a pure rotation; confirm the shared convention is intentional.
        """
        cos_t = np.cos(self.yaw)
        sin_t = np.sin(self.yaw)
        self.gcx = [px * cos_t + py * sin_t + self.x
                    for px, py in zip(self.cc_x, self.cc_y)]
        self.gcy = [px * sin_t - py * cos_t + self.y
                    for px, py in zip(self.cc_x, self.cc_y)]

    def _calc_casualty_contour(self):
        # Local-frame outline (head at origin, body along +x); 23 vertices.
        self.cc_x = [-0.1, -0.05, 0.15, 0.2,
                     0.25, 0.25, 0.3, 0.8,
                     0.8, 1.4, 1.4, 1.375,
                     1.4, 1.4, 0.8, 0.8,
                     0.3, 0.25, 0.25, 0.2,
                     0.15, -0.05, -0.1]
        self.cc_y = [0.0, 0.1, 0.1, 0.05,
                     0.05, 0.2, 0.25, 0.25,
                     0.15, 0.15, 0.05, 0.0,
                     -0.05, -0.15, -0.15, -0.25,
                     -0.25, -0.2, -0.05, -0.05,
                     -0.1, -0.1, 0.0]
class TargetVehicle():
    """Goal pose for the vehicle: aligned with the casualty, offset behind it."""

    def __init__(self, casualty):
        # Stand 1.75 m back from the casualty's head, facing along its yaw.
        standoff = 1.75
        self.x = casualty.x - standoff * np.cos(casualty.yaw)
        self.y = casualty.y - standoff * np.sin(casualty.yaw)
        self.yaw = casualty.yaw
        # Build the local outline, then project it into world coordinates.
        self._calc_target_vehicle_contour()
        self.calc_global_target_contour()

    def calc_global_target_contour(self):
        """World-frame outline of the target pose (same transform as Vehicle)."""
        cos_t = np.cos(self.yaw)
        sin_t = np.sin(self.yaw)
        self.gtx = [px * cos_t + py * sin_t + self.x
                    for px, py in zip(self.tc_x, self.tc_y)]
        self.gty = [px * sin_t - py * cos_t + self.y
                    for px, py in zip(self.tc_x, self.tc_y)]

    def _calc_target_vehicle_contour(self):
        # Same 28-vertex chassis outline as Vehicle, in the target's frame.
        self.tc_x = [-0.3, -0.15, 0.15, 0.15,
                     -0.15, -0.15, 0.15, 0.3,
                     0.3, 1.35, 1.35, 0.3,
                     0.3, 1.35, 1.35, 0.3,
                     0.3, 1.35, 1.35, 0.3,
                     0.3, 0.15, 0.15, -0.15,
                     -0.15, 0.15, -0.3, -0.3]
        self.tc_y = [-0.3, -0.3, -0.3, -0.35,
                     -0.35, -0.3, -0.3, -0.3,
                     -0.2, -0.2, -0.15, -0.15,
                     -0.2, -0.2, 0.15, 0.15,
                     0.2, 0.2, -0.2, -0.2,
                     0.3, 0.3, 0.35, 0.35,
                     0.3, 0.3, 0.3, -0.3]
def main():
    """Placeholder driver: announce start and completion of the simulation."""
    for banner in ("start!!", "done!!"):
        print(banner)
if __name__ == '__main__':
    # Run the driver only when executed as a script, not on import.
    main()
421e652e0c67555b1a75ce3ea7e253e2f6e3eb6a | 103 | py | Python | fastapi_rest_jsonapi/data/__init__.py | Zenor27/fastapi-rest-jsonapi | 1c6eaad0791949bbaf9f4032fb7ecd483e80a02a | [
"MIT"
] | 2 | 2022-03-01T00:59:04.000Z | 2022-03-03T06:17:51.000Z | fastapi_rest_jsonapi/data/__init__.py | Zenor27/fastapi-rest-jsonapi | 1c6eaad0791949bbaf9f4032fb7ecd483e80a02a | [
"MIT"
] | 9 | 2022-01-16T15:47:35.000Z | 2022-03-28T18:47:18.000Z | fastapi_rest_jsonapi/data/__init__.py | Zenor27/fastapi-rest-jsonapi | 1c6eaad0791949bbaf9f4032fb7ecd483e80a02a | [
"MIT"
] | null | null | null | # flake8: noqa
from .data_layer import DataLayer
from .sqlachemy_data_layer import SQLAlchemyDataLayer
| 25.75 | 53 | 0.854369 |
a9d6c998a509e7f2491b1fb562acd0fdaf57361f | 12,089 | py | Python | google/appengine/api/memcache/memcache_stub.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | google/appengine/api/memcache/memcache_stub.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | google/appengine/api/memcache/memcache_stub.py | luduvigo/app-blog-code | 2de41573955f8dfb5f6d964d3cae2bbee6db2f9a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the memcache API, keeping all data in process memory."""
import logging
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api import memcache
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.runtime import apiproxy_errors
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest
MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
MAX_REQUEST_SIZE = 32 << 20
class CacheEntry(object):
  """A single value stored in the in-process cache, with expiry bookkeeping."""

  def __init__(self, value, expiration, flags, cas_id, gettime):
    """Initializer.

    Args:
      value: String containing the data for this entry.
      expiration: Number containing the expiration time or offset in seconds
        for this entry.
      flags: Opaque flags used by the memcache implementation.
      cas_id: Unique Compare-And-Set ID.
      gettime: Used for testing. Function that works like time.time().
    """
    assert isinstance(value, basestring)
    assert len(value) <= memcache.MAX_VALUE_SIZE
    assert isinstance(expiration, (int, long))
    self._gettime = gettime
    self.value = value
    self.flags = flags
    self.cas_id = cas_id
    self.created_time = self._gettime()
    # expiration == 0 means "never expires".
    self.will_expire = expiration != 0
    self.locked = False
    self._SetExpiration(expiration)

  def _SetExpiration(self, expiration):
    """Record the absolute expiry time for this entry.

    Args:
      expiration: Seconds. Values above one month are treated as an absolute
        UNIX timestamp; smaller values are an offset from "now".
    """
    one_month_seconds = 86400 * 30
    if expiration > one_month_seconds:
      # Absolute timestamp since the UNIX epoch.
      self.expiration_time = expiration
    else:
      # Relative offset from the current (test-injectable) clock.
      self.expiration_time = self._gettime() + expiration

  def CheckExpired(self):
    """Return True when this entry has an expiry and it has passed."""
    if not self.will_expire:
      return False
    return self._gettime() >= self.expiration_time

  def ExpireAndLock(self, timeout):
    """Mark this entry deleted and lock it until *timeout* elapses.

    Implements memcache's delete-timeout behaviour: a deleted key cannot be
    re-added (except by an unconditional SET) until the lock expires.

    Args:
      timeout: Parameter originally passed to memcache.delete or
        memcache.delete_multi to control deletion timeout.
    """
    self.will_expire = True
    self.locked = True
    self._SetExpiration(timeout)

  def CheckLocked(self):
    """Return True when this entry was deleted but has not yet timed out."""
    return self.locked and not self.CheckExpired()
class MemcacheServiceStub(apiproxy_stub.APIProxyStub):
  """Python only memcache service stub.

  This stub keeps all data in the local process' memory, not in any
  external servers.
  """

  def __init__(self, gettime=time.time, service_name='memcache'):
    """Initializer.

    Args:
      gettime: time.time()-like function used for testing.
      service_name: Service name expected for all calls.
    """
    super(MemcacheServiceStub, self).__init__(service_name,
                                              max_request_size=MAX_REQUEST_SIZE)
    # Compare-And-Set IDs are handed out sequentially, one per stored entry.
    self._next_cas_id = 1
    # Truncate to whole seconds, matching memcache's expiry granularity.
    self._gettime = lambda: int(gettime())
    self._ResetStats()
    # Maps namespace (str) -> {key (str) -> CacheEntry}.
    self._the_cache = {}

  def _ResetStats(self):
    """Resets statistics information.

    Must be called while the current thread holds self._mutex (with an exception
    for __init__).
    """
    self._hits = 0
    self._misses = 0
    self._byte_hits = 0
    self._cache_creation_time = self._gettime()

  @apiproxy_stub.Synchronized
  def _GetKey(self, namespace, key):
    """Retrieves a CacheEntry from the cache if it hasn't expired.

    Does not take deletion timeout (delete-lock) into account.

    Args:
      namespace: The namespace that keys are stored under.
      key: The key to retrieve from the cache.

    Returns:
      The corresponding CacheEntry instance, or None if it was not found or
      has already expired.
    """
    namespace_dict = self._the_cache.get(namespace, None)
    if namespace_dict is None:
      return None
    entry = namespace_dict.get(key, None)
    if entry is None:
      return None
    elif entry.CheckExpired():
      # Expiry is lazy: expired entries are purged when next looked up.
      del namespace_dict[key]
      return None
    else:
      return entry

  @apiproxy_stub.Synchronized
  def _Dynamic_Get(self, request, response):
    """Implementation of MemcacheService::Get().

    Args:
      request: A MemcacheGetRequest.
      response: A MemcacheGetResponse.
    """
    namespace = request.name_space()
    keys = set(request.key_list())
    for key in keys:
      entry = self._GetKey(namespace, key)
      # Delete-locked entries behave like misses until the lock times out.
      if entry is None or entry.CheckLocked():
        self._misses += 1
        continue
      self._hits += 1
      self._byte_hits += len(entry.value)
      item = response.add_item()
      item.set_key(key)
      item.set_value(entry.value)
      item.set_flags(entry.flags)
      if request.for_cas():
        item.set_cas_id(entry.cas_id)

  @apiproxy_stub.Synchronized
  def _Dynamic_Set(self, request, response):
    """Implementation of MemcacheService::Set().

    Args:
      request: A MemcacheSetRequest.
      response: A MemcacheSetResponse.
    """
    namespace = request.name_space()
    for item in request.item_list():
      key = item.key()
      set_policy = item.set_policy()
      old_entry = self._GetKey(namespace, key)
      set_status = MemcacheSetResponse.NOT_STORED
      # SET always stores; ADD only when absent; REPLACE only when present.
      if ((set_policy == MemcacheSetRequest.SET) or
          (set_policy == MemcacheSetRequest.ADD and old_entry is None) or
          (set_policy == MemcacheSetRequest.REPLACE and old_entry is not None)):
        # Only an unconditional SET may overwrite a delete-locked entry.
        if (old_entry is None or
            set_policy == MemcacheSetRequest.SET
            or not old_entry.CheckLocked()):
          set_status = MemcacheSetResponse.STORED
      # CAS stores only when the caller's cas_id matches the stored one.
      elif (set_policy == MemcacheSetRequest.CAS and item.for_cas() and
            item.has_cas_id()):
        if old_entry is None or old_entry.CheckLocked():
          set_status = MemcacheSetResponse.NOT_STORED
        elif old_entry.cas_id != item.cas_id():
          set_status = MemcacheSetResponse.EXISTS
        else:
          set_status = MemcacheSetResponse.STORED
      if set_status == MemcacheSetResponse.STORED:
        if namespace not in self._the_cache:
          self._the_cache[namespace] = {}
        # Every store allocates a fresh CAS id.
        self._the_cache[namespace][key] = CacheEntry(item.value(),
                                                     item.expiration_time(),
                                                     item.flags(),
                                                     self._next_cas_id,
                                                     gettime=self._gettime)
        self._next_cas_id += 1
      response.add_set_status(set_status)

  @apiproxy_stub.Synchronized
  def _Dynamic_Delete(self, request, response):
    """Implementation of MemcacheService::Delete().

    Args:
      request: A MemcacheDeleteRequest.
      response: A MemcacheDeleteResponse.
    """
    namespace = request.name_space()
    for item in request.item_list():
      key = item.key()
      entry = self._GetKey(namespace, key)
      delete_status = MemcacheDeleteResponse.DELETED
      if entry is None:
        delete_status = MemcacheDeleteResponse.NOT_FOUND
      elif item.delete_time() == 0:
        del self._the_cache[namespace][key]
      else:
        # Nonzero delete_time leaves a locked tombstone so ADD fails until
        # the lock expires (memcache delete-timeout semantics).
        entry.ExpireAndLock(item.delete_time())
      response.add_delete_status(delete_status)

  @apiproxy_stub.Synchronized
  def _internal_increment(self, namespace, request):
    """Internal function for incrementing from a MemcacheIncrementRequest.

    Args:
      namespace: A string containing the namespace for the request, if any.
        Pass an empty string if there is no namespace.
      request: A MemcacheIncrementRequest instance.

    Returns:
      An integer or long if the offset was successful, None on error.
    """
    key = request.key()
    entry = self._GetKey(namespace, key)
    if entry is None:
      # Missing key: only seed it when the caller supplied an initial value.
      if not request.has_initial_value():
        return None
      if namespace not in self._the_cache:
        self._the_cache[namespace] = {}
      flags = 0
      if request.has_initial_flags():
        flags = request.initial_flags()
      self._the_cache[namespace][key] = CacheEntry(str(request.initial_value()),
                                                   expiration=0,
                                                   flags=flags,
                                                   cas_id=self._next_cas_id,
                                                   gettime=self._gettime)
      self._next_cas_id += 1
      entry = self._GetKey(namespace, key)
      assert entry is not None
    try:
      # Values must parse as non-negative integers to be incrementable.
      old_value = long(entry.value)
      if old_value < 0:
        raise ValueError
    except ValueError:
      logging.error('Increment/decrement failed: Could not interpret '
                    'value for key = "%s" as an unsigned integer.', key)
      return None
    delta = request.delta()
    if request.direction() == MemcacheIncrementRequest.DECREMENT:
      delta = -delta
    # Clamp decrement underflow at 0; wrap like a 64-bit unsigned counter.
    new_value = max(old_value + delta, 0) % (2**64)
    entry.value = str(new_value)
    return new_value

  def _Dynamic_Increment(self, request, response):
    """Implementation of MemcacheService::Increment().

    Args:
      request: A MemcacheIncrementRequest.
      response: A MemcacheIncrementResponse.

    Raises:
      apiproxy_errors.ApplicationError: when the increment fails (missing key
      with no initial value, or a non-integer stored value).
    """
    namespace = request.name_space()
    new_value = self._internal_increment(namespace, request)
    if new_value is None:
      raise apiproxy_errors.ApplicationError(
          memcache_service_pb.MemcacheServiceError.UNSPECIFIED_ERROR)
    response.set_new_value(new_value)

  @apiproxy_stub.Synchronized
  def _Dynamic_BatchIncrement(self, request, response):
    """Implementation of MemcacheService::BatchIncrement().

    Unlike Increment(), per-item failures are reported in the response
    instead of raising.

    Args:
      request: A MemcacheBatchIncrementRequest.
      response: A MemcacheBatchIncrementResponse.
    """
    namespace = request.name_space()
    for request_item in request.item_list():
      new_value = self._internal_increment(namespace, request_item)
      item = response.add_item()
      if new_value is None:
        item.set_increment_status(MemcacheIncrementResponse.NOT_CHANGED)
      else:
        item.set_increment_status(MemcacheIncrementResponse.OK)
        item.set_new_value(new_value)

  @apiproxy_stub.Synchronized
  def _Dynamic_FlushAll(self, request, response):
    """Implementation of MemcacheService::FlushAll().

    Clears all entries and resets the hit/miss statistics.

    Args:
      request: A MemcacheFlushRequest.
      response: A MemcacheFlushResponse.
    """
    self._the_cache.clear()
    self._ResetStats()

  @apiproxy_stub.Synchronized
  def _Dynamic_Stats(self, request, response):
    """Implementation of MemcacheService::Stats().

    Args:
      request: A MemcacheStatsRequest.
      response: A MemcacheStatsResponse.
    """
    stats = response.mutable_stats()
    stats.set_hits(self._hits)
    stats.set_misses(self._misses)
    stats.set_byte_hits(self._byte_hits)
    # Item/byte totals are computed on demand by walking every namespace.
    items = 0
    total_bytes = 0
    for namespace in self._the_cache.itervalues():
      items += len(namespace)
      for entry in namespace.itervalues():
        total_bytes += len(entry.value)
    stats.set_items(items)
    stats.set_bytes(total_bytes)
    stats.set_oldest_item_age(self._gettime() - self._cache_creation_time)
c2bcd606f66a4ffe573427b5838b9b49f35ebc15 | 3,517 | py | Python | flash/image/detection/model.py | LoopGlitch26/lightning-flash | a1bece4361a7cb5449715bef9975696e96c5f9ae | [
"Apache-2.0"
] | null | null | null | flash/image/detection/model.py | LoopGlitch26/lightning-flash | a1bece4361a7cb5449715bef9975696e96c5f9ae | [
"Apache-2.0"
] | null | null | null | flash/image/detection/model.py | LoopGlitch26/lightning-flash | a1bece4361a7cb5449715bef9975696e96c5f9ae | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Mapping, Optional, Type, Union
import torch
from torch.optim import Optimizer
from flash.core.adapter import AdapterTask
from flash.core.data.process import Serializer
from flash.core.registry import FlashRegistry
from flash.image.detection.backbones import OBJECT_DETECTION_HEADS
class ObjectDetector(AdapterTask):
    """The ``ObjectDetector`` is a :class:`~flash.Task` for detecting objects in images. For more details, see
    :ref:`object_detection`.

    Args:
        num_classes: the number of classes for detection, including background
        backbone: Pretrained backbone CNN architecture registered for the chosen head.
        head: The detection head to use (a key of :attr:`heads`).
        pretrained: Whether the model should be loaded with its pretrained weights.
        optimizer: The optimizer to use for training. Can either be the actual class or the class name.
        learning_rate: The learning rate to use for training.
        serializer: Serializer(s) used to convert model outputs for prediction.
        **kwargs: Additional keyword arguments forwarded to the adapter's ``from_task``.
    """

    # Registry of available detection heads; each entry carries the adapter
    # class in its metadata.
    heads: FlashRegistry = OBJECT_DETECTION_HEADS

    required_extras: str = "image"

    def __init__(
        self,
        num_classes: int,
        backbone: Optional[str] = "resnet18_fpn",
        head: Optional[str] = "retinanet",
        pretrained: bool = True,
        optimizer: Type[Optimizer] = torch.optim.Adam,
        learning_rate: float = 5e-4,
        serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = None,
        **kwargs: Any,
    ):
        self.save_hyperparameters()

        # The registry returns {"fn": ..., "metadata": {...}}; the adapter
        # class lives under metadata["metadata"]["adapter"].
        metadata = self.heads.get(head, with_metadata=True)
        adapter = metadata["metadata"]["adapter"].from_task(
            self,
            num_classes=num_classes,
            backbone=backbone,
            head=head,
            pretrained=pretrained,
            **kwargs,
        )

        super().__init__(
            adapter,
            learning_rate=learning_rate,
            optimizer=optimizer,
            serializer=serializer,
        )

    def _ci_benchmark_fn(self, history: List[Dict[str, Any]]) -> None:
        """This function is used only for debugging usage with CI."""
        # todo
bf9e05ed3e461f716ab030eb22ceaa72207b9763 | 4,264 | py | Python | object_detection.py | rktayal/object-detection-demo | 3bc69ecdfbf9ac3f6d6f34f08a87c4b02c5eb910 | [
"MIT"
] | 1 | 2020-04-12T16:12:52.000Z | 2020-04-12T16:12:52.000Z | object_detection.py | rktayal/object-detection-demo | 3bc69ecdfbf9ac3f6d6f34f08a87c4b02c5eb910 | [
"MIT"
] | null | null | null | object_detection.py | rktayal/object-detection-demo | 3bc69ecdfbf9ac3f6d6f34f08a87c4b02c5eb910 | [
"MIT"
] | 4 | 2019-12-26T07:55:12.000Z | 2020-09-29T01:52:58.000Z | """
The script loads the model into memory,
performs detections on your webcam
"""
import os
import sys
import cv2
import argparse
#import utils as ut
import numpy as np
import tensorflow as tf
#from PIL import Image
#from io import StringIO
#from collections import defaultdict
#from matplotlib import pyplot as plt
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from imutil.app_utils import FPS
def load_model(PATH_TO_CKPT):
    """Load a frozen TensorFlow detection graph and open a session on it.

    Returns a ``(graph, session)`` pair; the caller owns the session and is
    responsible for closing it.
    """
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        tf.import_graph_def(graph_def, name='')
    return graph, tf.Session(graph=graph)
def load_label_map(PATH_TO_LABELS, NUM_CLASSES):
    """Read a label-map pbtxt and build the {id: category} index used for
    drawing human-readable class names."""
    raw_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        raw_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    return label_map_util.create_category_index(categories)
# Helper code
def load_image_into_numpy_array(image):
    """Convert a PIL-style image (``.size`` + ``.getdata()``) into an
    (height, width, 3) uint8 numpy array."""
    im_width, im_height = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((im_height, im_width, 3)).astype(np.uint8)
if __name__ == "__main__":
    # CLI flags: -n caps the number of frames benchmarked, -d > 0 renders
    # detections in a window.
    ap = argparse.ArgumentParser()
    ap.add_argument('-n', "--num-frames", type=int, default=100,
                    help="# of frames to loop over FPS test")
    ap.add_argument('-d', "--display", type=int, default=-1,
                    help="whether or not frame should be displayed")
    args = vars(ap.parse_args())

    cap = cv2.VideoCapture(0)  # change only if you have more than one webcams
    PATH_TO_CKPT = "./model/frozen_inference_graph.pb"
    PATH_TO_LABELS = "./model/mscoco_label_map.pbtxt"
    NUM_CLASSES = 90
    detection_graph, sess = load_model(PATH_TO_CKPT)
    category_index = load_label_map(PATH_TO_LABELS, NUM_CLASSES)

    try:
        with detection_graph.as_default():
            fps = FPS().start()
            while fps._numFrames < args["num_frames"]:
                # Read frame from camera
                ret, image_np = cap.read()
                if not ret:
                    # BUG FIX: the grab result was ignored; a failed read left
                    # image_np unusable and crashed sess.run. Stop cleanly.
                    break
                # Expand dimensions since the model expects images to have shape [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Extract input and output tensors by name from the frozen graph.
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # Actual Detection
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                if args["display"] > 0:
                    # Visualization of the result of the detection
                    vis_util.visualize_boxes_and_labels_on_image_array(
                        image_np,
                        np.squeeze(boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(scores),
                        category_index,
                        use_normalized_coordinates=True,
                        line_thickness=8)
                    # Display output
                    cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))
                    if cv2.waitKey(25) & 0xff == ord('q'):
                        break
                fps.update()
            fps.stop()
            print("[INFO] elapsed time : {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    finally:
        # BUG FIX: the capture device was never released and windows/session
        # leaked on exceptions; always clean up.
        cap.release()
        cv2.destroyAllWindows()
        sess.close()
532c944cc78b9bf07d6b48281ab92c157f32db60 | 418 | py | Python | schnitzler_zeitungen/wsgi.py | martinantonmueller/schnitzler-zeitungen | f677fe0ed0e92f2fa054e43f5a3c88c1175f5ab5 | [
"MIT"
] | null | null | null | schnitzler_zeitungen/wsgi.py | martinantonmueller/schnitzler-zeitungen | f677fe0ed0e92f2fa054e43f5a3c88c1175f5ab5 | [
"MIT"
] | 9 | 2021-03-19T01:56:45.000Z | 2022-03-29T08:12:30.000Z | schnitzler_zeitungen/wsgi.py | martinantonmueller/schnitzler-zeitungen | f677fe0ed0e92f2fa054e43f5a3c88c1175f5ab5 | [
"MIT"
] | 1 | 2019-10-03T06:20:36.000Z | 2019-10-03T06:20:36.000Z | """
WSGI config for schnitzler_zeitungen project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already set it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "schnitzler_zeitungen.settings")

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| 24.588235 | 80 | 0.799043 |
8740e9e4fc94ab140ac814e71eaa063136310a28 | 1,310 | py | Python | app/core/tests/tests_admin.py | NatikaKV/recipe-app-api | c5c0b1fc9d49aaf1fbe4547a485919a40e0ec1e0 | [
"MIT"
] | null | null | null | app/core/tests/tests_admin.py | NatikaKV/recipe-app-api | c5c0b1fc9d49aaf1fbe4547a485919a40e0ec1e0 | [
"MIT"
] | null | null | null | app/core/tests/tests_admin.py | NatikaKV/recipe-app-api | c5c0b1fc9d49aaf1fbe4547a485919a40e0ec1e0 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Smoke tests for the custom user model pages in the Django admin."""

    def setUp(self):
        """Log an admin client in and create one regular user fixture."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@email.com',
            password='password'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='test@gmail.com',
            password='password123',
            name='Test user full name'
        )

    def test_users_listed(self):
        """The user changelist shows each user's name and email."""
        page_url = reverse('admin:core_user_changelist')
        response = self.client.get(page_url)
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_change_page(self):
        """The user edit page renders successfully."""
        page_url = reverse('admin:core_user_change', args=[self.user.id])
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)

    def test_create_user_page(self):
        """The add-user page renders successfully."""
        page_url = reverse('admin:core_user_add')
        response = self.client.get(page_url)
        self.assertEqual(response.status_code, 200)
| 31.95122 | 68 | 0.638931 |
d95acb524992ab1ca2a3395a52d48a793ab3f132 | 270 | py | Python | djangofiltertest/djangofiltertest/apps/posts_areas/api_v1/views.py | gonzaloamadio/django-filter-test | 7b9dbc36ca248e2113deaac03e824b123a31a4ba | [
"MIT"
] | null | null | null | djangofiltertest/djangofiltertest/apps/posts_areas/api_v1/views.py | gonzaloamadio/django-filter-test | 7b9dbc36ca248e2113deaac03e824b123a31a4ba | [
"MIT"
] | 3 | 2020-06-05T19:44:12.000Z | 2022-03-08T21:09:34.000Z | djangofiltertest/djangofiltertest/apps/posts_areas/api_v1/views.py | gonzaloamadio/django-filter-test | 7b9dbc36ca248e2113deaac03e824b123a31a4ba | [
"MIT"
] | null | null | null | from posts_areas.api_v1.serializers import PostAreaSerializer
from posts_areas.models import PostArea
from djangofiltertest.libs.views import APIViewSet
class PostAreaViewSet(APIViewSet):
    """API v1 CRUD endpoints for ``PostArea`` records."""

    # Expose every PostArea; the project base APIViewSet supplies the
    # routing/permission behaviour.
    queryset = PostArea.objects.all()
    serializer_class = PostAreaSerializer
| 30 | 61 | 0.837037 |
86128c070ba9381cd37db881012a88b93bda1cae | 6,744 | py | Python | CaptionGenerator.py | ttengwang/ECHR | aa55a3e00d45f65d7dc01a392a7895d4b7a34163 | [
"MIT"
] | 8 | 2020-10-21T13:09:41.000Z | 2021-01-14T11:25:42.000Z | CaptionGenerator.py | ttengwang/ECHR | aa55a3e00d45f65d7dc01a392a7895d4b7a34163 | [
"MIT"
] | 1 | 2020-12-01T12:44:29.000Z | 2020-12-05T10:37:32.000Z | CaptionGenerator.py | ttengwang/ECHR | aa55a3e00d45f65d7dc01a392a7895d4b7a34163 | [
"MIT"
] | 3 | 2020-10-20T08:59:22.000Z | 2021-01-10T02:42:51.000Z | import pdb
import torch
from torch import nn
from torch.autograd import Variable
import models
import time
class CaptionGenerator(nn.Module):
    def __init__(self, opt):
        """Build the captioning pipeline described by *opt*.

        Ordering matters: ``change_context_dim`` must run before
        ``models.setup_lm`` because it writes the derived ``*_context_dim``
        fields onto ``opt`` that the language model constructor reads.
        """
        super(CaptionGenerator, self).__init__()
        self.opt = opt
        self.change_context_dim()
        # The TSRM fusion module is only built when event relations (ER) are used.
        if 'TSRM' in opt.fusion_model and 'ER' in opt.event_context_type:
            self.fusion_model = models.setup_fusion(opt)
        self.lm_model = models.setup_lm(opt)
    def forward(self, tap_feats, c3d_feats, lda_feats, lm_labels, ind_select_list, soi_select_list, mode='train'):
        '''Run the caption model in one of several modes.

        Context components (see change_context_dim): video VL+VC+VH,
        event EC+EH+ER1+ER2+ER3, clip CC+CH.

        Modes: 'train' (teacher-forced loss inputs), 'train_rl' (sampled +
        greedy rollouts for self-critical RL), 'eval' (greedy decode),
        '1stage'/'1stage_eval'/'1stage_ATTnorm' (also return caption features
        and, for ATTnorm, attention weights).
        '''
        # NOTE(review): t0/t1/t2 timing and the print() in 'eval' look like
        # leftover debug instrumentation — consider removing.
        t0=time.time()
        video = self.get_video_context( tap_feats, c3d_feats, lda_feats, ind_select_list, soi_select_list)
        event = self.get_event_context( tap_feats, c3d_feats, lda_feats, ind_select_list, soi_select_list)
        clip, clip_mask = self.get_clip_context( tap_feats, c3d_feats, lda_feats, ind_select_list, soi_select_list)
        t1=time.time()
        if mode=='train':
            pred_captions = self.lm_model(video, event, clip, clip_mask, lm_labels)
            return pred_captions
        elif mode =='train_rl':
            # Self-critical RL: a stochastic sample plus a greedy baseline.
            gen_result, sample_logprobs = self.lm_model.sample(video, event,clip, clip_mask,opt={'sample_max': 0})
            self.lm_model.eval()
            with torch.no_grad():
                greedy_res, _ = self.lm_model.sample(video, event,clip, clip_mask)
            self.lm_model.train()
            return gen_result, sample_logprobs, greedy_res
        elif mode == 'eval':
            seq, cg_prob = self.lm_model.sample(video, event, clip, clip_mask)
            t2= time.time()
            print(t1 - t0, t2 - t1)
            return seq, cg_prob
        elif mode == '1stage':
            pred_captions, cg_feats = self.lm_model(video, event,clip, clip_mask, lm_labels, need_ext_data=False)
            return pred_captions, cg_feats
        elif mode == '1stage_eval':
            seq, cg_prob, cg_feats = self.lm_model.sample(video, event, clip, clip_mask, need_ext_data=False)
            return seq, cg_prob, cg_feats
        elif mode == '1stage_ATTnorm':
            pred_captions, cg_feats, (att_weights, att_mask) = self.lm_model(video, event, clip, clip_mask, lm_labels, need_ext_data=True)
            return pred_captions, cg_feats, (att_weights, att_mask)
def change_context_dim(self):
opt = self.opt
video_context_dim = 0
if 'VL' in opt.video_context_type:
video_context_dim += opt.lda_dim
if 'VC' in opt.video_context_type:
video_context_dim += opt.video_dim
if 'VH' in opt.video_context_type:
video_context_dim += opt.hidden_dim
event_context_dim = 0
if 'ER' in opt.event_context_type:
event_context_dim = opt.d_o
else:
if 'EC' in opt.event_context_type:
event_context_dim += opt.video_dim
if 'EH' in opt.event_context_type:
event_context_dim += opt.hidden_dim
clip_context_dim = 0
if 'CC' in opt.clip_context_type:
clip_context_dim += opt.video_dim
if 'CH' in opt.clip_context_type:
clip_context_dim += opt.hidden_dim
opt.video_context_dim = video_context_dim
opt.event_context_dim = event_context_dim
opt.clip_context_dim = clip_context_dim
def get_video_context(self, tap_feats, c3d_feats, lda_feats, ind_select_list, soi_select_list):
opt = self.opt
video_feats = []
if 'VL' in opt.video_context_type:
video_feats.append(lda_feats)
if 'VC' in opt.video_context_type:
video_feats.append(c3d_feats.mean(0))
if 'VH' in opt.video_context_type:
video_feats.append(tap_feats.mean(0))
if video_feats:
video_feat = torch.cat(video_feats, 0)
else:
video_feat = None
return video_feat
def get_event_context(self, tap_feats, c3d_feats, lda_feats, ind_select_list, soi_select_list):
opt=self.opt
event_feats =[]
if 'EC' in opt.event_context_type or 'ER1' in opt.event_context_type or 'ER3' in opt.event_context_type:
pooled = []
for i, soi in enumerate(soi_select_list):
selected = c3d_feats[soi[0]:soi[1]]
pooled.append(selected.mean(0).unsqueeze(0))
EC = torch.cat(pooled, 0)
event_feats.append(EC)
if 'ER1' in opt.event_context_type:
event_feat = self.fusion_model(EC, soi_select_list)
return event_feat
if 'EH' in opt.event_context_type or 'ER2' in opt.event_context_type or 'ER3' in opt.event_context_type:
EH = tap_feats[ind_select_list]
event_feats.append(EH)
if 'ER2' in opt.event_context_type:
event_feat = self.fusion_model(EH, soi_select_list)
return event_feat
if 'ER3' in opt.event_context_type:
ECH = torch.cat((EC,EH),1)
event_feat = self.fusion_model(ECH, soi_select_list)
return event_feat
if event_feats:
event_feat = torch.cat(event_feats, 0)
else:
event_feat = None
return event_feat
def get_clip_context(self, tap_feats, c3d_feats, lda_feats, ind_select_list, soi_select_list):
opt = self.opt
max_att_len = max([(s[1]-s[0]) for s in soi_select_list])
clip_mask = Variable(c3d_feats.new(len(soi_select_list), max_att_len).zero_())
clip_feats = []
if 'CC' in opt.clip_context_type:
CC = Variable(c3d_feats.new(len(soi_select_list), max_att_len, opt.video_dim).zero_())
for i,soi in enumerate(soi_select_list):
selected = c3d_feats[soi[0]:soi[1]]
CC[i,:len(selected),:] = selected
clip_mask[i, :len(selected)] = 1
clip_feats.append(CC)
if 'CH' in opt.clip_context_type:
CH = Variable(c3d_feats.new(len(soi_select_list), max_att_len, opt.hidden_dim).zero_())
for i,soi in enumerate(soi_select_list):
selected = tap_feats[soi[0]:soi[1]]
CH[i,:len(selected),:] = selected
clip_mask[i, :len(selected)] = 1
clip_feats.append(CH)
if clip_feats:
clip_feat = torch.cat(clip_feats, 2)
else:
clip_feat = None
clip_mask = None
return clip_feat, clip_mask
| 39.905325 | 139 | 0.602758 |
74d49678a32b62f0bee837f70720184ea5665bf4 | 294 | py | Python | src/api/tables/semester_info.py | 123joshuawu/yacs.n | 5e5b4e4447953a6e88916f38a3c8720264cfc6f9 | [
"MIT"
] | 20 | 2020-02-29T19:03:31.000Z | 2022-02-18T21:13:12.000Z | src/api/tables/semester_info.py | 123joshuawu/yacs.n | 5e5b4e4447953a6e88916f38a3c8720264cfc6f9 | [
"MIT"
] | 465 | 2020-02-29T19:08:18.000Z | 2022-03-18T22:21:49.000Z | src/api/tables/semester_info.py | 123joshuawu/yacs.n | 5e5b4e4447953a6e88916f38a3c8720264cfc6f9 | [
"MIT"
] | 19 | 2020-02-29T01:22:23.000Z | 2022-02-14T01:47:09.000Z | from sqlalchemy import Column, PrimaryKeyConstraint
from sqlalchemy.dialects.postgresql import VARCHAR, BOOLEAN
from .database import Base
class SemesterInfo(Base):
    """ORM model for the `semester_info` table."""
    __tablename__ = "semester_info"
    # Semester identifier string; serves as the primary key.
    semester = Column(VARCHAR(length=255), primary_key=True)
    # Whether this semester's data is publicly visible.
    public = Column(BOOLEAN)
| 26.727273 | 60 | 0.785714 |
b3b45a75a79b15e182ec1477f241812a48179257 | 13,723 | py | Python | scripts/common.py | danigrim/oppia | e54f5cef31bb8c9e2802ba2b62695b4352774bf9 | [
"Apache-2.0"
] | 1 | 2020-06-14T23:20:14.000Z | 2020-06-14T23:20:14.000Z | scripts/common.py | danigrim/oppia | e54f5cef31bb8c9e2802ba2b62695b4352774bf9 | [
"Apache-2.0"
] | null | null | null | scripts/common.py | danigrim/oppia | e54f5cef31bb8c9e2802ba2b62695b4352774bf9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions and classes used by multiple Python scripts."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import contextlib
import getpass
import os
import re
import socket
import subprocess
import python_utils
import release_constants
# Prefix shared by all release branch names (e.g. release-1.2.3).
RELEASE_BRANCH_NAME_PREFIX = 'release-'
# Paths are resolved relative to the directory the script is launched from.
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, '..', 'oppia_tools')
THIRD_PARTY_DIR = os.path.join(CURR_DIR, 'third_party')
GOOGLE_APP_ENGINE_HOME = os.path.join(
    OPPIA_TOOLS_DIR, 'google_appengine_1.9.67', 'google_appengine')
GOOGLE_CLOUD_SDK_HOME = os.path.join(
    OPPIA_TOOLS_DIR, 'google-cloud-sdk-251.0.0', 'google-cloud-sdk')
NODE_PATH = os.path.join(OPPIA_TOOLS_DIR, 'node-10.15.3')
NODE_MODULES_PATH = os.path.join(CURR_DIR, 'node_modules')
FRONTEND_DIR = os.path.join(CURR_DIR, 'core', 'templates', 'dev', 'head')
YARN_PATH = os.path.join(OPPIA_TOOLS_DIR, 'yarn-v1.17.3')
# Add path for node which is required by the node_modules.
os.environ['PATH'] = (
    '%s/bin:' % NODE_PATH + '%s/bin:' % YARN_PATH + os.environ['PATH'])
def run_cmd(cmd_tokens):
    """Executes a command and returns its stripped output.

    Args:
        cmd_tokens: list(str). Tokens of the command to execute.

    Returns:
        str. The command's standard output, stripped of surrounding
        whitespace.

    Raises:
        subprocess.CalledProcessError. The command exited with a non-zero
        status.
    """
    output = subprocess.check_output(cmd_tokens)
    return output.strip()
def ensure_directory_exists(d):
    """Creates the given directory (and any parents) if it does not exist.

    Args:
        d: str. Path of the directory to create.
    """
    # Attempt the creation directly instead of check-then-create: another
    # process could create the directory between os.path.exists() and
    # os.makedirs(), which would previously raise. Only re-raise if the
    # path still does not exist as a directory afterwards.
    try:
        os.makedirs(d)
    except OSError:
        if not os.path.isdir(d):
            raise
def require_cwd_to_be_oppia(allow_deploy_dir=False):
    """Raises an exception unless the current working directory is oppia/.

    If allow_deploy_dir is True, a directory whose name starts with
    'deploy-' and which is a sibling of an oppia/ directory is accepted
    as well.
    """
    cwd = os.getcwd()
    if cwd.endswith('oppia'):
        return
    if allow_deploy_dir:
        dirname = os.path.basename(os.path.normpath(cwd))
        sibling_oppia_exists = os.path.isdir(
            os.path.join(cwd, '..', 'oppia'))
        if dirname.startswith('deploy-') and sibling_oppia_exists:
            return
    raise Exception('Please run this script from the oppia/ directory.')
def open_new_tab_in_browser_if_possible(url):
    """Opens the given URL in a new browser tab, if possible."""
    for browser_cmd in ('chromium-browser', 'google-chrome', 'firefox'):
        # `which` exits with status 0 iff the binary exists on the PATH.
        if subprocess.call(['which', browser_cmd]) == 0:
            subprocess.call([browser_cmd, url])
            return
    # No known browser is available; ask the user to open the URL
    # manually and wait for confirmation.
    manual_instructions = [
        '******************************************************************',
        'WARNING: Unable to open browser. Please manually open the following',
        'URL in a browser window, then press Enter to confirm.',
        '',
        '    %s' % url,
        '',
        'NOTE: To get rid of this message, open scripts/common.py and fix',
        'the function open_new_tab_in_browser_if_possible() to work on your',
        'system.',
    ]
    for instruction_line in manual_instructions:
        python_utils.PRINT(instruction_line)
    python_utils.INPUT()
def get_remote_alias(remote_url):
    """Finds the correct alias for the given remote repository URL.

    Args:
        remote_url: str. URL of the remote repository to look up.

    Returns:
        str. The git alias configured for that URL.

    Raises:
        Exception. No alias exists for the given URL.
    """
    remote_output = subprocess.check_output(['git', 'remote', '-v'])
    matching_aliases = [
        line.split()[0] for line in remote_output.split('\n')
        if remote_url in line]
    if not matching_aliases:
        raise Exception(
            'ERROR: There is no existing remote alias for the %s repo.'
            % remote_url)
    # Mirrors the original behaviour of keeping the last matching line.
    return matching_aliases[-1]
def verify_local_repo_is_clean():
    """Checks that the local Git repo has no uncommitted changes.

    Raises:
        Exception. The repository is not in a clean state.
    """
    git_status_output = subprocess.check_output(
        ['git', 'status']).strip().split('\n')
    # Different git versions phrase the clean-tree message differently.
    branch_is_clean_message_1 = 'nothing to commit, working directory clean'
    branch_is_clean_message_2 = 'nothing to commit, working tree clean'
    if (
            branch_is_clean_message_1 not in git_status_output and
            branch_is_clean_message_2 not in git_status_output):
        raise Exception(
            'ERROR: This script should be run from a clean branch.')
def get_current_branch_name():
    """Returns the name of the currently checked-out git branch."""
    status_lines = subprocess.check_output(
        ['git', 'status']).strip().split('\n')
    # The first line of `git status` is always 'On branch <name>'.
    prefix = 'On branch '
    first_line = status_lines[0]
    assert first_line.startswith(prefix)
    return first_line[len(prefix):]
def is_current_branch_a_release_branch():
    """Returns whether the current branch is a release branch.

    Returns:
        bool. Whether the current branch is a release branch.
    """
    current_branch_name = get_current_branch_name()
    # Build the pattern from RELEASE_BRANCH_NAME_PREFIX so a rename of the
    # prefix cannot silently desynchronise this check.
    release_branch_regex = r'%s\d+\.\d+\.\d+$' % RELEASE_BRANCH_NAME_PREFIX
    return bool(re.match(release_branch_regex, current_branch_name))
def verify_current_branch_name(expected_branch_name):
    """Checks that the user is on the expected branch.

    Args:
        expected_branch_name: str. Name of the branch the user must be on.

    Raises:
        Exception. The current branch differs from the expected one.
    """
    actual_branch_name = get_current_branch_name()
    if actual_branch_name == expected_branch_name:
        return
    raise Exception(
        'ERROR: This script can only be run from the "%s" branch.' %
        expected_branch_name)
def ensure_release_scripts_folder_exists_and_is_up_to_date():
    """Checks that the release-scripts folder exists and is up-to-date.

    Clones oppia/release-scripts as a sibling of the current directory if
    it is missing (requires SSH access to GitHub), then pulls the latest
    master after verifying the local clone is clean and on master.

    Raises:
        Exception. SSH access to GitHub is not configured, the clone is
            dirty, or it is not on the master branch.
    """
    parent_dirpath = os.path.join(os.getcwd(), os.pardir)
    release_scripts_dirpath = os.path.join(parent_dirpath, 'release-scripts')
    # If the release-scripts folder does not exist, set it up.
    if not os.path.isdir(release_scripts_dirpath):
        with CD(parent_dirpath):
            # Taken from the "Check your SSH section" at
            # https://help.github.com/articles/error-repository-not-found/
            _, stderr = subprocess.Popen(
                ['ssh', '-T', 'git@github.com'],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()
            # GitHub always closes the ssh session, so the success message
            # on stderr is the only reliable signal of working SSH auth.
            if 'You\'ve successfully authenticated' not in stderr:
                raise Exception(
                    'You need SSH access to GitHub. See the '
                    '"Check your SSH access" section here and follow the '
                    'instructions: '
                    'https://help.github.com/articles/'
                    'error-repository-not-found/#check-your-ssh-access')
            subprocess.call([
                'git', 'clone',
                'git@github.com:oppia/release-scripts.git'])
    with CD(release_scripts_dirpath):
        verify_local_repo_is_clean()
        verify_current_branch_name('master')
        # Update the local repo.
        remote_alias = get_remote_alias(
            'git@github.com:oppia/release-scripts.git')
        subprocess.call(['git', 'pull', remote_alias])
def is_port_open(port):
    """Checks if a process is listening to the port.

    Args:
        port: int. The port number.

    Returns:
        bool. True if port is open else False.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with contextlib.closing(sock) as client:
        # connect_ex returns 0 on success, so an open port yields True.
        return client.connect_ex(('localhost', port)) == 0
def recursive_chown(path, uid, gid):
    """Changes the owner and group id of all files in a path to the numeric
    uid and gid.

    Args:
        path: str. The path for which owner id and group id need to be setup.
        uid: int. Owner ID to be set.
        gid: int. Group ID to be set.
    """
    os.chown(path, uid, gid)
    for root, directories, filenames in os.walk(path):
        # Directories first, then files, within each visited root.
        for entry_name in directories + filenames:
            os.chown(os.path.join(root, entry_name), uid, gid)
def recursive_chmod(path, mode):
    """Changes the mode of path to the passed numeric mode.

    Args:
        path: str. The path for which mode would be set.
        mode: int. The mode to be set.
    """
    os.chmod(path, mode)
    for root, directories, filenames in os.walk(path):
        # Directories first, then files, within each visited root.
        for entry_name in directories + filenames:
            os.chmod(os.path.join(root, entry_name), mode)
def print_each_string_after_two_new_lines(strings):
    """Prints the given strings, separating adjacent strings with two newlines.

    Args:
        strings: list(str). The strings to print.
    """
    # PRINT appends one newline itself; the explicit '\n' adds the second.
    for message in strings:
        python_utils.PRINT('%s\n' % message)
def install_npm_library(library_name, version, path):
    """Installs the npm library after ensuring it's not already installed.

    Args:
        library_name: str. The library name.
        version: str. The library version.
        path: str. The installation path for the library.
    """
    # NOTE(review): `path` is only used in the log message below; the
    # existence check always inspects NODE_MODULES_PATH and `yarn add`
    # installs relative to the current working directory -- confirm this
    # is intentional.
    python_utils.PRINT(
        'Checking whether %s is installed in %s' % (library_name, path))
    if not os.path.exists(os.path.join(NODE_MODULES_PATH, library_name)):
        python_utils.PRINT('Installing %s' % library_name)
        subprocess.call([
            'yarn', 'add', '%s@%s' % (library_name, version)])
def ask_user_to_confirm(message):
    """Asks user to perform a task and confirm once they are done.

    Args:
        message: str. The message which specifies the task user has
            to do.
    """
    # Keep re-prompting until the user answers affirmatively.
    answer = None
    while answer not in release_constants.AFFIRMATIVE_CONFIRMATIONS:
        python_utils.PRINT(
            '******************************************************')
        python_utils.PRINT(message)
        python_utils.PRINT('Confirm once you are done by entering y/ye/yes.\n')
        answer = python_utils.INPUT().lower()
def get_personal_access_token():
    """Returns the personal access token for the GitHub id of user.

    Returns:
        str. The personal access token for the GitHub id of user.

    Raises:
        Exception. No personal access token was provided.
    """
    personal_access_token = getpass.getpass(
        prompt=(
            'Please provide personal access token for your github ID. '
            'You can create one at https://github.com/settings/tokens: '))
    # getpass.getpass() returns an empty string (not None) when the user
    # just presses Enter, so check for falsiness rather than `is None` --
    # the previous None-check could never fire.
    if not personal_access_token:
        raise Exception(
            'No personal access token provided, please set up a personal '
            'access token at https://github.com/settings/tokens and re-run '
            'the script')
    return personal_access_token
def check_blocking_bug_issue_count(repo):
    """Checks the number of unresolved blocking bugs.

    Args:
        repo: github.Repository.Repository. The PyGithub object for the repo.

    Raises:
        Exception: Number of unresolved blocking bugs is not zero.
        Exception: The blocking bug milestone is closed.
    """
    # The blocking-bugs milestone is identified by a fixed number kept in
    # release_constants.
    blocking_bugs_milestone = repo.get_milestone(
        number=release_constants.BLOCKING_BUG_MILESTONE_NUMBER)
    if blocking_bugs_milestone.state == 'closed':
        raise Exception('The blocking bug milestone is closed.')
    if blocking_bugs_milestone.open_issues:
        # Show the list of open blocking bugs to the user before failing.
        open_new_tab_in_browser_if_possible(
            'https://github.com/oppia/oppia/issues?q=is%3Aopen+'
            'is%3Aissue+milestone%3A%22Blocking+bugs%22')
        raise Exception(
            'There are %s unresolved blocking bugs. Please ensure '
            'that they are resolved before release summary generation.' % (
                blocking_bugs_milestone.open_issues))
def check_prs_for_current_release_are_released(repo):
    """Checks that all pull requests for current release have a
    'PR: released' label.

    Args:
        repo: github.Repository.Repository. The PyGithub object for the repo.

    Raises:
        Exception: Some pull requests for current release do not have a
            PR: released label.
    """
    current_release_label = repo.get_label(
        release_constants.LABEL_FOR_CURRENT_RELEASE_PRS)
    # PRs are issues in the GitHub API, so get_issues also returns PRs.
    current_release_prs = repo.get_issues(
        state='all', labels=[current_release_label])
    for pr in current_release_prs:
        label_names = [label.name for label in pr.labels]
        if release_constants.LABEL_FOR_RELEASED_PRS not in label_names:
            # Show the offending PR list to the user before failing.
            open_new_tab_in_browser_if_possible(
                'https://github.com/oppia/oppia/pulls?utf8=%E2%9C%93&q=is%3Apr'
                '+label%3A%22PR%3A+for+current+release%22+')
            raise Exception(
                'There are PRs for current release which do not have '
                'a \'PR: released\' label. Please ensure that they are '
                'released before release summary generation.')
class CD(python_utils.OBJECT):
    """Context manager for changing the current working directory."""
    def __init__(self, new_path):
        self.new_path = new_path
        # Populated on __enter__ with the directory to restore on exit.
        self.saved_path = None
    def __enter__(self):
        self.saved_path = os.getcwd()
        os.chdir(self.new_path)
    def __exit__(self, etype, value, traceback):
        # Always restore the original working directory, even on error.
        os.chdir(self.saved_path)
| 36.594667 | 79 | 0.663193 |
673d694615fde0ce4ba64e70600bf1754b9dc83f | 14,217 | py | Python | datasets/captioning_dataset.py | Harbar-Inbound/BMT | ec8826f0633db754c7ea8d206672aa0b6b6048fd | [
"MIT"
] | null | null | null | datasets/captioning_dataset.py | Harbar-Inbound/BMT | ec8826f0633db754c7ea8d206672aa0b6b6048fd | [
"MIT"
] | null | null | null | datasets/captioning_dataset.py | Harbar-Inbound/BMT | ec8826f0633db754c7ea8d206672aa0b6b6048fd | [
"MIT"
] | null | null | null | import pandas as pd
import spacy
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataset import Dataset
from torchtext.legacy import data
from datasets.load_features import fill_missing_features, load_features_from_npy
def caption_iterator(cfg, batch_size, phase):
    """Builds the caption vocabulary and a batched iterator for a phase.

    The vocabulary is always built from the *train* metadata so that token
    indices stay consistent across phases; for validation phases only the
    iterated dataset is swapped.

    Returns:
        (train_vocab, datasetloader): the torchtext vocab built on the
        train captions, and a BucketIterator over the phase's metadata.
    """
    print(f'Contructing caption_iterator for "{phase}" phase')
    spacy_en = spacy.load('en')
    # NOTE(review): tokenize_en is unused -- the CAPTION field below is
    # configured with the built-in tokenize='spacy' option. Confirm the
    # helper can be removed.
    def tokenize_en(txt):
        return [token.text for token in spacy_en.tokenizer(txt)]
    CAPTION = data.ReversibleField(
        tokenize='spacy', init_token=cfg.start_token, eos_token=cfg.end_token,
        pad_token=cfg.pad_token, lower=True, batch_first=True, is_target=True
    )
    INDEX = data.Field(
        sequential=False, use_vocab=False, batch_first=True
    )
    # the order has to be the same as in the table
    fields = [
        ('video_id', None),
        ('caption', CAPTION),
        ('start', None),
        ('end', None),
        ('duration', None),
        ('phase', None),
        ('idx', INDEX),
    ]
    dataset = data.TabularDataset(
        path=cfg.train_meta_path, format='tsv', skip_header=True, fields=fields,
    )
    CAPTION.build_vocab(
        dataset.caption, min_freq=cfg.min_freq_caps, vectors=cfg.word_emb_caps)
    train_vocab = CAPTION.vocab
    # Swap in the requested split's metadata (vocab above stays fixed).
    if phase == 'val_1':
        dataset = data.TabularDataset(
            path=cfg.val_1_meta_path, format='tsv', skip_header=True, fields=fields)
    elif phase == 'val_2':
        dataset = data.TabularDataset(
            path=cfg.val_2_meta_path, format='tsv', skip_header=True, fields=fields)
    elif phase == 'learned_props':
        dataset = data.TabularDataset(
            path=cfg.val_prop_meta_path, format='tsv', skip_header=True, fields=fields)
    # sort_key = lambda x: data.interleave_keys(len(x.caption), len(y.caption))
    # Constant sort_key disables length-based bucketing; batches are
    # shuffled every epoch.
    datasetloader = data.BucketIterator(dataset, batch_size, sort_key=lambda x: 0,
                                        device=torch.device(cfg.device), repeat=False, shuffle=True)
    return train_vocab, datasetloader
class I3DFeaturesDataset(Dataset):
    """Video-only dataset serving I3D RGB + optical-flow feature stacks.

    Unlike a typical torch Dataset, __getitem__ receives a *tensor of
    indices* (one whole batch produced by the caption iterator) and
    returns a fully assembled batch dict.
    """
    def __init__(self, features_path, feature_name, meta_path, device, pad_idx, get_full_feat, cfg):
        self.cfg = cfg
        self.features_path = features_path
        self.feature_name = f'{feature_name}_features'
        self.feature_names_list = [self.feature_name]
        self.device = device
        # Metadata TSV: one row per caption with video id and timestamps.
        self.dataset = pd.read_csv(meta_path, sep='\t')
        self.pad_idx = pad_idx
        self.get_full_feat = get_full_feat
        # Only I3D (1024-d) features are supported by this dataset.
        if self.feature_name == 'i3d_features':
            self.feature_size = 1024
        else:
            raise Exception(f'Inspect: "{self.feature_name}"')
    def __getitem__(self, indices):
        video_ids, captions, starts, ends, vid_stacks_rgb, vid_stacks_flow = [], [], [], [], [], []
        for idx in indices:
            idx = idx.item()
            video_id, caption, start, end, duration, _, _ = self.dataset.iloc[idx]
            stack = load_features_from_npy(
                self.cfg, self.feature_names_list, video_id, start, end, duration,
                self.pad_idx, self.get_full_feat
            )
            vid_stack_rgb, vid_stack_flow = stack['rgb'], stack['flow']
            # either both None or both are not None (Boolean Equivalence)
            both_are_None = vid_stack_rgb is None and vid_stack_flow is None
            none_is_None = vid_stack_rgb is not None and vid_stack_flow is not None
            assert both_are_None or none_is_None
            # # sometimes stack is empty after the filtering. we replace it with noise
            if both_are_None:
                # print(f'RGB and FLOW are None. Zero (1, D) @: {video_id}')
                vid_stack_rgb = fill_missing_features(
                    'zero', self.feature_size)
                vid_stack_flow = fill_missing_features(
                    'zero', self.feature_size)
            # append info for this index to the lists
            video_ids.append(video_id)
            captions.append(caption)
            starts.append(start)
            ends.append(end)
            vid_stacks_rgb.append(vid_stack_rgb)
            vid_stacks_flow.append(vid_stack_flow)
        # Pad variable-length stacks to the batch max; rgb uses pad_idx,
        # flow uses 0.
        vid_stacks_rgb = pad_sequence(
            vid_stacks_rgb, batch_first=True, padding_value=self.pad_idx)
        vid_stacks_flow = pad_sequence(
            vid_stacks_flow, batch_first=True, padding_value=0)
        starts = torch.tensor(starts).unsqueeze(1)
        ends = torch.tensor(ends).unsqueeze(1)
        batch_dict = {
            'video_ids': video_ids,
            'captions': captions,
            'starts': starts.to(self.device),
            'ends': ends.to(self.device),
            'feature_stacks': {
                'rgb': vid_stacks_rgb.to(self.device),
                'flow': vid_stacks_flow.to(self.device),
            }
        }
        return batch_dict
    def __len__(self):
        # Number of caption rows in the metadata table.
        return len(self.dataset)
class VGGishFeaturesDataset(Dataset):
    """Audio-only dataset serving VGGish (128-d) feature stacks.

    As with the other feature datasets, __getitem__ receives a tensor of
    batch indices and returns an assembled batch dict.
    """
    def __init__(self, features_path, feature_name, meta_path, device, pad_idx, get_full_feat, cfg):
        self.cfg = cfg
        self.features_path = features_path
        # NOTE(review): the `feature_name` argument is ignored; the name
        # is hard-coded to 'vggish_features'. Confirm this is intended.
        self.feature_name = 'vggish_features'
        self.feature_names_list = [self.feature_name]
        self.device = device
        self.dataset = pd.read_csv(meta_path, sep='\t')
        self.pad_idx = pad_idx
        self.get_full_feat = get_full_feat
        self.feature_size = 128
    def __getitem__(self, indices):
        video_ids, captions, starts, ends, aud_stacks = [], [], [], [], []
        # [3]
        for idx in indices:
            idx = idx.item()
            video_id, caption, start, end, duration, _, _ = self.dataset.iloc[idx]
            stack = load_features_from_npy(
                self.cfg, self.feature_names_list, video_id, start, end, duration,
                self.pad_idx, self.get_full_feat
            )
            aud_stack = stack['audio']
            # sometimes stack is empty after the filtering. we replace it with noise
            if aud_stack is None:
                # print(f'VGGish is None. Zero (1, D) @: {video_id}')
                aud_stack = fill_missing_features('zero', self.feature_size)
            # append info for this index to the lists
            video_ids.append(video_id)
            captions.append(caption)
            starts.append(start)
            ends.append(end)
            aud_stacks.append(aud_stack)
        # [4] see ActivityNetCaptionsDataset.__getitem__ documentation
        aud_stacks = pad_sequence(
            aud_stacks, batch_first=True, padding_value=self.pad_idx)
        starts = torch.tensor(starts).unsqueeze(1)
        ends = torch.tensor(ends).unsqueeze(1)
        batch_dict = {
            'video_ids': video_ids,
            'captions': captions,
            'starts': starts.to(self.device),
            'ends': ends.to(self.device),
            'feature_stacks': {
                'audio': aud_stacks.to(self.device),
            }
        }
        return batch_dict
    def __len__(self):
        # Number of caption rows in the metadata table.
        return len(self.dataset)
class AudioVideoFeaturesDataset(Dataset):
    """Joint dataset serving I3D video (RGB + flow) and VGGish audio
    feature stacks for each caption row.

    __getitem__ receives a tensor of batch indices and returns an
    assembled batch dict with all three modalities.
    """
    def __init__(self, video_features_path, video_feature_name, audio_features_path,
                 audio_feature_name, meta_path, device, pad_idx, get_full_feat, cfg):
        self.cfg = cfg
        self.video_features_path = video_features_path
        self.video_feature_name = f'{video_feature_name}_features'
        self.audio_features_path = audio_features_path
        self.audio_feature_name = f'{audio_feature_name}_features'
        self.feature_names_list = [
            self.video_feature_name, self.audio_feature_name]
        self.device = device
        self.dataset = pd.read_csv(meta_path, sep='\t')
        self.pad_idx = pad_idx
        self.get_full_feat = get_full_feat
        # Only I3D (1024-d) video and VGGish (128-d) audio are supported.
        if self.video_feature_name == 'i3d_features':
            self.video_feature_size = 1024
        else:
            raise Exception(f'Inspect: "{self.video_feature_name}"')
        if self.audio_feature_name == 'vggish_features':
            self.audio_feature_size = 128
        else:
            raise Exception(f'Inspect: "{self.audio_feature_name}"')
    def __getitem__(self, indices):
        video_ids, captions, starts, ends = [], [], [], []
        vid_stacks_rgb, vid_stacks_flow, aud_stacks = [], [], []
        # [3]
        for idx in indices:
            idx = idx.item()
            video_id, caption, start, end, duration, _, _ = self.dataset.iloc[idx]
            stack = load_features_from_npy(
                self.cfg, self.feature_names_list,
                video_id, start, end, duration, self.pad_idx, self.get_full_feat
            )
            vid_stack_rgb, vid_stack_flow, aud_stack = stack['rgb'], stack['flow'], stack['audio']
            # either both None or both are not None (Boolean Equivalence)
            both_are_None = vid_stack_rgb is None and vid_stack_flow is None
            none_is_None = vid_stack_rgb is not None and vid_stack_flow is not None
            assert both_are_None or none_is_None
            # sometimes vid_stack and aud_stack are empty after the filtering.
            # we replace it with noise.
            # tied with assertion above
            if (vid_stack_rgb is None) and (vid_stack_flow is None):
                # print(f'RGB and FLOW are None. Zero (1, D) @: {video_id}')
                vid_stack_rgb = fill_missing_features(
                    'zero', self.video_feature_size)
                vid_stack_flow = fill_missing_features(
                    'zero', self.video_feature_size)
            if aud_stack is None:
                # print(f'Audio is None. Zero (1, D) @: {video_id}')
                aud_stack = fill_missing_features(
                    'zero', self.audio_feature_size)
            # append info for this index to the lists
            video_ids.append(video_id)
            captions.append(caption)
            starts.append(start)
            ends.append(end)
            vid_stacks_rgb.append(vid_stack_rgb)
            vid_stacks_flow.append(vid_stack_flow)
            aud_stacks.append(aud_stack)
        # [4] see ActivityNetCaptionsDataset.__getitem__ documentation
        # rgb is padded with pad_idx; flow is padded with 0s: expected to be summed later
        vid_stacks_rgb = pad_sequence(
            vid_stacks_rgb, batch_first=True, padding_value=self.pad_idx)
        vid_stacks_flow = pad_sequence(
            vid_stacks_flow, batch_first=True, padding_value=0)
        aud_stacks = pad_sequence(
            aud_stacks, batch_first=True, padding_value=self.pad_idx)
        starts = torch.tensor(starts).unsqueeze(1)
        ends = torch.tensor(ends).unsqueeze(1)
        batch_dict = {
            'video_ids': video_ids,
            'captions': captions,
            'starts': starts.to(self.device),
            'ends': ends.to(self.device),
            'feature_stacks': {
                'rgb': vid_stacks_rgb.to(self.device),
                'flow': vid_stacks_flow.to(self.device),
                'audio': aud_stacks.to(self.device),
            }
        }
        return batch_dict
    def __len__(self):
        # Number of caption rows in the metadata table.
        return len(self.dataset)
class ActivityNetCaptionsDataset(Dataset):
    """Top-level dataset pairing caption batches with feature batches.

    Wraps a torchtext caption iterator (which yields whole batches with
    an `idx` field) and one of the modality-specific feature datasets;
    each __getitem__ call advances the caption iterator and fetches the
    matching features.
    """
    def __init__(self, cfg, phase, get_full_feat):
        '''
        For the doc see the __getitem__.
        '''
        self.cfg = cfg
        self.phase = phase
        self.get_full_feat = get_full_feat
        self.feature_names = f'{cfg.video_feature_name}_{cfg.audio_feature_name}'
        # Select metadata path and batch size per phase.
        if phase == 'train':
            self.meta_path = cfg.train_meta_path
            self.batch_size = cfg.train_batch_size
        elif phase == 'val_1':
            self.meta_path = cfg.val_1_meta_path
            self.batch_size = cfg.inference_batch_size
        elif phase == 'val_2':
            self.meta_path = cfg.val_2_meta_path
            self.batch_size = cfg.inference_batch_size
        elif phase == 'learned_props':
            self.meta_path = cfg.val_prop_meta_path
            self.batch_size = cfg.inference_batch_size
        else:
            raise NotImplementedError
        # caption dataset *iterator*
        self.train_vocab, self.caption_loader = caption_iterator(
            cfg, self.batch_size, self.phase)
        self.trg_voc_size = len(self.train_vocab)
        self.pad_idx = self.train_vocab.stoi[cfg.pad_token]
        self.start_idx = self.train_vocab.stoi[cfg.start_token]
        self.end_idx = self.train_vocab.stoi[cfg.end_token]
        # Pick the feature dataset matching the configured modality.
        if cfg.modality == 'video':
            self.features_dataset = I3DFeaturesDataset(
                cfg.video_features_path, cfg.video_feature_name, self.meta_path,
                torch.device(cfg.device), self.pad_idx, self.get_full_feat, cfg
            )
        elif cfg.modality == 'audio':
            self.features_dataset = VGGishFeaturesDataset(
                cfg.audio_features_path, cfg.audio_feature_name, self.meta_path,
                torch.device(cfg.device), self.pad_idx, self.get_full_feat, cfg
            )
        elif cfg.modality == 'audio_video':
            self.features_dataset = AudioVideoFeaturesDataset(
                cfg.video_features_path, cfg.video_feature_name, cfg.audio_features_path,
                cfg.audio_feature_name, self.meta_path, torch.device(
                    cfg.device), self.pad_idx,
                self.get_full_feat, cfg
            )
        else:
            raise Exception(
                f'it is not implemented for modality: {cfg.modality}')
        # initialize the caption loader iterator
        self.caption_loader_iter = iter(self.caption_loader)
    def __getitem__(self, dataset_index):
        # NOTE: `dataset_index` is ignored; batches come from the caption
        # iterator in its own (shuffled) order.
        caption_data = next(self.caption_loader_iter)
        to_return = self.features_dataset[caption_data.idx]
        to_return['caption_data'] = caption_data
        return to_return
    def __len__(self):
        # Length in *batches*, not individual samples.
        return len(self.caption_loader)
    def update_iterator(self):
        '''This should be called after every epoch'''
        self.caption_loader_iter = iter(self.caption_loader)
    def dont_collate(self, batch):
        # Identity collate: __getitem__ already returns a full batch, so
        # a DataLoader wrapper should just unwrap the singleton list.
        return batch[0]
| 37.81117 | 100 | 0.615671 |
66c214c215fd2efbfc04c6a66994e84b0e2b6377 | 1,155 | py | Python | src/awsCluster/util/FastqcParser.py | AspirinCode/jupyter-genomics | d45526fab3de8fcc3d9fef005d4e39368ff3dfdc | [
"MIT"
] | 2 | 2019-01-04T08:17:27.000Z | 2021-04-10T02:59:35.000Z | src/cirrus_ngs/deprecated/util/FastqcParser.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | null | null | null | src/cirrus_ngs/deprecated/util/FastqcParser.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | 2 | 2021-09-10T02:57:51.000Z | 2021-09-21T00:16:56.000Z | __author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import zipfile
import re
import os
def read_data_file(workspace, file_name):
    # Parses one FastQC result zip and prints the summed percentage of
    # overrepresented sequences (those above 0.1%) for that file.
    # NOTE: Python 2 code (print statement, str zip contents).
    archive = zipfile.ZipFile(workspace + "/" + file_name, 'r')
    # FastQC zips contain a folder named after the archive (minus '.zip').
    data_file = archive.read(file_name[:-4] + "/fastqc_data.txt")
    percentages = 0.0
    lines = re.split(r'\n+', data_file)
    # `printable` tracks whether we are inside the
    # 'Overrepresented sequences' module of the report.
    printable = False
    for line in lines:
        if line.find("Overrepresented sequences") > -1:
            printable = True
            continue
        if printable and line.find(">>END_MODULE") > -1:
            printable = False
        if printable:
            if line.startswith("#"):
                continue
            else:
                # Column 2 of each data row is the percentage; only count
                # sequences above the 0.1% threshold.
                fields = re.split(r'\t+', line)
                if float(fields[2]) > 0.1:
                    percentages = percentages + float(fields[2])
    print file_name + "\t" + str(percentages)
if __name__ == "__main__":
    # Ad-hoc driver: scans a hard-coded workspace for FastQC zip archives
    # and reports overrepresented-sequence percentages per file.
    workspace = "/Users/guorongxu/Desktop/workspace/NGSProjects/SmallRNASeq/042716_Ying_Olefsky_mirseq"
    for root, dirs, files in os.walk(workspace):
        for file in files:
            if file.endswith(".zip"):
                # NOTE(review): passes `workspace` rather than `root`, so
                # zips found in subdirectories would fail to open -- confirm
                # all archives live directly under `workspace`.
                read_data_file(workspace, file)
| 28.170732 | 103 | 0.587013 |
9f3293c9123a3f57888439e723f3797b9c7676b1 | 949 | py | Python | shiksha_buddy/shiksha_buddy/urls.py | emopankaj/shiksha-buddy | a178e5be8b2da5fdaca33e7385b0115d5361e55c | [
"MIT"
] | 1 | 2020-01-12T06:35:28.000Z | 2020-01-12T06:35:28.000Z | shiksha_buddy/shiksha_buddy/urls.py | emopankaj/shiksha-buddy | a178e5be8b2da5fdaca33e7385b0115d5361e55c | [
"MIT"
] | 4 | 2021-03-10T02:40:28.000Z | 2022-02-26T21:39:34.000Z | shiksha_buddy/shiksha_buddy/urls.py | emopankaj/shiksha-buddy | a178e5be8b2da5fdaca33e7385b0115d5361e55c | [
"MIT"
] | null | null | null | """shiksha_buddy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .routers import router
from django.views.generic import TemplateView
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # API endpoints registered on the project router (see .routers).
    path('api/', include(router.urls)),
    # Serves the static index.html template at /article.
    path('article', TemplateView.as_view(template_name='index.html')),
]
| 36.5 | 77 | 0.719705 |
f75a3f329682d190ddeb46937a0d3eaf4fce8d33 | 615 | py | Python | Mundo1/026.py | eliascastrosousa/exerciciospythonmundo1 | ec6326952e69ca60d018476308b96959b92a24d8 | [
"MIT"
] | null | null | null | Mundo1/026.py | eliascastrosousa/exerciciospythonmundo1 | ec6326952e69ca60d018476308b96959b92a24d8 | [
"MIT"
] | null | null | null | Mundo1/026.py | eliascastrosousa/exerciciospythonmundo1 | ec6326952e69ca60d018476308b96959b92a24d8 | [
"MIT"
] | null | null | null | # desafio 026: Faça um programa que leia uma frase pelo teclado e mostre quantas vezes aparece a letra "A", em que posição ela aparece a
# primeira vez e em que posição ela aparece a última vez.
# Read the user's favourite author, then report how many times the letter
# "a" occurs (case-insensitive) and the 1-based positions of its first and
# last occurrence.
autor = input('Digite o seu autor preferido: ').strip()
normalizado = autor.lower()
quantidade = normalizado.count('a')
primeira_pos = normalizado.find('a')
ultima_pos = normalizado.rfind('a')
print('o nome do autor {}'.format(autor))
print('a letra a, aparece {} vezes.'.format(quantidade))
print('Aparece pela primeira vez em {}.'.format(primeira_pos + 1))
print('aparece pela ultima vez em {}.'.format(ultima_pos + 1))
| 47.307692 | 136 | 0.715447 |
f9b6e67c0e7aaf44b705a82893ad782d1c457d76 | 4,497 | py | Python | perfkitbenchmarker/network.py | gsee2000/PerfKitBenchmarker | c49fff81040cbc2383d8f4d9513e4f610b65c2db | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/network.py | gsee2000/PerfKitBenchmarker | c49fff81040cbc2383d8f4d9513e4f610b65c2db | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/network.py | gsee2000/PerfKitBenchmarker | c49fff81040cbc2383d8f4d9513e4f610b65c2db | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing abstract classes related to VM networking.
The Firewall class provides a way of opening VM ports. The Network class allows
VMs to communicate via internal ips and isolates PerfKitBenchmarker VMs from
others in the
same project.
"""
from perfkitbenchmarker import context
from perfkitbenchmarker import errors
class BaseFirewall(object):
  """Abstract firewall that opens ports/protocols for benchmark VMs.

  Subclasses set CLOUD and implement the Allow*/Disallow* hooks; the base
  implementations are no-ops.
  """

  CLOUD = None

  @classmethod
  def GetFirewall(cls):
    """Return the shared firewall instance registered for this cloud.

    Instances are cached on the current thread's BenchmarkSpec, so every VM
    calling this gets the same BaseFirewall object per cloud.

    Raises:
      errors.Error: if the class defines no CLOUD attribute, or the calling
        thread has no associated BenchmarkSpec.
    """
    if cls.CLOUD is None:
      raise errors.Error('Firewalls should have CLOUD attributes.')
    spec = context.GetThreadBenchmarkSpec()
    if spec is None:
      raise errors.Error('GetFirewall called in a thread without a '
                         'BenchmarkSpec.')
    with spec.firewalls_lock:
      registry = spec.firewalls
      if cls.CLOUD not in registry:
        registry[cls.CLOUD] = cls()
      return registry[cls.CLOUD]

  def AllowIcmp(self, vm):
    """Open the ICMP protocol for *vm*. No-op in the base class.

    Args:
      vm: The BaseVirtualMachine object to open the ICMP protocol for.
    """
    pass

  def AllowPort(self, vm, start_port, end_port=None):
    """Open a port (or inclusive port range) for *vm*. No-op here.

    Args:
      vm: The BaseVirtualMachine object to open the port for.
      start_port: First local port of the range to open.
      end_port: Last port of the range; if None, only start_port is opened.
    """
    pass

  def DisallowAllPorts(self):
    """Close all ports on the firewall. No-op in the base class."""
    pass
class BaseNetworkSpec(object):
  """Bundle of parameters needed to create a Network."""

  def __init__(self, zone=None):
    """Store the creation parameters.

    Args:
      zone: Optional zone in which the network should be created.
    """
    self.zone = zone
class BaseNetwork(object):
  """Abstract network shared by the VMs of a benchmark run."""

  CLOUD = None

  def __init__(self, spec):
    self.zone = spec.zone

  @staticmethod
  def _GetNetworkSpecFromVm(vm):
    """Build a BaseNetworkSpec from the VM's attributes (currently its zone)."""
    return BaseNetworkSpec(zone=vm.zone)

  @classmethod
  def _GetKeyFromNetworkSpec(cls, spec):
    """Registry key under which a network built from *spec* is cached.

    Raises:
      errors.Error: if the class defines no CLOUD attribute.
    """
    if cls.CLOUD is None:
      raise errors.Error('Networks should have CLOUD attributes.')
    return (cls.CLOUD, spec.zone)

  @classmethod
  def GetNetwork(cls, vm):
    """Return the shared network instance for *vm*, creating it if needed.

    Networks are cached on the thread's BenchmarkSpec keyed by
    (cloud, zone) -- see _GetKeyFromNetworkSpec -- so VMs with the same key
    share one BaseNetwork object.

    Args:
      vm: The VM for which the Network is being created.

    Raises:
      errors.Error: if the calling thread has no associated BenchmarkSpec.
    """
    spec_for_thread = context.GetThreadBenchmarkSpec()
    if spec_for_thread is None:
      raise errors.Error('GetNetwork called in a thread without a '
                         'BenchmarkSpec.')
    network_spec = cls._GetNetworkSpecFromVm(vm)
    registry_key = cls._GetKeyFromNetworkSpec(network_spec)
    with spec_for_thread.networks_lock:
      registry = spec_for_thread.networks
      if registry_key not in registry:
        registry[registry_key] = cls(network_spec)
      return registry[registry_key]

  def Create(self):
    """Creates the actual network. No-op in the base class."""
    pass

  def Delete(self):
    """Deletes the actual network. No-op in the base class."""
    pass
| 31.893617 | 79 | 0.71203 |
2afce7f2a18dee8ba1ff820796237f9e030383aa | 107 | py | Python | python_crash_course/2-5.py | poplol240/programs | 73660555b866702091141a7c2231f96f18894cf5 | [
"MIT"
] | null | null | null | python_crash_course/2-5.py | poplol240/programs | 73660555b866702091141a7c2231f96f18894cf5 | [
"MIT"
] | null | null | null | python_crash_course/2-5.py | poplol240/programs | 73660555b866702091141a7c2231f96f18894cf5 | [
"MIT"
# Store the quotation once and reuse it when printing the attributed line.
# (Defect fixed: these lines were fused with dataset metadata and were not
# valid Python; the clean statements are restored below.)
message = '"A small step of human, a huge step for humanity"'
# NOTE(review): "Amstrong" and the quote wording look misspelled, but they are
# the script's original output text -- left unchanged.
print("Neil Amstrong once said: " + message)
4b454b678fef1df5cd89bdb08413cc65545c6169 | 23,089 | py | Python | tests/integration/test_pr_comment.py | FrNecas/packit-service | f7101d20e2e09c4940009225f0cd9ccf644f646f | [
"MIT"
] | 11 | 2020-08-12T12:51:32.000Z | 2022-02-05T09:48:11.000Z | tests/integration/test_pr_comment.py | FrNecas/packit-service | f7101d20e2e09c4940009225f0cd9ccf644f646f | [
"MIT"
] | 727 | 2020-08-03T07:53:44.000Z | 2022-03-31T12:51:19.000Z | tests/integration/test_pr_comment.py | FrNecas/packit-service | f7101d20e2e09c4940009225f0cd9ccf644f646f | [
"MIT"
] | 9 | 2021-01-08T13:54:07.000Z | 2022-02-14T14:33:08.000Z | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import json
from typing import List
import pytest
from celery.canvas import Signature
from flexmock import flexmock
from github import Github
from packit_service.worker.monitoring import Pushgateway
from ogr.services.github import GithubProject
from packit.config import JobConfigTriggerType
from packit.local_project import LocalProject
from packit_service.config import ServiceConfig
from packit_service.constants import (
SANDCASTLE_WORK_DIR,
TASK_ACCEPTED,
PG_COPR_BUILD_STATUS_SUCCESS,
)
from packit_service.models import PullRequestModel, JobTriggerModelType, JobTriggerModel
from packit_service.service.db_triggers import AddPullRequestDbTrigger
from packit_service.worker.build import copr_build
from packit_service.worker.build.copr_build import CoprBuildJobHelper
from packit_service.worker.build.koji_build import KojiBuildJobHelper
from packit_service.worker.jobs import SteveJobs, get_packit_commands_from_comment
from packit_service.worker.result import TaskResults
from packit_service.worker.tasks import (
run_copr_build_handler,
run_koji_build_handler,
run_testing_farm_handler,
)
from packit_service.worker.testing_farm import TestingFarmJobHelper
from packit_service.worker.allowlist import Allowlist
from packit_service.worker.reporting import BaseCommitStatus
from tests.spellbook import DATA_DIR, first_dict_value, get_parameters_from_results
@pytest.fixture(scope="module")
def pr_copr_build_comment_event():
    """Webhook payload parsed from pr_comment_copr_build.json."""
    event_path = DATA_DIR / "webhooks" / "github" / "pr_comment_copr_build.json"
    return json.loads(event_path.read_text())
@pytest.fixture(scope="module")
def pr_build_comment_event():
    """Webhook payload parsed from pr_comment_build.json."""
    event_path = DATA_DIR / "webhooks" / "github" / "pr_comment_build.json"
    return json.loads(event_path.read_text())
@pytest.fixture(scope="module")
def pr_production_build_comment_event():
    """Webhook payload parsed from pr_comment_production_build.json."""
    event_path = (
        DATA_DIR / "webhooks" / "github" / "pr_comment_production_build.json"
    )
    return json.loads(event_path.read_text())
@pytest.fixture(scope="module")
def pr_embedded_command_comment_event():
    """Webhook payload parsed from pr_comment_embedded_command.json."""
    event_path = (
        DATA_DIR / "webhooks" / "github" / "pr_comment_embedded_command.json"
    )
    return json.loads(event_path.read_text())
@pytest.fixture(scope="module")
def pr_empty_comment_event():
    """Webhook payload parsed from pr_comment_empty.json."""
    event_path = DATA_DIR / "webhooks" / "github" / "pr_comment_empty.json"
    return json.loads(event_path.read_text())
@pytest.fixture(scope="module")
def pr_packit_only_comment_event():
    """Webhook payload parsed from issue_comment_packit_only.json."""
    event_path = (
        DATA_DIR / "webhooks" / "github" / "issue_comment_packit_only.json"
    )
    return json.loads(event_path.read_text())
@pytest.fixture(scope="module")
def pr_wrong_packit_comment_event():
    """Webhook payload parsed from issue_comment_wrong_packit_command.json."""
    event_path = (
        DATA_DIR / "webhooks" / "github" / "issue_comment_wrong_packit_command.json"
    )
    return json.loads(event_path.read_text())
# Parametrized over three job matrices: copr_build only, tests only, and both.
@pytest.fixture(
    params=[
        [
            {
                "trigger": "pull_request",
                "job": "copr_build",
                "metadata": {"targets": "fedora-rawhide-x86_64"},
            }
        ],
        [
            {
                "trigger": "pull_request",
                "job": "tests",
                "metadata": {"targets": "fedora-rawhide-x86_64"},
            }
        ],
        [
            {
                "trigger": "pull_request",
                "job": "copr_build",
                "metadata": {"targets": "fedora-rawhide-x86_64"},
            },
            {
                "trigger": "pull_request",
                "job": "tests",
                "metadata": {"targets": "fedora-rawhide-x86_64"},
            },
        ],
    ]
)
def mock_pr_comment_functionality(request):
    """Install the common mocks needed by the PR-comment handler tests.

    Builds an in-memory packit.yaml with the parametrized job list and stubs
    out the GitHub project, service config, DB trigger lookups, local project
    refresh and the allowlist check.
    """
    # packit.yaml content served by the mocked GithubProject.get_file_content.
    packit_yaml = (
        "{'specfile_path': 'the-specfile.spec', 'synced_files': [], 'jobs': "
        + str(request.param)
        + "}"
    )
    flexmock(
        GithubProject,
        full_repo_name="packit-service/hello-world",
        get_file_content=lambda path, ref: packit_yaml,
        get_files=lambda ref, filter_regex: ["the-specfile.spec"],
        get_web_url=lambda: "https://github.com/the-namespace/the-repo",
    )
    flexmock(Github, get_repo=lambda full_name_or_id: None)
    config = ServiceConfig()
    config.command_handler_work_dir = SANDCASTLE_WORK_DIR
    flexmock(ServiceConfig).should_receive("get_service_config").and_return(config)
    # DB trigger stub shared by the property and the get_by_id lookup.
    trigger = flexmock(
        job_config_trigger_type=JobConfigTriggerType.pull_request, id=123
    )
    flexmock(AddPullRequestDbTrigger).should_receive("db_trigger").and_return(trigger)
    flexmock(PullRequestModel).should_receive("get_by_id").with_args(123).and_return(
        trigger
    )
    flexmock(LocalProject, refresh_the_arguments=lambda: None)
    # Allowlist always passes so handlers are not short-circuited.
    flexmock(Allowlist, check_and_report=True)
def one_job_finished_with_msg(results: List[TaskResults], msg: str):
    """Assert jobs succeed until one finishes with *msg*.

    Walks *results* in order, requiring each to be successful, and returns as
    soon as one carries the expected detail message (later entries are not
    inspected, matching the original short-circuit behaviour).

    Raises:
        AssertionError: if no job finished with *msg*.
    """
    for result in results:
        assert result["success"]
        if result["details"]["msg"] == msg:
            return
    raise AssertionError(f"None of the jobs finished with {msg!r}")
def test_pr_comment_copr_build_handler(
    mock_pr_comment_functionality, pr_copr_build_comment_event
):
    """A copr-build PR comment is accepted and a Copr build task succeeds.

    Stubs the DB lookup, GitHub project/PR/comment interactions and the build
    helper, then drives SteveJobs over the webhook payload and runs the
    resulting Copr build task.
    """
    flexmock(PullRequestModel).should_receive("get_or_create").with_args(
        pr_id=9,
        namespace="packit-service",
        repo_name="hello-world",
        project_url="https://github.com/packit-service/hello-world",
    ).and_return(
        flexmock(id=9, job_config_trigger_type=JobConfigTriggerType.pull_request)
    )
    # The actual build must be triggered exactly once.
    flexmock(CoprBuildJobHelper).should_receive("run_copr_build").and_return(
        TaskResults(success=True, details={})
    ).once()
    flexmock(GithubProject).should_receive("get_files").and_return(["foo.spec"])
    flexmock(GithubProject).should_receive("get_web_url").and_return(
        "https://github.com/the-namespace/the-repo"
    )
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    flexmock(copr_build).should_receive("get_valid_build_targets").and_return(set())
    # Accepting the task must be reported on all commit statuses once.
    flexmock(CoprBuildJobHelper).should_receive("report_status_to_all").with_args(
        description=TASK_ACCEPTED,
        state=BaseCommitStatus.pending,
        url="",
    ).once()
    flexmock(Signature).should_receive("apply_async").once()
    flexmock(Pushgateway).should_receive("push").twice().and_return()
    pr = flexmock(head_commit="12345")
    flexmock(GithubProject).should_receive("get_pr").and_return(pr)
    comment = flexmock()
    flexmock(pr).should_receive("get_comment").and_return(comment)
    # The triggering comment is acknowledged with a "+1" reaction.
    flexmock(comment).should_receive("add_reaction").with_args("+1").once()
    processing_results = SteveJobs().process_message(pr_copr_build_comment_event)
    event_dict, job, job_config, package_config = get_parameters_from_results(
        processing_results
    )
    assert json.dumps(event_dict)
    results = run_copr_build_handler(
        package_config=package_config,
        event=event_dict,
        job_config=job_config,
    )
    assert first_dict_value(results["job"])["success"]
def test_pr_comment_build_handler(
    mock_pr_comment_functionality, pr_build_comment_event
):
    """A '/packit build' PR comment runs the Copr build handler successfully."""
    flexmock(PullRequestModel).should_receive("get_or_create").with_args(
        pr_id=9,
        namespace="packit-service",
        repo_name="hello-world",
        project_url="https://github.com/packit-service/hello-world",
    ).and_return(
        flexmock(id=9, job_config_trigger_type=JobConfigTriggerType.pull_request)
    )
    flexmock(CoprBuildJobHelper).should_receive("run_copr_build").and_return(
        TaskResults(success=True, details={})
    )
    flexmock(GithubProject, get_files="foo.spec")
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    flexmock(copr_build).should_receive("get_valid_build_targets").and_return(set())
    # Accepting the task must be reported on all commit statuses once.
    flexmock(CoprBuildJobHelper).should_receive("report_status_to_all").with_args(
        description=TASK_ACCEPTED,
        state=BaseCommitStatus.pending,
        url="",
    ).once()
    flexmock(Signature).should_receive("apply_async").once()
    flexmock(Pushgateway).should_receive("push").twice().and_return()
    pr = flexmock(head_commit="12345")
    flexmock(GithubProject).should_receive("get_pr").and_return(pr)
    comment = flexmock()
    flexmock(pr).should_receive("get_comment").and_return(comment)
    # The triggering comment is acknowledged with a "+1" reaction.
    flexmock(comment).should_receive("add_reaction").with_args("+1").once()
    processing_results = SteveJobs().process_message(pr_build_comment_event)
    event_dict, job, job_config, package_config = get_parameters_from_results(
        processing_results
    )
    assert json.dumps(event_dict)
    results = run_copr_build_handler(
        package_config=package_config,
        event=event_dict,
        job_config=job_config,
    )
    assert first_dict_value(results["job"])["success"]
def test_pr_comment_production_build_handler(pr_production_build_comment_event):
    """A production-build PR comment runs the Koji (scratch) build handler.

    This test sets up its own packit.yaml/mocks instead of using the shared
    mock_pr_comment_functionality fixture because it needs a
    'production_build' job with scratch=true.
    """
    packit_yaml = str(
        {
            "specfile_path": "the-specfile.spec",
            "synced_files": [],
            "jobs": [
                {
                    "trigger": "pull_request",
                    "job": "production_build",
                    "metadata": {"targets": "fedora-rawhide-x86_64", "scratch": "true"},
                }
            ],
        }
    )
    comment = flexmock(add_reaction=lambda reaction: None)
    flexmock(
        GithubProject,
        full_repo_name="packit-service/hello-world",
        get_file_content=lambda path, ref: packit_yaml,
        get_files=lambda ref, filter_regex: ["the-specfile.spec"],
        get_web_url=lambda: "https://github.com/the-namespace/the-repo",
        get_pr=lambda pr_id: flexmock(
            head_commit="12345", get_comment=lambda comment_id: comment
        ),
    )
    flexmock(Github, get_repo=lambda full_name_or_id: None)
    config = ServiceConfig()
    config.command_handler_work_dir = SANDCASTLE_WORK_DIR
    flexmock(ServiceConfig).should_receive("get_service_config").and_return(config)
    # DB trigger stub shared by the property and the get_by_id lookup.
    trigger = flexmock(
        job_config_trigger_type=JobConfigTriggerType.pull_request, id=123
    )
    flexmock(AddPullRequestDbTrigger).should_receive("db_trigger").and_return(trigger)
    flexmock(PullRequestModel).should_receive("get_by_id").with_args(123).and_return(
        trigger
    )
    flexmock(LocalProject, refresh_the_arguments=lambda: None)
    flexmock(Allowlist, check_and_report=True)
    flexmock(PullRequestModel).should_receive("get_or_create").with_args(
        pr_id=9,
        namespace="packit-service",
        repo_name="hello-world",
        project_url="https://github.com/packit-service/hello-world",
    ).and_return(
        flexmock(id=9, job_config_trigger_type=JobConfigTriggerType.pull_request)
    )
    flexmock(KojiBuildJobHelper).should_receive("run_koji_build").and_return(
        TaskResults(success=True, details={})
    )
    flexmock(GithubProject, get_files="foo.spec")
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    # Accepting the task must be reported on all commit statuses once.
    flexmock(KojiBuildJobHelper).should_receive("report_status_to_all").with_args(
        description=TASK_ACCEPTED,
        state=BaseCommitStatus.pending,
        url="",
    ).once()
    flexmock(Signature).should_receive("apply_async").once()
    flexmock(Pushgateway).should_receive("push").twice().and_return()
    pr = flexmock(head_commit="12345")
    flexmock(GithubProject).should_receive("get_pr").and_return(pr)
    comment = flexmock()
    flexmock(pr).should_receive("get_comment").and_return(comment)
    # The triggering comment is acknowledged with a "+1" reaction.
    flexmock(comment).should_receive("add_reaction").with_args("+1").once()
    processing_results = SteveJobs().process_message(pr_production_build_comment_event)
    event_dict, job, job_config, package_config = get_parameters_from_results(
        processing_results
    )
    assert json.dumps(event_dict)
    results = run_koji_build_handler(
        package_config=package_config,
        event=event_dict,
        job_config=job_config,
    )
    assert first_dict_value(results["job"])["success"]
# Comments that must NOT be recognized as packit commands: empty/whitespace
# bodies, unrelated text, and "/packit build" not alone on its own line.
@pytest.mark.parametrize(
    "comment",
    (
        "",
        " ",
        "  ",
        "some unrelated",
        "some\nmore\nunrelated\ntext",
        "even\nsome → unicode",
        " stuff",
        " \n ",
        "x ",
        """comment with embedded /packit build not recognized
        unless /packit command is on line by itself""",
        "\n2nd line\n\n4th line",
        "1st line\n\t\n\t\t\n4th line\n",
    ),
)
def test_pr_comment_invalid(comment):
    """No packit command is extracted from a non-command comment body."""
    commands = get_packit_commands_from_comment(comment)
    assert len(commands) == 0
# Valid spellings of "/packit build": leading/trailing whitespace and the
# command embedded on its own line within a longer comment.
@pytest.mark.parametrize(
    "comments_list",
    (
        "/packit build",
        "/packit build ",
        "/packit build  ",
        " /packit build",
        " /packit build ",
        "asd\n/packit build\n",
        "asd\n /packit build \n",
        "Should be fixed now, let's\n /packit build\n it.",
    ),
)
def test_pr_embedded_command_handler(
    mock_pr_comment_functionality, pr_embedded_command_comment_event, comments_list
):
    """Each accepted command spelling triggers a successful Copr build run."""
    flexmock(PullRequestModel).should_receive("get_or_create").with_args(
        pr_id=9,
        namespace="packit-service",
        repo_name="hello-world",
        project_url="https://github.com/packit-service/hello-world",
    ).and_return(
        flexmock(id=9, job_config_trigger_type=JobConfigTriggerType.pull_request)
    )
    # Substitute the parametrized comment body into the recorded payload.
    pr_embedded_command_comment_event["comment"]["body"] = comments_list
    flexmock(CoprBuildJobHelper).should_receive("run_copr_build").and_return(
        TaskResults(success=True, details={})
    )
    flexmock(GithubProject, get_files="foo.spec")
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    pr = flexmock(head_commit="12345")
    flexmock(GithubProject).should_receive("get_pr").and_return(pr)
    comment = flexmock()
    flexmock(pr).should_receive("get_comment").and_return(comment)
    # The triggering comment is acknowledged with a "+1" reaction.
    flexmock(comment).should_receive("add_reaction").with_args("+1").once()
    flexmock(copr_build).should_receive("get_valid_build_targets").and_return(set())
    flexmock(CoprBuildJobHelper).should_receive("report_status_to_all").with_args(
        description=TASK_ACCEPTED,
        state=BaseCommitStatus.pending,
        url="",
    ).once()
    flexmock(Signature).should_receive("apply_async").once()
    flexmock(Pushgateway).should_receive("push").twice().and_return()
    processing_results = SteveJobs().process_message(pr_embedded_command_comment_event)
    event_dict, job, job_config, package_config = get_parameters_from_results(
        processing_results
    )
    assert json.dumps(event_dict)
    results = run_copr_build_handler(
        package_config=package_config,
        event=event_dict,
        job_config=job_config,
    )
    assert first_dict_value(results["job"])["success"]
def test_pr_comment_empty_handler(
    mock_pr_comment_functionality, pr_empty_comment_event
):
    """An empty PR comment yields no processing results."""
    flexmock(GithubProject).should_receive("can_merge_pr").and_return(True)
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    flexmock(GithubProject).should_receive("get_pr").and_return(
        flexmock(head_commit="12345")
    )
    assert SteveJobs().process_message(pr_empty_comment_event) == []
def test_pr_comment_packit_only_handler(
    mock_pr_comment_functionality, pr_packit_only_comment_event
):
    """A bare '/packit' comment without a command yields no results."""
    flexmock(GithubProject).should_receive("can_merge_pr").and_return(True)
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    flexmock(GithubProject).should_receive("get_pr").and_return(
        flexmock(head_commit="12345")
    )
    assert SteveJobs().process_message(pr_packit_only_comment_event) == []
def test_pr_comment_wrong_packit_command_handler(
    mock_pr_comment_functionality, pr_wrong_packit_comment_event
):
    """An unknown packit command yields no processing results."""
    flexmock(GithubProject).should_receive("can_merge_pr").and_return(True)
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    flexmock(GithubProject).should_receive("get_pr").and_return(
        flexmock(head_commit="12345")
    )
    assert SteveJobs().process_message(pr_wrong_packit_comment_event) == []
def test_pr_test_command_handler(pr_embedded_command_comment_event):
    """A '/packit test' comment runs testing farm on the latest Copr build."""
    jobs = [
        {
            "trigger": "pull_request",
            "job": "tests",
            "metadata": {"targets": "fedora-rawhide-x86_64"},
        }
    ]
    # packit.yaml served through the mocked GithubProject.get_file_content.
    packit_yaml = (
        "{'specfile_path': 'the-specfile.spec', 'synced_files': [], 'jobs': "
        + str(jobs)
        + "}"
    )
    pr = flexmock(head_commit="12345")
    flexmock(GithubProject).should_receive("get_pr").and_return(pr)
    comment = flexmock()
    flexmock(pr).should_receive("get_comment").and_return(comment)
    # The triggering comment is acknowledged with a "+1" reaction.
    flexmock(comment).should_receive("add_reaction").with_args("+1").once()
    flexmock(
        GithubProject,
        full_repo_name="packit-service/hello-world",
        get_file_content=lambda path, ref: packit_yaml,
        get_files=lambda ref, filter_regex: ["the-specfile.spec"],
        get_web_url=lambda: "https://github.com/the-namespace/the-repo",
    )
    flexmock(Github, get_repo=lambda full_name_or_id: None)
    config = ServiceConfig()
    config.command_handler_work_dir = SANDCASTLE_WORK_DIR
    flexmock(ServiceConfig).should_receive("get_service_config").and_return(config)
    # DB trigger stub shared by the property and the get_by_id lookup.
    trigger = flexmock(
        job_config_trigger_type=JobConfigTriggerType.pull_request, id=123
    )
    flexmock(AddPullRequestDbTrigger).should_receive("db_trigger").and_return(trigger)
    flexmock(PullRequestModel).should_receive("get_by_id").with_args(123).and_return(
        trigger
    )
    flexmock(LocalProject, refresh_the_arguments=lambda: None)
    flexmock(Allowlist, check_and_report=True)
    flexmock(PullRequestModel).should_receive("get_or_create").with_args(
        pr_id=9,
        namespace="packit-service",
        repo_name="hello-world",
        project_url="https://github.com/packit-service/hello-world",
    ).and_return(
        flexmock(id=9, job_config_trigger_type=JobConfigTriggerType.pull_request)
    )
    pr_embedded_command_comment_event["comment"]["body"] = "/packit test"
    flexmock(GithubProject, get_files="foo.spec")
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    flexmock(Signature).should_receive("apply_async").once()
    flexmock(copr_build).should_receive("get_valid_build_targets").twice().and_return(
        {"test-target"}
    )
    # A successful Copr build exists, so testing farm must run exactly once.
    flexmock(TestingFarmJobHelper).should_receive("get_latest_copr_build").and_return(
        flexmock(status=PG_COPR_BUILD_STATUS_SUCCESS)
    )
    flexmock(TestingFarmJobHelper).should_receive("run_testing_farm").once().and_return(
        TaskResults(success=True, details={})
    )
    flexmock(Pushgateway).should_receive("push").twice().and_return()
    flexmock(CoprBuildJobHelper).should_receive("report_status_to_tests").with_args(
        description=TASK_ACCEPTED,
        state=BaseCommitStatus.pending,
        url="",
    ).once()
    processing_results = SteveJobs().process_message(pr_embedded_command_comment_event)
    event_dict, job, job_config, package_config = get_parameters_from_results(
        processing_results
    )
    assert json.dumps(event_dict)
    run_testing_farm_handler(
        package_config=package_config,
        event=event_dict,
        job_config=job_config,
    )
def test_pr_test_command_handler_missing_build(pr_embedded_command_comment_event):
    """'/packit test' with a chroot lacking a Copr build reports per-chroot status.

    One target has a successful build, the second has none (the mocked
    get_latest_copr_build returns a build first and then nothing), so the
    handler must report both an overall test status and a per-chroot one.
    """
    jobs = [
        {
            "trigger": "pull_request",
            "job": "tests",
            "metadata": {"targets": "fedora-rawhide-x86_64"},
        }
    ]
    # packit.yaml served through the mocked GithubProject.get_file_content.
    packit_yaml = (
        "{'specfile_path': 'the-specfile.spec', 'synced_files': [], 'jobs': "
        + str(jobs)
        + "}"
    )
    pr = flexmock(head_commit="12345")
    flexmock(GithubProject).should_receive("get_pr").and_return(pr)
    comment = flexmock()
    flexmock(pr).should_receive("get_comment").and_return(comment)
    # The triggering comment is acknowledged with a "+1" reaction.
    flexmock(comment).should_receive("add_reaction").with_args("+1").once()
    flexmock(
        GithubProject,
        full_repo_name="packit-service/hello-world",
        get_file_content=lambda path, ref: packit_yaml,
        get_files=lambda ref, filter_regex: ["the-specfile.spec"],
        get_web_url=lambda: "https://github.com/the-namespace/the-repo",
    )
    flexmock(Github, get_repo=lambda full_name_or_id: None)
    config = ServiceConfig()
    config.command_handler_work_dir = SANDCASTLE_WORK_DIR
    flexmock(ServiceConfig).should_receive("get_service_config").and_return(config)
    # DB trigger stub shared by the property and the get_by_id lookup.
    trigger = flexmock(
        job_config_trigger_type=JobConfigTriggerType.pull_request, id=123
    )
    flexmock(AddPullRequestDbTrigger).should_receive("db_trigger").and_return(trigger)
    flexmock(PullRequestModel).should_receive("get_by_id").with_args(123).and_return(
        trigger
    )
    flexmock(LocalProject, refresh_the_arguments=lambda: None)
    flexmock(Allowlist, check_and_report=True)
    flexmock(PullRequestModel).should_receive("get_or_create").with_args(
        pr_id=9,
        namespace="packit-service",
        repo_name="hello-world",
        project_url="https://github.com/packit-service/hello-world",
    ).and_return(
        flexmock(
            id=9,
            job_config_trigger_type=JobConfigTriggerType.pull_request,
            job_trigger_model_type=JobTriggerModelType.pull_request,
        )
    )
    flexmock(JobTriggerModel).should_receive("get_or_create").with_args(
        type=JobTriggerModelType.pull_request, trigger_id=9
    ).and_return(trigger)
    pr_embedded_command_comment_event["comment"]["body"] = "/packit test"
    flexmock(GithubProject, get_files="foo.spec")
    flexmock(GithubProject).should_receive("is_private").and_return(False)
    flexmock(Signature).should_receive("apply_async").twice()
    # Two targets: only the first has a successful Copr build.
    flexmock(copr_build).should_receive("get_valid_build_targets").and_return(
        {"test-target", "test-target-without-build"}
    )
    flexmock(TestingFarmJobHelper).should_receive("get_latest_copr_build").and_return(
        flexmock(status=PG_COPR_BUILD_STATUS_SUCCESS)
    ).and_return()
    flexmock(TestingFarmJobHelper).should_receive("job_owner").and_return("owner")
    flexmock(TestingFarmJobHelper).should_receive("job_project").and_return("project")
    flexmock(CoprBuildJobHelper).should_receive("report_status_to_tests").once()
    flexmock(CoprBuildJobHelper).should_receive(
        "report_status_to_test_for_chroot"
    ).once()
    flexmock(TestingFarmJobHelper).should_receive("run_testing_farm").once().and_return(
        TaskResults(success=False, details={})
    )
    flexmock(Pushgateway).should_receive("push").twice().and_return()
    processing_results = SteveJobs().process_message(pr_embedded_command_comment_event)
    event_dict, job, job_config, package_config = get_parameters_from_results(
        processing_results
    )
    assert json.dumps(event_dict)
    run_testing_farm_handler(
        package_config=package_config,
        event=event_dict,
        job_config=job_config,
    )
| 37.060995 | 88 | 0.69977 |
5eb911306d7de607a044b7f27d55a31a802a6388 | 3,620 | py | Python | tests/client/track.py | Allerter/tekore | 20cf68280fb5b691126600a5b474ee841f7be199 | [
"MIT"
] | 135 | 2020-01-14T17:47:26.000Z | 2022-03-25T18:30:04.000Z | tests/client/track.py | Allerter/tekore | 20cf68280fb5b691126600a5b474ee841f7be199 | [
"MIT"
] | 135 | 2020-01-13T22:56:35.000Z | 2022-03-11T19:41:36.000Z | tests/client/track.py | Allerter/tekore | 20cf68280fb5b691126600a5b474ee841f7be199 | [
"MIT"
] | 21 | 2020-01-16T16:01:23.000Z | 2022-02-17T12:46:32.000Z | import pytest
from ._resources import (
track_id,
track_ids,
track_relinked,
track_restricted,
track_no_audio_features,
album_id,
)
from tekore import HTTPError
class TestSpotifyTrack:
def test_track_with_market(self, app_client):
track = app_client.track(track_id, market='US')
assert track.id == track_id
assert track.available_markets is None
assert track.is_playable is not None
def test_track_no_market(self, app_client):
track = app_client.track(track_id, market=None)
assert track.id == track_id
assert len(track.available_markets) > 0
assert track.is_playable is None
def test_track_restricted(self, app_client):
track = app_client.track(track_restricted, market='SE')
assert track.is_playable is False
assert track.restrictions.reason == 'market'
def test_track_relinking(self, app_client):
track = app_client.track(track_relinked, market='US')
assert track_relinked != track.id
assert track_relinked == track.linked_from.id
assert track.is_playable is True
def test_tracks_with_market(self, app_client):
tracks = app_client.tracks(track_ids, market='US')
assert len(tracks) == len(track_ids)
assert all(track.available_markets is None for track in tracks)
assert all(track.is_playable is not None for track in tracks)
def test_tracks_no_market(self, app_client):
tracks = app_client.tracks(track_ids, market=None)
assert len(tracks) == len(track_ids)
assert all(len(track.available_markets) > 0 for track in tracks)
assert all(track.is_playable is None for track in tracks)
def test_simple_tracks_with_market(self, app_client):
tracks = app_client.album(album_id, market='US').tracks.items
assert all(track.available_markets is None for track in tracks)
assert all(track.is_playable is not None for track in tracks)
def test_simple_tracks_no_market(self, app_client):
tracks = app_client.album(album_id, market=None).tracks.items
assert all(len(track.available_markets) > 0 for track in tracks)
assert all(track.is_playable is None for track in tracks)
def test_track_audio_analysis(self, app_client):
app_client.track_audio_analysis(track_id)
def test_track_audio_features(self, app_client):
features = app_client.track_audio_features(track_id)
assert features.id == track_id
def test_tracks_audio_features(self, app_client):
features = app_client.tracks_audio_features(track_ids)
assert [f.id for f in features] == track_ids
@pytest.mark.skipif(
not track_no_audio_features,
reason='No known track without audio features'
)
def test_track_audio_features_not_found_raises(self, app_client):
with pytest.raises(HTTPError):
app_client.track_audio_features(track_no_audio_features)
@pytest.mark.skipif(
not track_no_audio_features,
reason='No known track without audio features'
)
def test_tracks_audio_features_not_found_is_none(self, app_client):
features = app_client.tracks_audio_features([track_no_audio_features])
assert features[0] is None
def test_track_from_token(self, user_client):
track = user_client.track(track_id, market='from_token')
assert track.id == track_id
def test_tracks_from_token(self, user_client):
tracks = user_client.tracks(track_ids, market='from_token')
assert len(tracks) == len(track_ids)
| 38.105263 | 78 | 0.710221 |
e05303d3bed41a24785a1568d319f2e845001d6c | 844 | py | Python | tests/deprecation_rules/boolean_field_default_value/test_checked_file.py | iwoca/django-seven | c7be98b73c139c9e74a9be94a0f20a723c739c80 | [
"BSD-3-Clause"
] | 9 | 2016-05-25T22:33:17.000Z | 2021-05-29T18:38:07.000Z | tests/deprecation_rules/boolean_field_default_value/test_checked_file.py | iwoca/django-upgrade-tools | c7be98b73c139c9e74a9be94a0f20a723c739c80 | [
"BSD-3-Clause"
] | 3 | 2016-06-19T21:17:06.000Z | 2016-07-20T20:26:14.000Z | tests/deprecation_rules/boolean_field_default_value/test_checked_file.py | iwoca/django-upgrade-tools | c7be98b73c139c9e74a9be94a0f20a723c739c80 | [
"BSD-3-Clause"
] | 1 | 2016-08-11T07:27:30.000Z | 2016-08-11T07:27:30.000Z |
from django.test import TestCase
from django.test.utils import override_settings
from django_seven.deprecated_rules.rules import BOOLEAN_DEFAULT
from tests.deprecation_rules.mixins import RuleCheckMixin
class TestBooleanFieldDefaultRule(RuleCheckMixin, TestCase):
    """The BOOLEAN_DEFAULT rule flags the BooleanField on line 5 of checked_file.py."""

    @override_settings(DEPRECATED_RULES=[BOOLEAN_DEFAULT])
    def test_validate_rule(self):
        # Expected report: one offending line, keyed by the rule's name.
        self.assert_report(__file__,
            {
                BOOLEAN_DEFAULT['name']: {
                    'lines': [
                        {
                            'content': '    bad_boolean_field = models.BooleanField()\n',
                            'number': 5,
                            'filename': '/tests/deprecation_rules/boolean_field_default_value/checked_file.py'
                        }
                    ]
                }
            }
        )
| 33.76 | 110 | 0.565166 |
6038d7983f3695253a003db3ad7f1a2da4e67dfb | 191,724 | py | Python | airflow/models.py | ychantit/incubator-airflow | 11e670ddbce419489c798f26d3e94e7d3a00f5eb | [
"Apache-2.0"
] | 1 | 2021-10-11T11:12:59.000Z | 2021-10-11T11:12:59.000Z | airflow/models.py | ychantit/incubator-airflow | 11e670ddbce419489c798f26d3e94e7d3a00f5eb | [
"Apache-2.0"
] | null | null | null | airflow/models.py | ychantit/incubator-airflow | 11e670ddbce419489c798f26d3e94e7d3a00f5eb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.standard_library import install_aliases
from builtins import str
from builtins import object, bytes
import copy
from collections import namedtuple, defaultdict
import cryptography
from datetime import timedelta
import dill
import functools
import getpass
import imp
import importlib
import itertools
import zipfile
import jinja2
import json
import logging
import numbers
import os
import pickle
import re
import signal
import sys
import textwrap
import traceback
import warnings
import hashlib
import uuid
from datetime import datetime
from urllib.parse import urlparse, quote, parse_qsl
from sqlalchemy import (
Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
Index, Float, LargeBinary)
from sqlalchemy import func, or_, and_, true as sqltrue
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import reconstructor, relationship, synonym
from sqlalchemy_utc import UtcDateTime
from croniter import croniter
import six
from airflow import settings, utils
from airflow.executors import GetDefaultExecutor, LocalExecutor
from airflow import configuration
from airflow.exceptions import (
AirflowDagCycleException, AirflowException, AirflowSkipException, AirflowTaskTimeout
)
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.lineage import apply_lineage, prepare_lineage
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils import timezone
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
from airflow.utils.email import send_email
from airflow.utils.helpers import (
as_tuple, is_container, validate_key, pprinttable)
from airflow.utils.operator_resources import Resources
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
from airflow.utils.net import get_hostname
from airflow.utils.log.logging_mixin import LoggingMixin
# Make Python 2 stdlib modules importable under their Python 3 names.
install_aliases()
# Shared SQLAlchemy declarative base for every ORM model defined in this module.
Base = declarative_base()
# Maximum length of identifier columns (dag_id, task_id, conn_id, ...).
ID_LEN = 250
# XCom key under which a task's return value is stored.
XCOM_RETURN_KEY = 'return_value'
# Metrics client configured in airflow.settings (statsd-style interface).
Stats = settings.Stats
def get_fernet():
    """
    Deferred load of Fernet key.

    This function could fail either because Cryptography is not installed
    or because the Fernet key is invalid.

    :return: Fernet object
    :raises: AirflowException if there's a problem trying to load Fernet
    """
    # Import lazily so a missing `cryptography` package only affects callers
    # that actually need encryption.
    try:
        from cryptography.fernet import Fernet
    except ImportError:
        raise AirflowException('Failed to import Fernet, it may not be installed')
    # A missing or malformed FERNET_KEY surfaces here as ValueError/TypeError.
    try:
        return Fernet(configuration.conf.get('core', 'FERNET_KEY').encode('utf-8'))
    except (ValueError, TypeError) as err:
        raise AirflowException("Could not create Fernet object: {}".format(err))
# Used by DAG context_managers: holds the DAG most recently entered via
# `with DAG(...)`; None when no DAG block is active.
# NOTE(review): the readers/writers live outside this chunk — presumably
# DAG.__enter__/__exit__ and operator construction; confirm before relying on it.
_CONTEXT_MANAGER_DAG = None
def clear_task_instances(tis, session, activate_dag_runs=True, dag=None):
    """
    Clears a set of task instances, but makes sure the running ones
    get killed.

    :param tis: task instances to clear
    :param session: active SQLAlchemy session (caller is expected to commit)
    :param activate_dag_runs: if True, flip the affected DagRuns back to RUNNING
    :param dag: optional DAG used to look up per-task retry counts
    """
    job_ids = []
    for ti in tis:
        if ti.state == State.RUNNING:
            # Running TIs are not reset directly; their jobs are asked to shut down.
            if ti.job_id:
                ti.state = State.SHUTDOWN
                job_ids.append(ti.job_id)
        else:
            task_id = ti.task_id
            if dag and dag.has_task(task_id):
                task = dag.get_task(task_id)
                task_retries = task.retries
                # Extend the retry budget so the cleared TI can run again.
                ti.max_tries = ti.try_number + task_retries - 1
            else:
                # Ignore errors when updating max_tries if dag is None or
                # task not found in dag since database records could be
                # outdated. We make max_tries the maximum value of its
                # original max_tries or the current task try number.
                ti.max_tries = max(ti.max_tries, ti.try_number - 1)
            ti.state = State.NONE
            session.merge(ti)
    if job_ids:
        # Imported here to avoid a circular import with airflow.jobs.
        from airflow.jobs import BaseJob as BJ
        for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
            job.state = State.SHUTDOWN
    if activate_dag_runs and tis:
        drs = session.query(DagRun).filter(
            DagRun.dag_id.in_({ti.dag_id for ti in tis}),
            DagRun.execution_date.in_({ti.execution_date for ti in tis}),
        ).all()
        for dr in drs:
            dr.state = State.RUNNING
            dr.start_date = timezone.utcnow()
class DagBag(BaseDagBag, LoggingMixin):
    """
    A dagbag is a collection of dags, parsed out of a folder tree and has high
    level configuration settings, like what database to use as a backend and
    what executor to use to fire off tasks. This makes it easier to run
    distinct environments for say production and development, tests, or for
    different teams or security profiles. What would have been system level
    settings are now dagbag level so that one system can run multiple,
    independent settings sets.

    :param dag_folder: the folder to scan to find DAGs
    :type dag_folder: unicode
    :param executor: the executor to use when executing task instances
        in this DagBag
    :param include_examples: whether to include the examples that ship
        with airflow or not
    :type include_examples: bool
    """
    # static class variables to detect dag cycle
    CYCLE_NEW = 0
    CYCLE_IN_PROGRESS = 1
    CYCLE_DONE = 2
    def __init__(
            self,
            dag_folder=None,
            executor=None,
            include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES')):
        # do not use default arg in signature, to fix import cycle on plugin load
        if executor is None:
            executor = GetDefaultExecutor()
        dag_folder = dag_folder or settings.DAGS_FOLDER
        self.log.info("Filling up the DagBag from %s", dag_folder)
        self.dag_folder = dag_folder
        self.dags = {}
        # the file's last modified timestamp when we last read it
        self.file_last_changed = {}
        self.executor = executor
        self.import_errors = {}
        if include_examples:
            example_dag_folder = os.path.join(
                os.path.dirname(__file__),
                'example_dags')
            self.collect_dags(example_dag_folder)
        self.collect_dags(dag_folder)
    def size(self):
        """
        :return: the amount of dags contained in this dagbag
        """
        return len(self.dags)
    def get_dag(self, dag_id):
        """
        Gets the DAG out of the dictionary, and refreshes it if expired
        """
        # If asking for a known subdag, we want to refresh the parent
        root_dag_id = dag_id
        if dag_id in self.dags:
            dag = self.dags[dag_id]
            if dag.is_subdag:
                root_dag_id = dag.parent_dag.dag_id
        # If the dag corresponding to root_dag_id is absent or expired
        # (note: when root_dag_id is not in self.dags the short-circuit below
        # guarantees `dag` is never referenced unbound)
        orm_dag = DagModel.get_current(root_dag_id)
        if orm_dag and (
                root_dag_id not in self.dags or
                (
                    orm_dag.last_expired and
                    dag.last_loaded < orm_dag.last_expired
                )
        ):
            # Reprocess source file
            found_dags = self.process_file(
                filepath=orm_dag.fileloc, only_if_updated=False)
            # If the source file no longer exports `dag_id`, delete it from self.dags
            if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
                return self.dags[dag_id]
            elif dag_id in self.dags:
                del self.dags[dag_id]
        return self.dags.get(dag_id)
    def process_file(self, filepath, only_if_updated=True, safe_mode=True):
        """
        Given a path to a python module or zip file, this method imports
        the module and look for dag objects within it.

        :param filepath: path to a .py file or a zip archive of modules
        :param only_if_updated: skip files whose mtime matches the last read
        :param safe_mode: heuristically skip files that contain neither the
            bytes ``DAG`` nor ``airflow`` before importing them
        :return: list of DAG objects found in the file (including subdags)
        """
        found_dags = []
        # if the source file no longer exists in the DB or in the filesystem,
        # return an empty list
        # todo: raise exception?
        if filepath is None or not os.path.isfile(filepath):
            return found_dags
        try:
            # This failed before in what may have been a git sync
            # race condition
            file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
            if only_if_updated \
                    and filepath in self.file_last_changed \
                    and file_last_changed_on_disk == self.file_last_changed[filepath]:
                return found_dags
        except Exception as e:
            self.log.exception(e)
            return found_dags
        mods = []
        if not zipfile.is_zipfile(filepath):
            if safe_mode and os.path.isfile(filepath):
                with open(filepath, 'rb') as f:
                    content = f.read()
                    if not all([s in content for s in (b'DAG', b'airflow')]):
                        self.file_last_changed[filepath] = file_last_changed_on_disk
                        return found_dags
            self.log.debug("Importing %s", filepath)
            org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
            # Prefix with a hash of the full path so same-named files in
            # different folders get distinct entries in sys.modules.
            mod_name = ('unusual_prefix_' +
                        hashlib.sha1(filepath.encode('utf-8')).hexdigest() +
                        '_' + org_mod_name)
            if mod_name in sys.modules:
                del sys.modules[mod_name]
            with timeout(configuration.conf.getint('core', "DAGBAG_IMPORT_TIMEOUT")):
                try:
                    m = imp.load_source(mod_name, filepath)
                    mods.append(m)
                except Exception as e:
                    self.log.exception("Failed to import: %s", filepath)
                    self.import_errors[filepath] = str(e)
                    self.file_last_changed[filepath] = file_last_changed_on_disk
        else:
            # Zip archive: import every top-level .py/.pyc member.
            zip_file = zipfile.ZipFile(filepath)
            for mod in zip_file.infolist():
                head, _ = os.path.split(mod.filename)
                mod_name, ext = os.path.splitext(mod.filename)
                if not head and (ext == '.py' or ext == '.pyc'):
                    if mod_name == '__init__':
                        self.log.warning("Found __init__.%s at root of %s", ext, filepath)
                    if safe_mode:
                        with zip_file.open(mod.filename) as zf:
                            self.log.debug("Reading %s from %s", mod.filename, filepath)
                            content = zf.read()
                            if not all([s in content for s in (b'DAG', b'airflow')]):
                                self.file_last_changed[filepath] = (
                                    file_last_changed_on_disk)
                                # todo: create ignore list
                                return found_dags
                    if mod_name in sys.modules:
                        del sys.modules[mod_name]
                    try:
                        sys.path.insert(0, filepath)
                        m = importlib.import_module(mod_name)
                        mods.append(m)
                    except Exception as e:
                        self.log.exception("Failed to import: %s", filepath)
                        self.import_errors[filepath] = str(e)
                        self.file_last_changed[filepath] = file_last_changed_on_disk
        # Bag every DAG object found at module top level.
        for m in mods:
            for dag in list(m.__dict__.values()):
                if isinstance(dag, DAG):
                    if not dag.full_filepath:
                        dag.full_filepath = filepath
                        if dag.fileloc != filepath:
                            dag.fileloc = filepath
                    try:
                        dag.is_subdag = False
                        self.bag_dag(dag, parent_dag=dag, root_dag=dag)
                        found_dags.append(dag)
                        found_dags += dag.subdags
                    except AirflowDagCycleException as cycle_exception:
                        self.log.exception("Failed to bag_dag: %s", dag.full_filepath)
                        self.import_errors[dag.full_filepath] = str(cycle_exception)
                        self.file_last_changed[dag.full_filepath] = \
                            file_last_changed_on_disk
        self.file_last_changed[filepath] = file_last_changed_on_disk
        return found_dags
    @provide_session
    def kill_zombies(self, session=None):
        """
        Fails tasks that haven't had a heartbeat in too long
        """
        from airflow.jobs import LocalTaskJob as LJ
        self.log.info("Finding 'running' jobs without a recent heartbeat")
        TI = TaskInstance
        secs = configuration.conf.getint('scheduler', 'scheduler_zombie_task_threshold')
        limit_dttm = timezone.utcnow() - timedelta(seconds=secs)
        self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
        # A zombie is a RUNNING task instance whose LocalTaskJob is no longer
        # running, or has not heartbeat within the configured threshold.
        tis = (
            session.query(TI)
            .join(LJ, TI.job_id == LJ.id)
            .filter(TI.state == State.RUNNING)
            .filter(
                or_(
                    LJ.state != State.RUNNING,
                    LJ.latest_heartbeat < limit_dttm,
                ))
            .all()
        )
        for ti in tis:
            if ti and ti.dag_id in self.dags:
                dag = self.dags[ti.dag_id]
                if ti.task_id in dag.task_ids:
                    task = dag.get_task(ti.task_id)
                    ti.task = task
                    ti.handle_failure("{} killed as zombie".format(str(ti)))
                    self.log.info('Marked zombie job %s as failed', ti)
                    Stats.incr('zombies_killed')
        session.commit()
    def bag_dag(self, dag, parent_dag, root_dag):
        """
        Adds the DAG into the bag, recurses into sub dags.
        Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags
        """
        dag.test_cycle()  # throws if a task cycle is found
        dag.resolve_template_files()
        dag.last_loaded = timezone.utcnow()
        for task in dag.tasks:
            settings.policy(task)
        subdags = dag.subdags
        try:
            for subdag in subdags:
                subdag.full_filepath = dag.full_filepath
                subdag.parent_dag = dag
                subdag.is_subdag = True
                self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
            self.dags[dag.dag_id] = dag
            self.log.debug('Loaded DAG {dag}'.format(**locals()))
        except AirflowDagCycleException as cycle_exception:
            # There was an error in bagging the dag. Remove it from the list of dags
            self.log.exception('Exception bagging dag: {dag.dag_id}'.format(**locals()))
            # Only necessary at the root level since DAG.subdags automatically
            # performs DFS to search through all subdags
            if dag == root_dag:
                for subdag in subdags:
                    if subdag.dag_id in self.dags:
                        del self.dags[subdag.dag_id]
            raise cycle_exception
    def collect_dags(
            self,
            dag_folder=None,
            only_if_updated=True):
        """
        Given a file path or a folder, this method looks for python modules,
        imports them and adds them to the dagbag collection.

        Note that if a .airflowignore file is found while processing
        the directory, it will behave much like a .gitignore does,
        ignoring files that match any of the regex patterns specified
        in the file.
        """
        start_dttm = timezone.utcnow()
        dag_folder = dag_folder or self.dag_folder
        # Used to store stats around DagBag processing
        stats = []
        FileLoadStat = namedtuple(
            'FileLoadStat', "file duration dag_num task_num dags")
        if os.path.isfile(dag_folder):
            self.process_file(dag_folder, only_if_updated=only_if_updated)
        elif os.path.isdir(dag_folder):
            for root, dirs, files in os.walk(dag_folder, followlinks=True):
                patterns = []
                ignore_file = os.path.join(root, '.airflowignore')
                if os.path.isfile(ignore_file):
                    with open(ignore_file, 'r') as f:
                        patterns += [p for p in f.read().split('\n') if p]
                for f in files:
                    try:
                        filepath = os.path.join(root, f)
                        if not os.path.isfile(filepath):
                            continue
                        mod_name, file_ext = os.path.splitext(
                            os.path.split(filepath)[-1])
                        if file_ext != '.py' and not zipfile.is_zipfile(filepath):
                            continue
                        if not any(
                                [re.findall(p, filepath) for p in patterns]):
                            ts = timezone.utcnow()
                            found_dags = self.process_file(
                                filepath, only_if_updated=only_if_updated)
                            td = timezone.utcnow() - ts
                            td = td.total_seconds() + (
                                float(td.microseconds) / 1000000)
                            stats.append(FileLoadStat(
                                filepath.replace(dag_folder, ''),
                                td,
                                len(found_dags),
                                sum([len(dag.tasks) for dag in found_dags]),
                                str([dag.dag_id for dag in found_dags]),
                            ))
                    except Exception as e:
                        self.log.exception(e)
        Stats.gauge(
            'collect_dags', (timezone.utcnow() - start_dttm).total_seconds(), 1)
        Stats.gauge(
            'dagbag_size', len(self.dags), 1)
        Stats.gauge(
            'dagbag_import_errors', len(self.import_errors), 1)
        self.dagbag_stats = sorted(
            stats, key=lambda x: x.duration, reverse=True)
    def dagbag_report(self):
        """Prints a report around DagBag loading stats"""
        report = textwrap.dedent("""\n
        -------------------------------------------------------------------
        DagBag loading stats for {dag_folder}
        -------------------------------------------------------------------
        Number of DAGs: {dag_num}
        Total task number: {task_num}
        DagBag parsing time: {duration}
        {table}
        """)
        stats = self.dagbag_stats
        return report.format(
            dag_folder=self.dag_folder,
            duration=sum([o.duration for o in stats]),
            dag_num=sum([o.dag_num for o in stats]),
            task_num=sum([o.task_num for o in stats]),
            table=pprinttable(stats),
        )
    @provide_session
    def deactivate_inactive_dags(self, session=None):
        # Mark every DagModel row that is no longer present in this bag inactive.
        active_dag_ids = [dag.dag_id for dag in list(self.dags.values())]
        for dag in session.query(
                DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
            dag.is_active = False
            session.merge(dag)
        session.commit()
    @provide_session
    def paused_dags(self, session=None):
        # Return the dag_ids of all DAGs currently flagged paused in the DB.
        dag_ids = [dp.dag_id for dp in session.query(DagModel).filter(
            DagModel.is_paused.__eq__(True))]
        return dag_ids
class User(Base):
    """Simple user record: username/email columns plus an in-memory
    ``superuser`` flag (a plain class attribute, not a database column)."""
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    username = Column(String(ID_LEN), unique=True)
    email = Column(String(500))
    superuser = False
    def __repr__(self):
        return self.username
    def get_id(self):
        # String ID, as expected by session-management callers.
        return str(self.id)
    def is_superuser(self):
        return self.superuser
class Connection(Base, LoggingMixin):
    """
    Placeholder to store information about different database instances
    connection information. The idea here is that scripts use references to
    database instances (conn_id) instead of hard coding hostname, logins and
    passwords when using operators or hooks.
    """
    __tablename__ = "connection"
    id = Column(Integer(), primary_key=True)
    conn_id = Column(String(ID_LEN))
    conn_type = Column(String(500))
    host = Column(String(500))
    schema = Column(String(500))
    login = Column(String(500))
    # Stored encrypted when a Fernet key is configured; see get/set_password.
    _password = Column('password', String(5000))
    port = Column(Integer())
    is_encrypted = Column(Boolean, unique=False, default=False)
    is_extra_encrypted = Column(Boolean, unique=False, default=False)
    _extra = Column('extra', String(5000))
    # (conn_type value, human-readable label) pairs.
    _types = [
        ('docker', 'Docker Registry',),
        ('fs', 'File (path)'),
        ('ftp', 'FTP',),
        ('google_cloud_platform', 'Google Cloud Platform'),
        ('hdfs', 'HDFS',),
        ('http', 'HTTP',),
        ('hive_cli', 'Hive Client Wrapper',),
        ('hive_metastore', 'Hive Metastore Thrift',),
        ('hiveserver2', 'Hive Server 2 Thrift',),
        ('jdbc', 'Jdbc Connection',),
        ('jenkins', 'Jenkins'),
        ('mysql', 'MySQL',),
        ('postgres', 'Postgres',),
        ('oracle', 'Oracle',),
        ('vertica', 'Vertica',),
        ('presto', 'Presto',),
        ('s3', 'S3',),
        ('samba', 'Samba',),
        ('sqlite', 'Sqlite',),
        ('ssh', 'SSH',),
        ('cloudant', 'IBM Cloudant',),
        ('mssql', 'Microsoft SQL Server'),
        ('mesos_framework-id', 'Mesos Framework ID'),
        ('jira', 'JIRA',),
        ('redis', 'Redis',),
        ('wasb', 'Azure Blob Storage'),
        ('databricks', 'Databricks',),
        ('aws', 'Amazon Web Services',),
        ('emr', 'Elastic MapReduce',),
        ('snowflake', 'Snowflake',),
        ('segment', 'Segment',),
        ('azure_data_lake', 'Azure Data Lake'),
        ('cassandra', 'Cassandra',),
    ]
    def __init__(
            self, conn_id=None, conn_type=None,
            host=None, login=None, password=None,
            schema=None, port=None, extra=None,
            uri=None):
        # Either construct from discrete fields, or parse everything from a URI.
        self.conn_id = conn_id
        if uri:
            self.parse_from_uri(uri)
        else:
            self.conn_type = conn_type
            self.host = host
            self.login = login
            self.password = password
            self.schema = schema
            self.port = port
            self.extra = extra
    def parse_from_uri(self, uri):
        # Populate the connection fields from a URI of the form
        # scheme://login:password@host:port/schema?extra_key=extra_value
        temp_uri = urlparse(uri)
        hostname = temp_uri.hostname or ''
        if '%2f' in hostname:
            # Percent-encoded slashes in the host part are decoded back.
            hostname = hostname.replace('%2f', '/').replace('%2F', '/')
        conn_type = temp_uri.scheme
        if conn_type == 'postgresql':
            conn_type = 'postgres'
        self.conn_type = conn_type
        self.host = hostname
        self.schema = temp_uri.path[1:]
        self.login = temp_uri.username
        self.password = temp_uri.password
        self.port = temp_uri.port
        if temp_uri.query:
            self.extra = json.dumps(dict(parse_qsl(temp_uri.query)))
    def get_password(self):
        # Transparently decrypt the stored password when it was encrypted.
        if self._password and self.is_encrypted:
            try:
                fernet = get_fernet()
            except AirflowException:
                raise AirflowException(
                    "Can't decrypt encrypted password for login={}, \
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
        else:
            return self._password
    def set_password(self, value):
        # Encrypt on write when Fernet is available; otherwise store plaintext
        # and flag the row as unencrypted.
        if value:
            try:
                fernet = get_fernet()
                self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_encrypted = True
            except AirflowException:
                self.log.exception("Failed to load fernet while encrypting value, "
                                   "using non-encrypted value.")
                self._password = value
                self.is_encrypted = False
    @declared_attr
    def password(cls):
        # Expose _password through the get/set pair above as `password`.
        return synonym('_password',
                       descriptor=property(cls.get_password, cls.set_password))
    def get_extra(self):
        # Same decrypt-on-read behavior as get_password, for the extra field.
        if self._extra and self.is_extra_encrypted:
            try:
                fernet = get_fernet()
            except AirflowException:
                raise AirflowException(
                    "Can't decrypt `extra` params for login={},\
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
        else:
            return self._extra
    def set_extra(self, value):
        # Same encrypt-on-write behavior as set_password, for the extra field.
        if value:
            try:
                fernet = get_fernet()
                self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_extra_encrypted = True
            except AirflowException:
                self.log.exception("Failed to load fernet while encrypting value, "
                                   "using non-encrypted value.")
                self._extra = value
                self.is_extra_encrypted = False
        else:
            self._extra = value
            self.is_extra_encrypted = False
    @declared_attr
    def extra(cls):
        return synonym('_extra',
                       descriptor=property(cls.get_extra, cls.set_extra))
    def get_hook(self):
        # Instantiate the hook class matching conn_type; imports are local to
        # avoid pulling every provider dependency at module import time.
        # NOTE: unknown conn_type (or a failing import) silently returns None.
        try:
            if self.conn_type == 'mysql':
                from airflow.hooks.mysql_hook import MySqlHook
                return MySqlHook(mysql_conn_id=self.conn_id)
            elif self.conn_type == 'google_cloud_platform':
                from airflow.contrib.hooks.bigquery_hook import BigQueryHook
                return BigQueryHook(bigquery_conn_id=self.conn_id)
            elif self.conn_type == 'postgres':
                from airflow.hooks.postgres_hook import PostgresHook
                return PostgresHook(postgres_conn_id=self.conn_id)
            elif self.conn_type == 'hive_cli':
                from airflow.hooks.hive_hooks import HiveCliHook
                return HiveCliHook(hive_cli_conn_id=self.conn_id)
            elif self.conn_type == 'presto':
                from airflow.hooks.presto_hook import PrestoHook
                return PrestoHook(presto_conn_id=self.conn_id)
            elif self.conn_type == 'hiveserver2':
                from airflow.hooks.hive_hooks import HiveServer2Hook
                return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
            elif self.conn_type == 'sqlite':
                from airflow.hooks.sqlite_hook import SqliteHook
                return SqliteHook(sqlite_conn_id=self.conn_id)
            elif self.conn_type == 'jdbc':
                from airflow.hooks.jdbc_hook import JdbcHook
                return JdbcHook(jdbc_conn_id=self.conn_id)
            elif self.conn_type == 'mssql':
                from airflow.hooks.mssql_hook import MsSqlHook
                return MsSqlHook(mssql_conn_id=self.conn_id)
            elif self.conn_type == 'oracle':
                from airflow.hooks.oracle_hook import OracleHook
                return OracleHook(oracle_conn_id=self.conn_id)
            elif self.conn_type == 'vertica':
                from airflow.contrib.hooks.vertica_hook import VerticaHook
                return VerticaHook(vertica_conn_id=self.conn_id)
            elif self.conn_type == 'cloudant':
                from airflow.contrib.hooks.cloudant_hook import CloudantHook
                return CloudantHook(cloudant_conn_id=self.conn_id)
            elif self.conn_type == 'jira':
                from airflow.contrib.hooks.jira_hook import JiraHook
                return JiraHook(jira_conn_id=self.conn_id)
            elif self.conn_type == 'redis':
                from airflow.contrib.hooks.redis_hook import RedisHook
                return RedisHook(redis_conn_id=self.conn_id)
            elif self.conn_type == 'wasb':
                from airflow.contrib.hooks.wasb_hook import WasbHook
                return WasbHook(wasb_conn_id=self.conn_id)
            elif self.conn_type == 'docker':
                from airflow.hooks.docker_hook import DockerHook
                return DockerHook(docker_conn_id=self.conn_id)
            elif self.conn_type == 'azure_data_lake':
                from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
                return AzureDataLakeHook(azure_data_lake_conn_id=self.conn_id)
            elif self.conn_type == 'cassandra':
                from airflow.contrib.hooks.cassandra_hook import CassandraHook
                return CassandraHook(cassandra_conn_id=self.conn_id)
        except Exception:
            pass
    def __repr__(self):
        return self.conn_id
    @property
    def extra_dejson(self):
        """Returns the extra property by deserializing json."""
        obj = {}
        if self.extra:
            try:
                obj = json.loads(self.extra)
            except Exception as e:
                self.log.exception(e)
                self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
        return obj
class DagPickle(Base):
    """
    Dags can originate from different places (user repos, master repo, ...)
    and also get executed in different places (different executors). This
    object represents a version of a DAG and becomes a source of truth for
    a BackfillJob execution. A pickle is a native python serialized object,
    and in this case gets stored in the database for the duration of the job.

    The executors pick up the DagPickle id and read the dag definition from
    the database.
    """
    id = Column(Integer, primary_key=True)
    # The DAG object itself, serialized with dill rather than plain pickle.
    pickle = Column(PickleType(pickler=dill))
    created_dttm = Column(UtcDateTime, default=timezone.utcnow)
    pickle_hash = Column(Text)
    __tablename__ = "dag_pickle"
    def __init__(self, dag):
        self.dag_id = dag.dag_id
        # The jinja environment is not serializable; drop it before pickling.
        if hasattr(dag, 'template_env'):
            dag.template_env = None
        self.pickle_hash = hash(dag)
        self.pickle = dag
class TaskInstance(Base, LoggingMixin):
"""
Task instances store the state of a task instance. This table is the
authority and single source of truth around what tasks have run and the
state they are in.
The SqlAlchemy model doesn't have a SqlAlchemy foreign key to the task or
dag model deliberately to have more control over transactions.
Database transactions on this table should insure double triggers and
any confusion around what task instances are or aren't ready to run
even while multiple schedulers may be firing task instances.
"""
__tablename__ = "task_instance"
task_id = Column(String(ID_LEN), primary_key=True)
dag_id = Column(String(ID_LEN), primary_key=True)
execution_date = Column(UtcDateTime, primary_key=True)
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
duration = Column(Float)
state = Column(String(20))
_try_number = Column('try_number', Integer, default=0)
max_tries = Column(Integer)
hostname = Column(String(1000))
unixname = Column(String(1000))
job_id = Column(Integer, index=True)
pool = Column(String(50))
queue = Column(String(50))
priority_weight = Column(Integer)
operator = Column(String(1000))
queued_dttm = Column(UtcDateTime)
pid = Column(Integer)
executor_config = Column(PickleType(pickler=dill))
__table_args__ = (
Index('ti_dag_state', dag_id, state),
Index('ti_state', state),
Index('ti_state_lkp', dag_id, task_id, execution_date, state),
Index('ti_pool', pool, state, priority_weight),
)
    def __init__(self, task, execution_date, state=None):
        self.dag_id = task.dag_id
        self.task_id = task.task_id
        self.task = task
        self._log = logging.getLogger("airflow.task")
        # make sure we have a localized execution_date stored in UTC
        if execution_date and not timezone.is_localized(execution_date):
            self.log.warning("execution date %s has no timezone information. Using "
                             "default from dag or system", execution_date)
            if self.task.has_dag():
                execution_date = timezone.make_aware(execution_date,
                                                     self.task.dag.timezone)
            else:
                execution_date = timezone.make_aware(execution_date)
            execution_date = timezone.convert_to_utc(execution_date)
        self.execution_date = execution_date
        # Scheduling attributes are copied from the task definition.
        self.queue = task.queue
        self.pool = task.pool
        self.priority_weight = task.priority_weight_total
        self.try_number = 0
        self.max_tries = self.task.retries
        self.unixname = getpass.getuser()
        self.run_as_user = task.run_as_user
        if state:
            self.state = state
        self.hostname = ''
        self.executor_config = task.executor_config
        self.init_on_load()
        # Is this TaskInstance being currently running within `airflow run --raw`.
        # Not persisted to the database so only valid for the current process
        self.raw = False
    @reconstructor
    def init_on_load(self):
        """ Initialize the attributes that aren't stored in the DB. """
        # Runs both from __init__ and when SQLAlchemy reloads the row.
        self.test_mode = False  # can be changed when calling 'run'
    @property
    def try_number(self):
        """
        Return the try number that this task number will be when it is actually
        run.

        If the TI is currently running, this will match the column in the
        database, in all other cases this will be incremented.
        """
        # This is designed so that task logs end up in the right file.
        if self.state == State.RUNNING:
            return self._try_number
        return self._try_number + 1
    @try_number.setter
    def try_number(self, value):
        # Writes go straight to the backing column; the getter adds the +1.
        self._try_number = value
    @property
    def next_try_number(self):
        # The try number the *next* attempt will carry, regardless of state.
        return self._try_number + 1
    def command(
            self,
            mark_success=False,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            local=False,
            pickle_id=None,
            raw=False,
            job_id=None,
            pool=None,
            cfg_path=None):
        """
        Returns a command that can be executed anywhere where airflow is
        installed. This command is part of the message sent to executors by
        the orchestrator.
        """
        # NOTE(review): plain " ".join does no shell quoting — an argument
        # containing spaces would break the command; confirm callers only
        # pass safe values, or prefer command_as_list.
        return " ".join(self.command_as_list(
            mark_success=mark_success,
            ignore_all_deps=ignore_all_deps,
            ignore_depends_on_past=ignore_depends_on_past,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state,
            local=local,
            pickle_id=pickle_id,
            raw=raw,
            job_id=job_id,
            pool=pool,
            cfg_path=cfg_path))
    def command_as_list(
            self,
            mark_success=False,
            ignore_all_deps=False,
            ignore_task_deps=False,
            ignore_depends_on_past=False,
            ignore_ti_state=False,
            local=False,
            pickle_id=None,
            raw=False,
            job_id=None,
            pool=None,
            cfg_path=None):
        """
        Returns a command that can be executed anywhere where airflow is
        installed. This command is part of the message sent to executors by
        the orchestrator.
        """
        dag = self.task.dag
        # The DAG file path only needs to be shipped when the DAG was not
        # pickled to the database.
        should_pass_filepath = not pickle_id and dag
        if should_pass_filepath and dag.full_filepath != dag.filepath:
            path = "DAGS_FOLDER/{}".format(dag.filepath)
        elif should_pass_filepath and dag.full_filepath:
            path = dag.full_filepath
        else:
            path = None
        return TaskInstance.generate_command(
            self.dag_id,
            self.task_id,
            self.execution_date,
            mark_success=mark_success,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_depends_on_past=ignore_depends_on_past,
            ignore_ti_state=ignore_ti_state,
            local=local,
            pickle_id=pickle_id,
            file_path=path,
            raw=raw,
            job_id=job_id,
            pool=pool,
            cfg_path=cfg_path)
@staticmethod
def generate_command(dag_id,
task_id,
execution_date,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
file_path=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None
):
"""
Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:type dag_id: unicode
:param task_id: Task ID
:type task_id: unicode
:param execution_date: Execution date for the task
:type execution_date: datetime
:param mark_success: Whether to mark the task as successful
:type mark_success: bool
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:type ignore_all_deps: boolean
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:type ignore_depends_on_past: boolean
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:type ignore_task_deps: boolean
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: boolean
:param local: Whether to run the task locally
:type local: bool
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:type pickle_id: unicode
:param file_path: path to the file containing the DAG definition
:param raw: raw mode (needs more details)
:param job_id: job ID (needs more details)
:param pool: the Airflow pool that the task should run in
:type pool: unicode
:param cfg_path: the Path to the configuration file
:type cfg_path: basestring
:return: shell command that can be used to run the task instance
"""
iso = execution_date.isoformat()
cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)]
cmd.extend(["--mark_success"]) if mark_success else None
cmd.extend(["--pickle", str(pickle_id)]) if pickle_id else None
cmd.extend(["--job_id", str(job_id)]) if job_id else None
cmd.extend(["-A"]) if ignore_all_deps else None
cmd.extend(["-i"]) if ignore_task_deps else None
cmd.extend(["-I"]) if ignore_depends_on_past else None
cmd.extend(["--force"]) if ignore_ti_state else None
cmd.extend(["--local"]) if local else None
cmd.extend(["--pool", pool]) if pool else None
cmd.extend(["--raw"]) if raw else None
cmd.extend(["-sd", file_path]) if file_path else None
cmd.extend(["--cfg_path", cfg_path]) if cfg_path else None
return cmd
@property
def log_filepath(self):
iso = self.execution_date.isoformat()
log = os.path.expanduser(configuration.conf.get('core', 'BASE_LOG_FOLDER'))
return (
"{log}/{self.dag_id}/{self.task_id}/{iso}.log".format(**locals()))
@property
def log_url(self):
iso = quote(self.execution_date.isoformat())
BASE_URL = configuration.conf.get('webserver', 'BASE_URL')
if settings.RBAC:
return BASE_URL + (
"/log/list/"
"?_flt_3_dag_id={self.dag_id}"
"&_flt_3_task_id={self.task_id}"
"&_flt_3_execution_date={iso}"
).format(**locals())
else:
return BASE_URL + (
"/admin/airflow/log"
"?dag_id={self.dag_id}"
"&task_id={self.task_id}"
"&execution_date={iso}"
).format(**locals())
@property
def mark_success_url(self):
iso = quote(self.execution_date.isoformat())
BASE_URL = configuration.conf.get('webserver', 'BASE_URL')
if settings.RBAC:
return BASE_URL + (
"/success"
"?task_id={self.task_id}"
"&dag_id={self.dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(**locals())
else:
return BASE_URL + (
"/admin/airflow/success"
"?task_id={self.task_id}"
"&dag_id={self.dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(**locals())
@provide_session
def current_state(self, session=None):
"""
Get the very latest state from the database, if a session is passed,
we use and looking up the state becomes part of the session, otherwise
a new session is used.
"""
TI = TaskInstance
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
return state
    @provide_session
    def error(self, session=None):
        """
        Forces the task instance's state to FAILED in the database.

        :param session: db session to use (supplied by ``@provide_session``
            when not passed explicitly)
        """
        self.log.error("Recording the task instance as FAILED")
        self.state = State.FAILED
        # Persist the forced state immediately so other workers see it.
        session.merge(self)
        session.commit()
    @provide_session
    def refresh_from_db(self, session=None, lock_for_update=False):
        """
        Refreshes the task instance from the database based on the primary key
        (dag_id, task_id, execution_date).

        :param session: db session to use (supplied by ``@provide_session``
            when not passed explicitly)
        :param lock_for_update: if True, indicates that the database should
            lock the TaskInstance (issuing a FOR UPDATE clause) until the
            session is committed.
        """
        TI = TaskInstance
        qry = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.task_id == self.task_id,
            TI.execution_date == self.execution_date)
        if lock_for_update:
            # FOR UPDATE blocks other workers from mutating this row until
            # the caller commits the session.
            ti = qry.with_for_update().first()
        else:
            ti = qry.first()
        if ti:
            self.state = ti.state
            self.start_date = ti.start_date
            self.end_date = ti.end_date
            # Get the raw value of the try_number column; don't read through
            # the accessor here, otherwise it will already be incremented
            # by one.
            self.try_number = ti._try_number
            self.max_tries = ti.max_tries
            self.hostname = ti.hostname
            self.pid = ti.pid
            self.executor_config = ti.executor_config
        else:
            # No DB row for this TI yet: reflect that by clearing the state.
            self.state = None
@provide_session
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit()
@property
def key(self):
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date
    @provide_session
    def set_state(self, state, session=None):
        """
        Set the state of this task instance and persist it.

        Note: both start_date and end_date are stamped with "now", so this
        records a zero-duration transition (used for externally-set states).

        :param state: the new state value (see ``State``)
        :param session: db session to use
        """
        self.state = state
        self.start_date = timezone.utcnow()
        self.end_date = timezone.utcnow()
        session.merge(self)
        session.commit()
@property
def is_premature(self):
"""
Returns whether a task is in UP_FOR_RETRY state and its retry interval
has elapsed.
"""
# is the task still in the retry waiting period?
return self.state == State.UP_FOR_RETRY and not self.ready_for_retry()
@provide_session
def are_dependents_done(self, session=None):
"""
Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
"""
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
return count == len(task.downstream_task_ids)
    @property
    @provide_session
    def previous_ti(self, session=None):
        """
        The task instance for the task that ran before this task instance.

        Resolution order: if there is no DagRun for this TI, fall back to the
        DAG's schedule; otherwise walk back through DagRuns (scheduled-only
        when catchup is on). Returns None when no predecessor exists.
        """
        dag = self.task.dag
        if dag:
            dr = self.get_dagrun(session=session)
            # LEGACY: most likely running from unit tests
            if not dr:
                # Means that this TI is NOT being run from a DR, but from a catchup
                previous_scheduled_date = dag.previous_schedule(self.execution_date)
                if not previous_scheduled_date:
                    return None
                # Synthesize a TI at the previous schedule point (not persisted).
                return TaskInstance(task=self.task,
                                    execution_date=previous_scheduled_date)
            dr.dag = dag
            # With catchup, only scheduled runs count as "previous"; without
            # it, any previous run (including manual triggers) qualifies.
            if dag.catchup:
                last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
            else:
                last_dagrun = dr.get_previous_dagrun(session=session)
            if last_dagrun:
                return last_dagrun.get_task_instance(self.task_id, session=session)
        return None
@provide_session
def are_dependencies_met(
self,
dep_context=None,
session=None,
verbose=False):
"""
Returns whether or not all the conditions are met for this task instance to be run
given the context for the dependencies (e.g. a task instance being force run from
the UI will ignore some dependencies).
:param dep_context: The execution context that determines the dependencies that
should be evaluated.
:type dep_context: DepContext
:param session: database session
:type session: Session
:param verbose: whether log details on failed dependencies on
info or debug log level
:type verbose: boolean
"""
dep_context = dep_context or DepContext()
failed = False
verbose_aware_logger = self.log.info if verbose else self.log.debug
for dep_status in self.get_failed_dep_statuses(
dep_context=dep_context,
session=session):
failed = True
verbose_aware_logger(
"Dependencies not met for %s, dependency '%s' FAILED: %s",
self, dep_status.dep_name, dep_status.reason
)
if failed:
return False
verbose_aware_logger("Dependencies all met for %s", self)
return True
@provide_session
def get_failed_dep_statuses(
self,
dep_context=None,
session=None):
dep_context = dep_context or DepContext()
for dep in dep_context.deps | self.task.deps:
for dep_status in dep.get_dep_statuses(
self,
session,
dep_context):
self.log.debug(
"%s dependency '%s' PASSED: %s, %s",
self, dep_status.dep_name, dep_status.passed, dep_status.reason
)
if not dep_status.passed:
yield dep_status
def __repr__(self):
return (
"<TaskInstance: {ti.dag_id}.{ti.task_id} "
"{ti.execution_date} [{ti.state}]>"
).format(ti=self)
def next_retry_datetime(self):
"""
Get datetime of the next retry if the task instance fails. For exponential
backoff, retry_delay is used as base and will be converted to seconds.
"""
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2)))
# deterministic per task instance
hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id,
self.task_id,
self.execution_date,
self.try_number)
.encode('utf-8')).hexdigest(), 16)
# between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
modded_hash = min_backoff + hash % min_backoff
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail.
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay
def ready_for_retry(self):
"""
Checks on whether the task instance is in the right state and timeframe
to be retried.
"""
return (self.state == State.UP_FOR_RETRY and
self.next_retry_datetime() < timezone.utcnow())
@provide_session
def pool_full(self, session):
"""
Returns a boolean as to whether the slot pool has room for this
task to run
"""
if not self.task.pool:
return False
pool = (
session
.query(Pool)
.filter(Pool.pool == self.task.pool)
.first()
)
if not pool:
return False
open_slots = pool.open_slots(session=session)
return open_slots <= 0
@provide_session
def get_dagrun(self, session):
"""
Returns the DagRun for this TaskInstance
:param session:
:return: DagRun
"""
dr = session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == self.execution_date
).first()
return dr
    @provide_session
    def _check_and_change_state_before_execution(
            self,
            verbose=True,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            mark_success=False,
            test_mode=False,
            job_id=None,
            pool=None,
            session=None):
        """
        Checks dependencies and then sets state to RUNNING if they are met. Returns
        True if and only if state is set to RUNNING, which implies that task should be
        executed, in preparation for _run_raw_task

        :param verbose: whether to turn on more verbose logging
        :type verbose: boolean
        :param ignore_all_deps: Ignore all of the non-critical dependencies, just runs
        :type ignore_all_deps: boolean
        :param ignore_depends_on_past: Ignore depends_on_past DAG attribute
        :type ignore_depends_on_past: boolean
        :param ignore_task_deps: Don't check the dependencies of this TI's task
        :type ignore_task_deps: boolean
        :param ignore_ti_state: Disregards previous task instance state
        :type ignore_ti_state: boolean
        :param mark_success: Don't run the task, mark its state as success
        :type mark_success: boolean
        :param test_mode: Doesn't record success or failure in the DB
        :type test_mode: boolean
        :param pool: specifies the pool to use to run the task instance
        :type pool: str
        :return: whether the state was changed to running or not
        :rtype: bool
        """
        task = self.task
        self.pool = pool or task.pool
        self.test_mode = test_mode
        # Row-lock our own DB record so no other worker can transition this
        # TI concurrently while we decide whether to run.
        self.refresh_from_db(session=session, lock_for_update=True)
        self.job_id = job_id
        self.hostname = get_hostname()
        self.operator = task.__class__.__name__
        if not ignore_all_deps and not ignore_ti_state and self.state == State.SUCCESS:
            Stats.incr('previously_succeeded', 1, 1)
        # First gate: the queueing-time dependencies (QUEUE_DEPS).
        queue_dep_context = DepContext(
            deps=QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_ti_state=ignore_ti_state,
            ignore_depends_on_past=ignore_depends_on_past,
            ignore_task_deps=ignore_task_deps)
        if not self.are_dependencies_met(
                dep_context=queue_dep_context,
                session=session,
                verbose=True):
            # Commit to release the row lock taken by refresh_from_db.
            session.commit()
            return False
        # TODO: Logging needs cleanup, not clear what is being printed
        hr = "\n" + ("-" * 80) + "\n"  # Line break
        # For reporting purposes, we report based on 1-indexed,
        # not 0-indexed lists (i.e. Attempt 1 instead of
        # Attempt 0 for the first attempt).
        msg = "Starting attempt {attempt} of {total}".format(
            attempt=self.try_number,
            total=self.max_tries + 1)
        self.start_date = timezone.utcnow()
        # Second gate: runtime-only dependencies (RUN_DEPS minus the
        # queueing ones already checked above).
        dep_context = DepContext(
            deps=RUN_DEPS - QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_depends_on_past=ignore_depends_on_past,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        runnable = self.are_dependencies_met(
            dep_context=dep_context,
            session=session,
            verbose=True)
        if not runnable and not mark_success:
            # FIXME: we might have hit concurrency limits, which means we probably
            # have been running prematurely. This should be handled in the
            # scheduling mechanism.
            self.state = State.NONE
            msg = ("FIXME: Rescheduling due to concurrency limits reached at task "
                   "runtime. Attempt {attempt} of {total}. State set to NONE.").format(
                attempt=self.try_number,
                total=self.max_tries + 1)
            self.log.warning(hr + msg + hr)
            self.queued_dttm = timezone.utcnow()
            self.log.info("Queuing into pool %s", self.pool)
            session.merge(self)
            session.commit()
            return False
        # Another worker might have started running this task instance while
        # the current worker process was blocked on refresh_from_db
        if self.state == State.RUNNING:
            msg = "Task Instance already running {}".format(self)
            self.log.warning(msg)
            session.commit()
            return False
        # print status message
        self.log.info(hr + msg + hr)
        # Bump the raw column so this attempt is counted.
        self._try_number += 1
        if not test_mode:
            session.add(Log(State.RUNNING, self))
        self.state = State.RUNNING
        self.pid = os.getpid()
        self.end_date = None
        if not test_mode:
            session.merge(self)
        session.commit()
        # Closing all pooled connections to prevent
        # "max number of connections reached"
        settings.engine.dispose()
        if verbose:
            if mark_success:
                msg = "Marking success for {} on {}".format(self.task,
                                                            self.execution_date)
                self.log.info(msg)
            else:
                msg = "Executing {} on {}".format(self.task, self.execution_date)
                self.log.info(msg)
        return True
    @provide_session
    def _run_raw_task(
            self,
            mark_success=False,
            test_mode=False,
            job_id=None,
            pool=None,
            session=None):
        """
        Immediately runs the task (without checking or changing db state
        before execution) and then sets the appropriate final state after
        completion and runs any post-execute callbacks. Meant to be called
        only after another function changes the state to running.

        :param mark_success: Don't run the task, mark its state as success
        :type mark_success: boolean
        :param test_mode: Doesn't record success or failure in the DB
        :type test_mode: boolean
        :param pool: specifies the pool to use to run the task instance
        :type pool: str
        """
        task = self.task
        self.pool = pool or task.pool
        self.test_mode = test_mode
        self.refresh_from_db(session=session)
        self.job_id = job_id
        self.hostname = get_hostname()
        self.operator = task.__class__.__name__
        context = {}
        try:
            if not mark_success:
                context = self.get_template_context()
                # Work on a shallow copy so template rendering does not
                # mutate the shared DAG-level task object.
                task_copy = copy.copy(task)
                self.task = task_copy
                def signal_handler(signum, frame):
                    self.log.error("Received SIGTERM. Terminating subprocesses.")
                    task_copy.on_kill()
                    raise AirflowException("Task received SIGTERM signal")
                signal.signal(signal.SIGTERM, signal_handler)
                # Don't clear Xcom until the task is certain to execute
                self.clear_xcom_data()
                self.render_templates()
                task_copy.pre_execute(context=context)
                # If a timeout is specified for the task, make it fail
                # if it goes beyond
                result = None
                if task_copy.execution_timeout:
                    try:
                        with timeout(int(
                                task_copy.execution_timeout.total_seconds())):
                            result = task_copy.execute(context=context)
                    except AirflowTaskTimeout:
                        task_copy.on_kill()
                        raise
                else:
                    result = task_copy.execute(context=context)
                # If the task returns a result, push an XCom containing it
                if result is not None:
                    self.xcom_push(key=XCOM_RETURN_KEY, value=result)
                # TODO remove deprecated behavior in Airflow 2.0
                try:
                    task_copy.post_execute(context=context, result=result)
                except TypeError as e:
                    if 'unexpected keyword argument' in str(e):
                        warnings.warn(
                            'BaseOperator.post_execute() now takes two '
                            'arguments, `context` and `result`, but "{}" only '
                            'expected one. This behavior is deprecated and '
                            'will be removed in a future version of '
                            'Airflow.'.format(self.task_id),
                            category=DeprecationWarning)
                        # Fall back to the legacy single-argument signature.
                        task_copy.post_execute(context=context)
                    else:
                        raise
                Stats.incr('operator_successes_{}'.format(
                    self.task.__class__.__name__), 1, 1)
                Stats.incr('ti_successes')
            # Re-read (with row lock) before writing the final state, in case
            # the TI was modified externally while the task ran.
            self.refresh_from_db(lock_for_update=True)
            self.state = State.SUCCESS
        except AirflowSkipException:
            self.refresh_from_db(lock_for_update=True)
            self.state = State.SKIPPED
        except AirflowException as e:
            self.refresh_from_db()
            # for case when task is marked as success externally
            # current behavior doesn't hit the success callback
            if self.state == State.SUCCESS:
                return
            else:
                self.handle_failure(e, test_mode, context)
                raise
        except (Exception, KeyboardInterrupt) as e:
            self.handle_failure(e, test_mode, context)
            raise
        # Recording SUCCESS
        self.end_date = timezone.utcnow()
        self.set_duration()
        if not test_mode:
            session.add(Log(self.state, self))
            session.merge(self)
        session.commit()
        # Success callback
        try:
            if task.on_success_callback:
                task.on_success_callback(context)
        except Exception as e3:
            # A failing callback must not fail an already-successful task.
            self.log.error("Failed when executing success callback")
            self.log.exception(e3)
        session.commit()
@provide_session
def run(
self,
verbose=True,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
test_mode=False,
job_id=None,
pool=None,
session=None):
res = self._check_and_change_state_before_execution(
verbose=verbose,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
mark_success=mark_success,
test_mode=test_mode,
job_id=job_id,
pool=pool,
session=session)
if res:
self._run_raw_task(
mark_success=mark_success,
test_mode=test_mode,
job_id=job_id,
pool=pool,
session=session)
def dry_run(self):
task = self.task
task_copy = copy.copy(task)
self.task = task_copy
self.render_templates()
task_copy.dry_run()
    @provide_session
    def handle_failure(self, error, test_mode=False, context=None, session=None):
        """
        Record a task failure: decide between UP_FOR_RETRY and FAILED, log
        the failure, optionally send alert emails and run retry/failure
        callbacks, then persist the new state.

        :param error: the exception (or message) that caused the failure
        :param test_mode: if True, nothing is written to the DB
        :param context: template context passed to the callbacks
        :param session: db session to use
        """
        self.log.exception(error)
        task = self.task
        self.end_date = timezone.utcnow()
        self.set_duration()
        Stats.incr('operator_failures_{}'.format(task.__class__.__name__), 1, 1)
        Stats.incr('ti_failures')
        if not test_mode:
            session.add(Log(State.FAILED, self))
            # Log failure duration
            session.add(TaskFail(task, self.execution_date, self.start_date, self.end_date))
        # Let's go deeper
        try:
            # Since this function is called only when the TI state is running,
            # try_number contains the current try_number (not the next). We
            # only mark task instance as FAILED if the next task instance
            # try_number exceeds the max_tries.
            if task.retries and self.try_number <= self.max_tries:
                self.state = State.UP_FOR_RETRY
                self.log.info('Marking task as UP_FOR_RETRY')
                if task.email_on_retry and task.email:
                    self.email_alert(error, is_retry=True)
            else:
                self.state = State.FAILED
                if task.retries:
                    self.log.info('All retries failed; marking task as FAILED')
                else:
                    self.log.info('Marking task as FAILED.')
                if task.email_on_failure and task.email:
                    self.email_alert(error, is_retry=False)
        except Exception as e2:
            # Email problems must not mask the original task failure.
            self.log.error('Failed to send email to: %s', task.email)
            self.log.exception(e2)
        # Handling callbacks pessimistically
        try:
            if self.state == State.UP_FOR_RETRY and task.on_retry_callback:
                task.on_retry_callback(context)
            if self.state == State.FAILED and task.on_failure_callback:
                task.on_failure_callback(context)
        except Exception as e3:
            # Same reasoning: a broken callback must not mask the failure.
            self.log.error("Failed at executing callback")
            self.log.exception(e3)
        if not test_mode:
            session.merge(self)
        session.commit()
        self.log.error(str(error))
    @provide_session
    def get_template_context(self, session=None):
        """
        Build the Jinja template context for this task instance: the dict of
        variables (ds, ts, macros, params, dag_run, var accessors, ...)
        made available to templated fields and callbacks.
        """
        task = self.task
        from airflow import macros
        tables = None
        if 'tables' in task.params:
            tables = task.params['tables']
        # Date strings derived from the execution date.
        ds = self.execution_date.strftime('%Y-%m-%d')
        ts = self.execution_date.isoformat()
        yesterday_ds = (self.execution_date - timedelta(1)).strftime('%Y-%m-%d')
        tomorrow_ds = (self.execution_date + timedelta(1)).strftime('%Y-%m-%d')
        # Schedule-relative neighbors (may be None for unscheduled DAGs).
        prev_execution_date = task.dag.previous_schedule(self.execution_date)
        next_execution_date = task.dag.following_schedule(self.execution_date)
        next_ds = None
        if next_execution_date:
            next_ds = next_execution_date.strftime('%Y-%m-%d')
        prev_ds = None
        if prev_execution_date:
            prev_ds = prev_execution_date.strftime('%Y-%m-%d')
        # "nodash" variants are filesystem/partition-friendly.
        ds_nodash = ds.replace('-', '')
        ts_nodash = ts.replace('-', '').replace(':', '')
        yesterday_ds_nodash = yesterday_ds.replace('-', '')
        tomorrow_ds_nodash = tomorrow_ds.replace('-', '')
        ti_key_str = "{task.dag_id}__{task.task_id}__{ds_nodash}"
        ti_key_str = ti_key_str.format(**locals())
        params = {}
        run_id = ''
        dag_run = None
        if hasattr(task, 'dag'):
            if task.dag.params:
                params.update(task.dag.params)
            dag_run = (
                session.query(DagRun)
                .filter_by(
                    dag_id=task.dag.dag_id,
                    execution_date=self.execution_date)
                .first()
            )
            run_id = dag_run.run_id if dag_run else None
            # Detach ORM objects so the dag_run in the context can outlive
            # this session.
            session.expunge_all()
            session.commit()
        # Task-level params override DAG-level params.
        if task.params:
            params.update(task.params)
        class VariableAccessor:
            """
            Wrapper around Variable. This way you can get variables in templates by using
            {var.value.your_variable_name}.
            """
            def __init__(self):
                self.var = None
            def __getattr__(self, item):
                self.var = Variable.get(item)
                return self.var
            def __repr__(self):
                return str(self.var)
        class VariableJsonAccessor:
            """
            Wrapper around deserialized Variables. This way you can get variables
            in templates by using {var.json.your_variable_name}.
            """
            def __init__(self):
                self.var = None
            def __getattr__(self, item):
                self.var = Variable.get(item, deserialize_json=True)
                return self.var
            def __repr__(self):
                return str(self.var)
        return {
            'dag': task.dag,
            'ds': ds,
            'next_ds': next_ds,
            'prev_ds': prev_ds,
            'ds_nodash': ds_nodash,
            'ts': ts,
            'ts_nodash': ts_nodash,
            'yesterday_ds': yesterday_ds,
            'yesterday_ds_nodash': yesterday_ds_nodash,
            'tomorrow_ds': tomorrow_ds,
            'tomorrow_ds_nodash': tomorrow_ds_nodash,
            'END_DATE': ds,
            'end_date': ds,
            'dag_run': dag_run,
            'run_id': run_id,
            'execution_date': self.execution_date,
            'prev_execution_date': prev_execution_date,
            'next_execution_date': next_execution_date,
            'latest_date': ds,
            'macros': macros,
            'params': params,
            'tables': tables,
            'task': task,
            'task_instance': self,
            'ti': self,
            'task_instance_key_str': ti_key_str,
            'conf': configuration,
            'test_mode': self.test_mode,
            'var': {
                'value': VariableAccessor(),
                'json': VariableJsonAccessor()
            },
            'inlets': task.inlets,
            'outlets': task.outlets,
        }
def render_templates(self):
task = self.task
jinja_context = self.get_template_context()
if hasattr(self, 'task') and hasattr(self.task, 'dag'):
if self.task.dag.user_defined_macros:
jinja_context.update(
self.task.dag.user_defined_macros)
rt = self.task.render_template # shortcut to method
for attr in task.__class__.template_fields:
content = getattr(task, attr)
if content:
rendered_content = rt(attr, content, jinja_context)
setattr(task, attr, rendered_content)
def email_alert(self, exception, is_retry=False):
task = self.task
title = "Airflow alert: {self}".format(**locals())
exception = str(exception).replace('\n', '<br>')
# For reporting purposes, we report based on 1-indexed,
# not 0-indexed lists (i.e. Try 1 instead of
# Try 0 for the first attempt).
body = (
"Try {try_number} out of {max_tries}<br>"
"Exception:<br>{exception}<br>"
"Log: <a href='{self.log_url}'>Link</a><br>"
"Host: {self.hostname}<br>"
"Log file: {self.log_filepath}<br>"
"Mark success: <a href='{self.mark_success_url}'>Link</a><br>"
).format(try_number=self.try_number, max_tries=self.max_tries + 1, **locals())
send_email(task.email, title, body)
def set_duration(self):
if self.end_date and self.start_date:
self.duration = (self.end_date - self.start_date).total_seconds()
else:
self.duration = None
def xcom_push(
self,
key,
value,
execution_date=None):
"""
Make an XCom available for tasks to pull.
:param key: A key for the XCom
:type key: string
:param value: A value for the XCom. The value is pickled and stored
in the database.
:type value: any pickleable object
:param execution_date: if provided, the XCom will not be visible until
this date. This can be used, for example, to send a message to a
task on a future date without it being immediately visible.
:type execution_date: datetime
"""
if execution_date and execution_date < self.execution_date:
raise ValueError(
'execution_date can not be in the past (current '
'execution_date is {}; received {})'.format(
self.execution_date, execution_date))
XCom.set(
key=key,
value=value,
task_id=self.task_id,
dag_id=self.dag_id,
execution_date=execution_date or self.execution_date)
def xcom_pull(
self,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=False):
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: string
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: string or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: string
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
"""
if dag_id is None:
dag_id = self.dag_id
pull_fn = functools.partial(
XCom.get_one,
execution_date=self.execution_date,
key=key,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
if is_container(task_ids):
return tuple(pull_fn(task_id=t) for t in task_ids)
else:
return pull_fn(task_id=task_ids)
@provide_session
def get_num_running_task_instances(self, session):
TI = TaskInstance
return session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.task_id == self.task_id,
TI.state == State.RUNNING
).count()
    def init_run_context(self, raw=False):
        """
        Sets the log context.

        :param raw: whether the task is running in --raw mode (no log
            forwarding wrapper)
        """
        self.raw = raw
        # The TI itself serves as the logging context object.
        self._set_context(self)
class TaskFail(Base):
    """
    TaskFail tracks the failed run durations of each task instance.
    """
    __tablename__ = "task_fail"
    # Primary key mirrors TaskInstance's identity.
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(UtcDateTime, primary_key=True)
    start_date = Column(UtcDateTime)
    end_date = Column(UtcDateTime)
    duration = Column(Float)
    def __init__(self, task, execution_date, start_date, end_date):
        """Record the identity and duration (seconds) of one failed try."""
        self.dag_id = task.dag_id
        self.task_id = task.task_id
        self.execution_date = execution_date
        self.start_date = start_date
        self.end_date = end_date
        if start_date and end_date:
            self.duration = (end_date - start_date).total_seconds()
        else:
            self.duration = None
class Log(Base):
    """
    Used to actively log events to the database
    """
    __tablename__ = "log"
    id = Column(Integer, primary_key=True)
    dttm = Column(UtcDateTime)
    dag_id = Column(String(ID_LEN))
    task_id = Column(String(ID_LEN))
    event = Column(String(30))
    execution_date = Column(UtcDateTime)
    owner = Column(String(500))
    extra = Column(Text)
    def __init__(self, event, task_instance, owner=None, extra=None, **kwargs):
        """Build a log row; identity fields come from the task instance but
        can be overridden via task_id/dag_id/execution_date kwargs."""
        self.dttm = timezone.utcnow()
        self.event = event
        self.extra = extra
        task_owner = None
        if task_instance:
            self.dag_id = task_instance.dag_id
            self.task_id = task_instance.task_id
            self.execution_date = task_instance.execution_date
            task_owner = task_instance.task.owner
        # Explicit kwargs win over values taken from the task instance.
        if 'task_id' in kwargs:
            self.task_id = kwargs['task_id']
        if 'dag_id' in kwargs:
            self.dag_id = kwargs['dag_id']
        if kwargs.get('execution_date'):
            self.execution_date = kwargs['execution_date']
        self.owner = owner or task_owner
class SkipMixin(LoggingMixin):
    @provide_session
    def skip(self, dag_run, execution_date, tasks, session=None):
        """
        Sets tasks instances to skipped from the same dag run.

        When a dag_run is available, the TIs are skipped with one bulk
        UPDATE; otherwise (defensive path for incomplete dag runs) a
        TaskInstance row is merged per task.

        :param dag_run: the DagRun for which to set the tasks to skipped
        :param execution_date: execution_date
        :param tasks: tasks to skip (not task_ids)
        :param session: db session to use
        """
        if not tasks:
            return
        task_ids = [d.task_id for d in tasks]
        now = timezone.utcnow()
        if dag_run:
            # Bulk UPDATE; synchronize_session=False skips the in-session
            # sync since we commit immediately afterwards.
            session.query(TaskInstance).filter(
                TaskInstance.dag_id == dag_run.dag_id,
                TaskInstance.execution_date == dag_run.execution_date,
                TaskInstance.task_id.in_(task_ids)
            ).update({TaskInstance.state: State.SKIPPED,
                      TaskInstance.start_date: now,
                      TaskInstance.end_date: now},
                     synchronize_session=False)
            session.commit()
        else:
            # NOTE(review): `assert` is stripped under `python -O`; this
            # validation would silently vanish there.
            assert execution_date is not None, "Execution date is None and no dag run"
            self.log.warning("No DAG RUN present this should not happen")
            # this is defensive against dag runs that are not complete
            for task in tasks:
                ti = TaskInstance(task, execution_date=execution_date)
                ti.state = State.SKIPPED
                ti.start_date = now
                ti.end_date = now
                session.merge(ti)
            session.commit()
@functools.total_ordering
class BaseOperator(LoggingMixin):
    """
    Abstract base class for all operators. Since operators create objects that
    become nodes in the dag, BaseOperator contains many recursive methods for
    dag crawling behavior. To derive this class, you are expected to override
    the constructor as well as the 'execute' method.
    Operators derived from this class should perform or trigger certain tasks
    synchronously (wait for completion). Example of operators could be an
    operator that runs a Pig job (PigOperator), a sensor operator that
    waits for a partition to land in Hive (HiveSensorOperator), or one that
    moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
    operators (tasks) target specific operations, running specific scripts,
    functions or data transfers.
    This class is abstract and shouldn't be instantiated. Instantiating a
    class derived from this one results in the creation of a task object,
    which ultimately becomes a node in DAG objects. Task dependencies should
    be set by using the set_upstream and/or set_downstream methods.

    :param task_id: a unique, meaningful id for the task
    :type task_id: string
    :param owner: the owner of the task, using the unix username is recommended
    :type owner: string
    :param email: the 'to' email address(es) used in email alerts
    :type email: string or list of strings
    :param email_on_retry: Indicates whether email alerts should be sent when a
        task is retried
    :type email_on_retry: bool
    :param email_on_failure: Indicates whether email alerts should be sent when
        a task failed
    :type email_on_failure: bool
    :param retries: the number of retries that should be performed before
        failing the task
    :type retries: int
    :param retry_delay: delay between retries
    :type retry_delay: timedelta
    :param retry_exponential_backoff: allow progressive longer waits between
        retries by using exponential backoff algorithm on retry delay (delay
        will be converted into seconds)
    :type retry_exponential_backoff: bool
    :param max_retry_delay: maximum delay interval between retries
    :type max_retry_delay: timedelta
    :param start_date: The ``start_date`` for the task, determines
        the ``execution_date`` for the first task instance. The best practice
        is to have the start_date rounded
        to your DAG's ``schedule_interval``. Daily jobs have their start_date
        some day at 00:00:00, hourly jobs have their start_date at 00:00
        of a specific hour. Note that Airflow simply looks at the latest
        ``execution_date`` and adds the ``schedule_interval`` to determine
        the next ``execution_date``. It is also very important
        to note that different tasks' dependencies
        need to line up in time. If task A depends on task B and their
        start_date are offset in a way that their execution_date don't line
        up, A's dependencies will never be met. If you are looking to delay
        a task, for example running a daily task at 2AM, look into the
        ``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
        dynamic ``start_date`` and recommend using fixed ones. Read the
        FAQ entry about start_date for more information.
    :type start_date: datetime
    :param end_date: if specified, the scheduler won't go beyond this date
    :type end_date: datetime
    :param depends_on_past: when set to true, task instances will run
        sequentially while relying on the previous task's schedule to
        succeed. The task instance for the start_date is allowed to run.
    :type depends_on_past: bool
    :param wait_for_downstream: when set to true, an instance of task
        X will wait for tasks immediately downstream of the previous instance
        of task X to finish successfully before it runs. This is useful if the
        different instances of a task X alter the same asset, and this asset
        is used by tasks downstream of task X. Note that depends_on_past
        is forced to True wherever wait_for_downstream is used.
    :type wait_for_downstream: bool
    :param queue: which queue to target when running this job. Not
        all executors implement queue management, the CeleryExecutor
        does support targeting specific queues.
    :type queue: str
    :param dag: a reference to the dag the task is attached to (if any)
    :type dag: DAG
    :param priority_weight: priority weight of this task against other task.
        This allows the executor to trigger higher priority tasks before
        others when things get backed up.
    :type priority_weight: int
    :param weight_rule: weighting method used for the effective total
        priority weight of the task. Options are:
        ``{ downstream | upstream | absolute }`` default is ``downstream``
        When set to ``downstream`` the effective weight of the task is the
        aggregate sum of all downstream descendants. As a result, upstream
        tasks will have higher weight and will be scheduled more aggressively
        when using positive weight values. This is useful when you have
        multiple dag run instances and desire to have all upstream tasks to
        complete for all runs before each dag can continue processing
        downstream tasks. When set to ``upstream`` the effective weight is the
        aggregate sum of all upstream ancestors. This is the opposite where
        downstream tasks have higher weight and will be scheduled more
        aggressively when using positive weight values. This is useful when you
        have multiple dag run instances and prefer to have each dag complete
        before starting upstream tasks of other dags. When set to
        ``absolute``, the effective weight is the exact ``priority_weight``
        specified without additional weighting. You may want to do this when
        you know exactly what priority weight each task should have.
        Additionally, when set to ``absolute``, there is bonus effect of
        significantly speeding up the task creation process as for very large
        DAGS. Options can be set as string or using the constants defined in
        the static class ``airflow.utils.WeightRule``
    :type weight_rule: str
    :param pool: the slot pool this task should run in, slot pools are a
        way to limit concurrency for certain tasks
    :type pool: str
    :param sla: time by which the job is expected to succeed. Note that
        this represents the ``timedelta`` after the period is closed. For
        example if you set an SLA of 1 hour, the scheduler would send an email
        soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance
        has not succeeded yet.
        The scheduler pays special attention for jobs with an SLA and
        sends alert
        emails for sla misses. SLA misses are also recorded in the database
        for future reference. All tasks that share the same SLA time
        get bundled in a single email, sent soon after that time. SLA
        notification are sent once and only once for each task instance.
    :type sla: datetime.timedelta
    :param execution_timeout: max time allowed for the execution of
        this task instance, if it goes beyond it will raise and fail.
    :type execution_timeout: datetime.timedelta
    :param on_failure_callback: a function to be called when a task instance
        of this task fails. a context dictionary is passed as a single
        parameter to this function. Context contains references to related
        objects to the task instance and is documented under the macros
        section of the API.
    :type on_failure_callback: callable
    :param on_retry_callback: much like the ``on_failure_callback`` except
        that it is executed when retries occur.
    :type on_retry_callback: callable
    :param on_success_callback: much like the ``on_failure_callback`` except
        that it is executed when the task succeeds.
    :type on_success_callback: callable
    :param trigger_rule: defines the rule by which dependencies are applied
        for the task to get triggered. Options are:
        ``{ all_success | all_failed | all_done | one_success |
        one_failed | dummy}``
        default is ``all_success``. Options can be set as string or
        using the constants defined in the static class
        ``airflow.utils.TriggerRule``
    :type trigger_rule: str
    :param resources: A map of resource parameter names (the argument names of the
        Resources constructor) to their values.
    :type resources: dict
    :param run_as_user: unix username to impersonate while running the task
    :type run_as_user: str
    :param task_concurrency: When set, a task will be able to limit the concurrent
        runs across execution_dates
    :type task_concurrency: int
    :param executor_config: Additional task-level configuration parameters that are
        interpreted by a specific executor. Parameters are namespaced by the name of
        executor.
        ``example: to run this task in a specific docker container through
        the KubernetesExecutor
        MyOperator(...,
            executor_config={
            "KubernetesExecutor":
                {"image": "myCustomDockerImage"}
                }
        )``
    :type executor_config: dict
    """

    # For derived classes to define which fields will get jinjaified
    template_fields = []
    # Defines which files extensions to look for in the templated fields
    template_ext = []
    # Defines the color in the UI
    ui_color = '#fff'
    ui_fgcolor = '#000'

    @apply_defaults
    def __init__(
            self,
            task_id,
            owner=configuration.conf.get('operators', 'DEFAULT_OWNER'),
            email=None,
            email_on_retry=True,
            email_on_failure=True,
            retries=0,
            retry_delay=timedelta(seconds=300),
            retry_exponential_backoff=False,
            max_retry_delay=None,
            start_date=None,
            end_date=None,
            schedule_interval=None,  # not hooked as of now
            depends_on_past=False,
            wait_for_downstream=False,
            dag=None,
            params=None,
            default_args=None,
            adhoc=False,
            priority_weight=1,
            weight_rule=WeightRule.DOWNSTREAM,
            queue=configuration.conf.get('celery', 'default_queue'),
            pool=None,
            sla=None,
            execution_timeout=None,
            on_failure_callback=None,
            on_success_callback=None,
            on_retry_callback=None,
            trigger_rule=TriggerRule.ALL_SUCCESS,
            resources=None,
            run_as_user=None,
            task_concurrency=None,
            executor_config=None,
            inlets=None,
            outlets=None,
            *args,
            **kwargs):
        # Leftover positional/keyword arguments are tolerated but deprecated.
        if args or kwargs:
            # TODO remove *args and **kwargs in Airflow 2.0
            warnings.warn(
                'Invalid arguments were passed to {c}. Support for '
                'passing such arguments will be dropped in Airflow 2.0. '
                'Invalid arguments were:'
                '\n*args: {a}\n**kwargs: {k}'.format(
                    c=self.__class__.__name__, a=args, k=kwargs),
                category=PendingDeprecationWarning
            )
        validate_key(task_id)
        self.task_id = task_id
        self.owner = owner
        self.email = email
        self.email_on_retry = email_on_retry
        self.email_on_failure = email_on_failure
        self.start_date = start_date
        # Warn (don't fail) on non-datetime start_date to stay permissive.
        if start_date and not isinstance(start_date, datetime):
            self.log.warning("start_date for %s isn't datetime.datetime", self)
        self.end_date = end_date
        if not TriggerRule.is_valid(trigger_rule):
            raise AirflowException(
                "The trigger_rule must be one of {all_triggers},"
                "'{d}.{t}'; received '{tr}'."
                .format(all_triggers=TriggerRule.all_triggers,
                        d=dag.dag_id if dag else "", t=task_id, tr=trigger_rule))
        self.trigger_rule = trigger_rule
        self.depends_on_past = depends_on_past
        self.wait_for_downstream = wait_for_downstream
        # wait_for_downstream only makes sense relative to the previous run,
        # so it implies depends_on_past.
        if wait_for_downstream:
            self.depends_on_past = True
        if schedule_interval:
            self.log.warning(
                "schedule_interval is used for %s, though it has "
                "been deprecated as a task parameter, you need to "
                "specify it as a DAG parameter instead",
                self
            )
        self._schedule_interval = schedule_interval
        self.retries = retries
        self.queue = queue
        self.pool = pool
        self.sla = sla
        self.execution_timeout = execution_timeout
        self.on_failure_callback = on_failure_callback
        self.on_success_callback = on_success_callback
        self.on_retry_callback = on_retry_callback
        # A bare number is interpreted as seconds for convenience.
        if isinstance(retry_delay, timedelta):
            self.retry_delay = retry_delay
        else:
            self.log.debug("Retry_delay isn't timedelta object, assuming secs")
            self.retry_delay = timedelta(seconds=retry_delay)
        self.retry_exponential_backoff = retry_exponential_backoff
        self.max_retry_delay = max_retry_delay
        self.params = params or {}  # Available in templates!
        self.adhoc = adhoc
        self.priority_weight = priority_weight
        if not WeightRule.is_valid(weight_rule):
            raise AirflowException(
                "The weight_rule must be one of {all_weight_rules},"
                "'{d}.{t}'; received '{tr}'."
                .format(all_weight_rules=WeightRule.all_weight_rules,
                        d=dag.dag_id if dag else "", t=task_id, tr=weight_rule))
        self.weight_rule = weight_rule
        self.resources = Resources(**(resources or {}))
        self.run_as_user = run_as_user
        self.task_concurrency = task_concurrency
        self.executor_config = executor_config or {}
        # Private attributes
        self._upstream_task_ids = set()
        self._downstream_task_ids = set()
        # Fall back to the DAG currently open as a `with DAG(...)` context
        # manager, if any.
        if not dag and _CONTEXT_MANAGER_DAG:
            dag = _CONTEXT_MANAGER_DAG
        if dag:
            self.dag = dag
        self._log = logging.getLogger("airflow.task.operators")
        # lineage
        self.inlets = []
        self.outlets = []
        self.lineage_data = None
        self._inlets = {
            "auto": False,
            "task_ids": [],
            "datasets": [],
        }
        self._outlets = {
            "datasets": [],
        }
        if inlets:
            self._inlets.update(inlets)
        if outlets:
            self._outlets.update(outlets)
        # Attributes that participate in equality/hashing (see __eq__/__hash__).
        self._comps = {
            'task_id',
            'dag_id',
            'owner',
            'email',
            'email_on_retry',
            'retry_delay',
            'retry_exponential_backoff',
            'max_retry_delay',
            'start_date',
            'schedule_interval',
            'depends_on_past',
            'wait_for_downstream',
            'adhoc',
            'priority_weight',
            'sla',
            'execution_timeout',
            'on_failure_callback',
            'on_success_callback',
            'on_retry_callback',
        }

    def __eq__(self, other):
        # Two operators are equal when they are the same type and agree on
        # every attribute listed in self._comps.
        return (
            type(self) == type(other) and
            all(self.__dict__.get(c, None) == other.__dict__.get(c, None)
                for c in self._comps))

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        # Ordering by task_id; functools.total_ordering derives the rest.
        return self.task_id < other.task_id

    def __hash__(self):
        # Hash over the same attributes used by __eq__; unhashable values
        # fall back to their repr so hashing never raises.
        hash_components = [type(self)]
        for c in self._comps:
            val = getattr(self, c, None)
            try:
                hash(val)
                hash_components.append(val)
            except TypeError:
                hash_components.append(repr(val))
        return hash(tuple(hash_components))

    # Composing Operators -----------------------------------------------

    def __rshift__(self, other):
        """
        Implements Self >> Other == self.set_downstream(other)
        If "Other" is a DAG, the DAG is assigned to the Operator.
        """
        if isinstance(other, DAG):
            # if this dag is already assigned, do nothing
            # otherwise, do normal dag assignment
            if not (self.has_dag() and self.dag is other):
                self.dag = other
        else:
            self.set_downstream(other)
        return other

    def __lshift__(self, other):
        """
        Implements Self << Other == self.set_upstream(other)
        If "Other" is a DAG, the DAG is assigned to the Operator.
        """
        if isinstance(other, DAG):
            # if this dag is already assigned, do nothing
            # otherwise, do normal dag assignment
            if not (self.has_dag() and self.dag is other):
                self.dag = other
        else:
            self.set_upstream(other)
        return other

    def __rrshift__(self, other):
        """
        Called for [DAG] >> [Operator] because DAGs don't have
        __rshift__ operators.
        """
        self.__lshift__(other)
        return self

    def __rlshift__(self, other):
        """
        Called for [DAG] << [Operator] because DAGs don't have
        __lshift__ operators.
        """
        self.__rshift__(other)
        return self

    # /Composing Operators ---------------------------------------------

    @property
    def dag(self):
        """
        Returns the Operator's DAG if set, otherwise raises an error
        """
        if self.has_dag():
            return self._dag
        else:
            raise AirflowException(
                'Operator {} has not been assigned to a DAG yet'.format(self))

    @dag.setter
    def dag(self, dag):
        """
        Operators can be assigned to one DAG, one time. Repeat assignments to
        that same DAG are ok.
        """
        if not isinstance(dag, DAG):
            raise TypeError(
                'Expected DAG; received {}'.format(dag.__class__.__name__))
        elif self.has_dag() and self.dag is not dag:
            raise AirflowException(
                "The DAG assigned to {} can not be changed.".format(self))
        elif self.task_id not in dag.task_dict:
            # First assignment: register this task with the DAG.
            dag.add_task(self)
        self._dag = dag

    def has_dag(self):
        """
        Returns True if the Operator has been assigned to a DAG.
        """
        return getattr(self, '_dag', None) is not None

    @property
    def dag_id(self):
        # Fall back to a synthetic "adhoc_<owner>" id when unassigned.
        if self.has_dag():
            return self.dag.dag_id
        else:
            return 'adhoc_' + self.owner

    @property
    def deps(self):
        """
        Returns the list of dependencies for the operator. These differ from execution
        context dependencies in that they are specific to tasks and can be
        extended/overridden by subclasses.
        """
        return {
            NotInRetryPeriodDep(),
            PrevDagrunDep(),
            TriggerRuleDep(),
        }

    @property
    def schedule_interval(self):
        """
        The schedule interval of the DAG always wins over individual tasks so
        that tasks within a DAG always line up. The task still needs a
        schedule_interval as it may not be attached to a DAG.
        """
        if self.has_dag():
            return self.dag._schedule_interval
        else:
            return self._schedule_interval

    @property
    def priority_weight_total(self):
        """
        Effective priority weight per ``weight_rule``: the task's own weight
        plus the sum of its upstream or downstream relatives' weights
        (or just its own weight for ``absolute``).
        """
        if self.weight_rule == WeightRule.ABSOLUTE:
            return self.priority_weight
        elif self.weight_rule == WeightRule.DOWNSTREAM:
            upstream = False
        elif self.weight_rule == WeightRule.UPSTREAM:
            upstream = True
        else:
            # Unknown rule: behave like DOWNSTREAM.
            upstream = False
        return self.priority_weight + sum(
            map(lambda task_id: self._dag.task_dict[task_id].priority_weight,
                self.get_flat_relative_ids(upstream=upstream))
        )

    @prepare_lineage
    def pre_execute(self, context):
        """
        This hook is triggered right before self.execute() is called.
        """
        pass

    def execute(self, context):
        """
        This is the main method to derive when creating an operator.
        Context is the same dictionary used as when rendering jinja templates.
        Refer to get_template_context for more context.
        """
        raise NotImplementedError()

    @apply_lineage
    def post_execute(self, context, result=None):
        """
        This hook is triggered right after self.execute() is called.
        It is passed the execution context and any results returned by the
        operator.
        """
        pass

    def on_kill(self):
        """
        Override this method to cleanup subprocesses when a task instance
        gets killed. Any use of the threading, subprocess or multiprocessing
        module within an operator needs to be cleaned up or it will leave
        ghost processes behind.
        """
        pass

    def __deepcopy__(self, memo):
        """
        Hack sorting double chained task lists by task_id to avoid hitting
        max_depth on deepcopy operations.
        """
        sys.setrecursionlimit(5000)  # TODO fix this in a better way
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        # Shallow-share macros/filters/params/log; deepcopy everything else.
        for k, v in list(self.__dict__.items()):
            if k not in ('user_defined_macros', 'user_defined_filters',
                         'params', '_log'):
                setattr(result, k, copy.deepcopy(v, memo))
        result.params = self.params
        if hasattr(self, 'user_defined_macros'):
            result.user_defined_macros = self.user_defined_macros
        if hasattr(self, 'user_defined_filters'):
            result.user_defined_filters = self.user_defined_filters
        if hasattr(self, '_log'):
            result._log = self._log
        return result

    def __getstate__(self):
        # Loggers are not picklable; drop and restore in __setstate__.
        state = dict(self.__dict__)
        del state['_log']
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._log = logging.getLogger("airflow.task.operators")

    def render_template_from_field(self, attr, content, context, jinja_env):
        """
        Renders a template from a field. If the field is a string, it will
        simply render the string and return the result. If it is a collection or
        nested set of collections, it will traverse the structure and render
        all strings in it.

        :raises AirflowException: if content is of an unsupported type
        """
        rt = self.render_template
        if isinstance(content, six.string_types):
            result = jinja_env.from_string(content).render(**context)
        elif isinstance(content, (list, tuple)):
            result = [rt(attr, e, context) for e in content]
        elif isinstance(content, numbers.Number):
            # Numbers pass through untouched.
            result = content
        elif isinstance(content, dict):
            result = {
                k: rt("{}[{}]".format(attr, k), v, context)
                for k, v in list(content.items())}
        else:
            param_type = type(content)
            msg = (
                "Type '{param_type}' used for parameter '{attr}' is "
                "not supported for templating").format(**locals())
            raise AirflowException(msg)
        return result

    def render_template(self, attr, content, context):
        """
        Renders a template either from a file or directly in a field, and returns
        the rendered result.
        """
        jinja_env = self.dag.get_template_env() \
            if hasattr(self, 'dag') \
            else jinja2.Environment(cache_size=0)
        # If the string looks like a template file (matching template_ext),
        # load and render the file; otherwise render the value itself.
        exts = self.__class__.template_ext
        if (
                isinstance(content, six.string_types) and
                any([content.endswith(ext) for ext in exts])):
            return jinja_env.get_template(content).render(**context)
        else:
            return self.render_template_from_field(attr, content, context, jinja_env)

    def prepare_template(self):
        """
        Hook that is triggered after the templated fields get replaced
        by their content. If you need your operator to alter the
        content of the file before the template is rendered,
        it should override this method to do so.
        """
        pass

    def resolve_template_files(self):
        """Replace template-file references in template_fields with file contents."""
        # Getting the content of files for template_field / template_ext
        for attr in self.template_fields:
            content = getattr(self, attr)
            if content is not None and \
                    isinstance(content, six.string_types) and \
                    any([content.endswith(ext) for ext in self.template_ext]):
                env = self.dag.get_template_env()
                try:
                    setattr(self, attr, env.loader.get_source(env, content)[0])
                except Exception as e:
                    # Best effort: log and keep the original value on failure.
                    self.log.exception(e)
        self.prepare_template()

    @property
    def upstream_list(self):
        """@property: list of tasks directly upstream"""
        return [self.dag.get_task(tid) for tid in self._upstream_task_ids]

    @property
    def upstream_task_ids(self):
        """@property: set of ids of tasks directly upstream"""
        return self._upstream_task_ids

    @property
    def downstream_list(self):
        """@property: list of tasks directly downstream"""
        return [self.dag.get_task(tid) for tid in self._downstream_task_ids]

    @property
    def downstream_task_ids(self):
        """@property: set of ids of tasks directly downstream"""
        return self._downstream_task_ids

    @provide_session
    def clear(self,
              start_date=None,
              end_date=None,
              upstream=False,
              downstream=False,
              session=None):
        """
        Clears the state of task instances associated with the task, following
        the parameters specified.

        :return: the number of task instances that were cleared
        """
        TI = TaskInstance
        qry = session.query(TI).filter(TI.dag_id == self.dag_id)
        if start_date:
            qry = qry.filter(TI.execution_date >= start_date)
        if end_date:
            qry = qry.filter(TI.execution_date <= end_date)
        tasks = [self.task_id]
        if upstream:
            tasks += [
                t.task_id for t in self.get_flat_relatives(upstream=True)]
        if downstream:
            tasks += [
                t.task_id for t in self.get_flat_relatives(upstream=False)]
        qry = qry.filter(TI.task_id.in_(tasks))
        count = qry.count()
        clear_task_instances(qry.all(), session, dag=self.dag)
        session.commit()
        return count

    def get_task_instances(self, session, start_date=None, end_date=None):
        """
        Get a set of task instance related to this task for a specific date
        range.
        """
        TI = TaskInstance
        end_date = end_date or timezone.utcnow()
        return session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.task_id == self.task_id,
            TI.execution_date >= start_date,
            TI.execution_date <= end_date,
        ).order_by(TI.execution_date).all()

    def get_flat_relative_ids(self, upstream=False, found_descendants=None):
        """
        Get a flat list of relatives' ids, either upstream or downstream.

        :param found_descendants: accumulator set used by the recursion;
            callers normally leave this as None
        """
        if not found_descendants:
            found_descendants = set()
        relative_ids = self.get_direct_relative_ids(upstream)
        for relative_id in relative_ids:
            if relative_id not in found_descendants:
                found_descendants.add(relative_id)
                relative_task = self._dag.task_dict[relative_id]
                relative_task.get_flat_relative_ids(upstream,
                                                    found_descendants)
        return found_descendants

    def get_flat_relatives(self, upstream=False):
        """
        Get a flat list of relatives, either upstream or downstream.
        """
        return list(map(lambda task_id: self._dag.task_dict[task_id],
                        self.get_flat_relative_ids(upstream)))

    def run(
            self,
            start_date=None,
            end_date=None,
            ignore_first_depends_on_past=False,
            ignore_ti_state=False,
            mark_success=False):
        """
        Run a set of task instances for a date range.
        """
        start_date = start_date or self.start_date
        end_date = end_date or self.end_date or timezone.utcnow()
        for dt in self.dag.date_range(start_date, end_date=end_date):
            TaskInstance(self, dt).run(
                mark_success=mark_success,
                ignore_depends_on_past=(
                    dt == start_date and ignore_first_depends_on_past),
                ignore_ti_state=ignore_ti_state)

    def dry_run(self):
        """Log the rendered (string) template fields without executing the task."""
        self.log.info('Dry run')
        for attr in self.template_fields:
            content = getattr(self, attr)
            if content and isinstance(content, six.string_types):
                self.log.info('Rendering template for %s', attr)
                self.log.info(content)

    def get_direct_relative_ids(self, upstream=False):
        """
        Get the direct relative ids to the current task, upstream or
        downstream.
        """
        if upstream:
            return self._upstream_task_ids
        else:
            return self._downstream_task_ids

    def get_direct_relatives(self, upstream=False):
        """
        Get the direct relatives to the current task, upstream or
        downstream.
        """
        if upstream:
            return self.upstream_list
        else:
            return self.downstream_list

    def __repr__(self):
        return "<Task({self.__class__.__name__}): {self.task_id}>".format(
            self=self)

    @property
    def task_type(self):
        """@property: the class name of this operator"""
        return self.__class__.__name__

    def add_only_new(self, item_set, item):
        """Add item to item_set, raising if the dependency is already registered."""
        if item in item_set:
            raise AirflowException(
                'Dependency {self}, {item} already registered'
                ''.format(**locals()))
        else:
            item_set.add(item)

    def _set_relatives(self, task_or_task_list, upstream=False):
        """
        Wire this task to one or more other tasks in the given direction,
        assigning a DAG to any task that doesn't have one yet.
        """
        try:
            task_list = list(task_or_task_list)
        except TypeError:
            task_list = [task_or_task_list]
        for t in task_list:
            if not isinstance(t, BaseOperator):
                raise AirflowException(
                    "Relationships can only be set between "
                    "Operators; received {}".format(t.__class__.__name__))
        # relationships can only be set if the tasks share a single DAG. Tasks
        # without a DAG are assigned to that DAG.
        dags = {t._dag.dag_id: t._dag for t in [self] + task_list if t.has_dag()}
        if len(dags) > 1:
            raise AirflowException(
                'Tried to set relationships between tasks in '
                'more than one DAG: {}'.format(dags.values()))
        elif len(dags) == 1:
            dag = dags.popitem()[1]
        else:
            raise AirflowException(
                "Tried to create relationships between tasks that don't have "
                "DAGs yet. Set the DAG for at least one "
                "task and try again: {}".format([self] + task_list))
        if dag and not self.has_dag():
            self.dag = dag
        for task in task_list:
            if dag and not task.has_dag():
                task.dag = dag
            if upstream:
                task.add_only_new(task._downstream_task_ids, self.task_id)
                self.add_only_new(self._upstream_task_ids, task.task_id)
            else:
                self.add_only_new(self._downstream_task_ids, task.task_id)
                task.add_only_new(task._upstream_task_ids, self.task_id)

    def set_downstream(self, task_or_task_list):
        """
        Set a task or a task list to be directly downstream from the current
        task.
        """
        self._set_relatives(task_or_task_list, upstream=False)

    def set_upstream(self, task_or_task_list):
        """
        Set a task or a task list to be directly upstream from the current
        task.
        """
        self._set_relatives(task_or_task_list, upstream=True)

    def xcom_push(
            self,
            context,
            key,
            value,
            execution_date=None):
        """
        See TaskInstance.xcom_push()
        """
        context['ti'].xcom_push(
            key=key,
            value=value,
            execution_date=execution_date)

    def xcom_pull(
            self,
            context,
            task_ids=None,
            dag_id=None,
            key=XCOM_RETURN_KEY,
            include_prior_dates=None):
        """
        See TaskInstance.xcom_pull()
        """
        return context['ti'].xcom_pull(
            key=key,
            task_ids=task_ids,
            dag_id=dag_id,
            include_prior_dates=include_prior_dates)
class DagModel(Base):
    """
    These items are stored in the database for state related information.

    NOTE(fix): this docstring was previously placed *after* ``__tablename__``,
    making it a dead string expression instead of the class docstring; it is
    now the first statement so it populates ``DagModel.__doc__``.
    """
    __tablename__ = "dag"

    dag_id = Column(String(ID_LEN), primary_key=True)
    # A DAG can be paused from the UI / DB
    # Set this default value of is_paused based on a configuration value!
    is_paused_at_creation = configuration.conf\
        .getboolean('core',
                    'dags_are_paused_at_creation')
    is_paused = Column(Boolean, default=is_paused_at_creation)
    # Whether the DAG is a subdag
    is_subdag = Column(Boolean, default=False)
    # Whether that DAG was seen on the last DagBag load
    is_active = Column(Boolean, default=False)
    # Last time the scheduler started
    last_scheduler_run = Column(UtcDateTime)
    # Last time this DAG was pickled
    last_pickled = Column(UtcDateTime)
    # Time when the DAG last received a refresh signal
    # (e.g. the DAG's "refresh" button was clicked in the web UI)
    last_expired = Column(UtcDateTime)
    # Whether (one of) the scheduler is scheduling this DAG at the moment
    scheduler_lock = Column(Boolean)
    # Foreign key to the latest pickle_id
    pickle_id = Column(Integer)
    # The location of the file containing the DAG object
    fileloc = Column(String(2000))
    # String representing the owners
    owners = Column(String(2000))

    def __repr__(self):
        return "<DAG: {self.dag_id}>".format(self=self)

    @classmethod
    @provide_session
    def get_current(cls, dag_id, session=None):
        """Return the DagModel row for dag_id, or None if it doesn't exist."""
        return session.query(cls).filter(cls.dag_id == dag_id).first()
@functools.total_ordering
class DAG(BaseDag, LoggingMixin):
"""
A dag (directed acyclic graph) is a collection of tasks with directional
dependencies. A dag also has a schedule, a start end an end date
(optional). For each schedule, (say daily or hourly), the DAG needs to run
each individual tasks as their dependencies are met. Certain tasks have
the property of depending on their own past, meaning that they can't run
until their previous schedule (and upstream tasks) are completed.
DAGs essentially act as namespaces for tasks. A task_id can only be
added once to a DAG.
:param dag_id: The id of the DAG
:type dag_id: string
:param description: The description for the DAG to e.g. be shown on the webserver
:type description: string
:param schedule_interval: Defines how often that DAG runs, this
timedelta object gets added to your latest task instance's
execution_date to figure out the next schedule
:type schedule_interval: datetime.timedelta or
dateutil.relativedelta.relativedelta or str that acts as a cron
expression
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:type start_date: datetime.datetime
:param end_date: A date beyond which your DAG won't run, leave to None
for open ended scheduling
:type end_date: datetime.datetime
:param template_searchpath: This list of folders (non relative)
defines where jinja will look for your templates. Order matters.
Note that jinja/airflow includes the path of your DAG file by
default
:type template_searchpath: string or list of stings
:param user_defined_macros: a dictionary of macros that will be exposed
in your jinja templates. For example, passing ``dict(foo='bar')``
to this argument allows you to ``{{ foo }}`` in all jinja
templates related to this DAG. Note that you can pass any
type of object here.
:type user_defined_macros: dict
:param user_defined_filters: a dictionary of filters that will be exposed
in your jinja templates. For example, passing
``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
you to ``{{ 'world' | hello }}`` in all jinja templates related to
this DAG.
:type user_defined_filters: dict
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:type default_args: dict
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:type params: dict
:param concurrency: the number of task instances allowed to run
concurrently
:type concurrency: int
:param max_active_runs: maximum number of active DAG runs, beyond this
number of DAG runs in a running state, the scheduler won't create
new active DAG runs
:type max_active_runs: int
:param dagrun_timeout: specify how long a DagRun should be up before
timing out / failing, so that new DagRuns can be created
:type dagrun_timeout: datetime.timedelta
:param sla_miss_callback: specify a function to call when reporting SLA
timeouts.
:type sla_miss_callback: types.FunctionType
:param default_view: Specify DAG default view (tree, graph, duration,
gantt, landing_times)
:type default_view: string
:param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT)
:type orientation: string
:param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
:type catchup: bool
:param on_failure_callback: A function to be called when a DagRun of this dag fails.
A context dictionary is passed as a single parameter to this function.
:type on_failure_callback: callable
:param on_success_callback: Much like the ``on_failure_callback`` except
that it is executed when the dag succeeds.
:type on_success_callback: callable
"""
def __init__(
self, dag_id,
description='',
schedule_interval=timedelta(days=1),
start_date=None, end_date=None,
full_filepath=None,
template_searchpath=None,
user_defined_macros=None,
user_defined_filters=None,
default_args=None,
concurrency=configuration.conf.getint('core', 'dag_concurrency'),
max_active_runs=configuration.conf.getint(
'core', 'max_active_runs_per_dag'),
dagrun_timeout=None,
sla_miss_callback=None,
default_view=configuration.conf.get('webserver', 'dag_default_view').lower(),
orientation=configuration.conf.get('webserver', 'dag_orientation'),
catchup=configuration.conf.getboolean('scheduler', 'catchup_by_default'),
on_success_callback=None, on_failure_callback=None,
params=None):
self.user_defined_macros = user_defined_macros
self.user_defined_filters = user_defined_filters
self.default_args = default_args or {}
self.params = params or {}
# merging potentially conflicting default_args['params'] into params
if 'params' in self.default_args:
self.params.update(self.default_args['params'])
del self.default_args['params']
validate_key(dag_id)
# Properties from BaseDag
self._dag_id = dag_id
self._full_filepath = full_filepath if full_filepath else ''
self._concurrency = concurrency
self._pickle_id = None
self._description = description
# set file location to caller source path
self.fileloc = sys._getframe().f_back.f_code.co_filename
self.task_dict = dict()
# set timezone
if start_date and start_date.tzinfo:
self.timezone = start_date.tzinfo
elif 'start_date' in self.default_args and self.default_args['start_date']:
if isinstance(self.default_args['start_date'], six.string_types):
self.default_args['start_date'] = (
timezone.parse(self.default_args['start_date'])
)
self.timezone = self.default_args['start_date'].tzinfo
else:
self.timezone = settings.TIMEZONE
self.start_date = timezone.convert_to_utc(start_date)
self.end_date = timezone.convert_to_utc(end_date)
# also convert tasks
if 'start_date' in self.default_args:
self.default_args['start_date'] = (
timezone.convert_to_utc(self.default_args['start_date'])
)
if 'end_date' in self.default_args:
self.default_args['end_date'] = (
timezone.convert_to_utc(self.default_args['end_date'])
)
self.schedule_interval = schedule_interval
if schedule_interval in cron_presets:
self._schedule_interval = cron_presets.get(schedule_interval)
elif schedule_interval == '@once':
self._schedule_interval = None
else:
self._schedule_interval = schedule_interval
if isinstance(template_searchpath, six.string_types):
template_searchpath = [template_searchpath]
self.template_searchpath = template_searchpath
self.parent_dag = None # Gets set when DAGs are loaded
self.last_loaded = timezone.utcnow()
self.safe_dag_id = dag_id.replace('.', '__dot__')
self.max_active_runs = max_active_runs
self.dagrun_timeout = dagrun_timeout
self.sla_miss_callback = sla_miss_callback
self.default_view = default_view
self.orientation = orientation
self.catchup = catchup
self.is_subdag = False # DagBag.bag_dag() will set this to True if appropriate
self.partial = False
self.on_success_callback = on_success_callback
self.on_failure_callback = on_failure_callback
self._comps = {
'dag_id',
'task_ids',
'parent_dag',
'start_date',
'schedule_interval',
'full_filepath',
'template_searchpath',
'last_loaded',
}
def __repr__(self):
    """Debug representation, e.g. ``<DAG: example_dag>``."""
    return "<DAG: {}>".format(self.dag_id)
def __eq__(self, other):
    """
    DAGs are equal when they are the same type and every field listed
    in ``self._comps`` compares equal.
    """
    return (
        type(self) == type(other) and
        # Use getattr() instead of __dict__ as __dict__ doesn't return
        # correct values for properties.
        all(getattr(self, c, None) == getattr(other, c, None)
            for c in self._comps))
def __ne__(self, other):
    """Inverse of ``__eq__`` (needed explicitly on Python 2)."""
    return not self.__eq__(other)
def __lt__(self, other):
    """Order DAGs alphabetically by ``dag_id``."""
    other_id = other.dag_id
    return self.dag_id < other_id
def __hash__(self):
    """
    Hash over the same fields used by ``__eq__`` (``self._comps``).
    Unhashable field values fall back to their ``repr()`` so the DAG
    remains hashable regardless of field contents.
    """
    hash_components = [type(self)]
    for c in self._comps:
        # task_ids returns a list and lists can't be hashed
        if c == 'task_ids':
            val = tuple(self.task_dict.keys())
        else:
            val = getattr(self, c, None)
        try:
            hash(val)
            hash_components.append(val)
        except TypeError:
            # not hashable: substitute a stable textual stand-in
            hash_components.append(repr(val))
    return hash(tuple(hash_components))
# Context Manager -----------------------------------------------
def __enter__(self):
    """
    Enter a ``with DAG(...)`` block: remember the previously active
    context-manager DAG and install this one as the module-level
    current DAG so operators created in the block attach to it.
    """
    global _CONTEXT_MANAGER_DAG
    self._old_context_manager_dag = _CONTEXT_MANAGER_DAG
    _CONTEXT_MANAGER_DAG = self
    return self
def __exit__(self, _type, _value, _tb):
    """Leave the ``with`` block: restore the previous context DAG."""
    global _CONTEXT_MANAGER_DAG
    _CONTEXT_MANAGER_DAG = self._old_context_manager_dag
# /Context Manager ----------------------------------------------
def date_range(self, start_date, num=None, end_date=None):
    """
    Return a list of dates on this DAG's schedule.

    :param start_date: first date of the range
    :param num: if truthy, generate exactly ``num`` schedule intervals
        and ignore ``end_date``
    :param end_date: last date of the range; defaults to the current
        UTC time (resolved at call time) when ``num`` is not given
    :return: list of datetimes produced by ``utils_date_range``
    """
    # BUG FIX: the default used to be ``end_date=timezone.utcnow()``,
    # which Python evaluates once at class-definition time, freezing the
    # default at process start. Resolve "now" on every call instead.
    if num:
        end_date = None
    elif end_date is None:
        end_date = timezone.utcnow()
    return utils_date_range(
        start_date=start_date, end_date=end_date,
        num=num, delta=self._schedule_interval)
def following_schedule(self, dttm):
    """
    Calculates the following schedule for this dag in local time

    Returns None implicitly for ``@once`` DAGs, whose
    ``_schedule_interval`` is neither a string nor a timedelta.

    :param dttm: utc datetime
    :return: utc datetime (or None when there is no following schedule)
    """
    if isinstance(self._schedule_interval, six.string_types):
        # croniter works on naive local times, so convert to the DAG's
        # timezone, step the cron expression, then convert back to UTC
        dttm = timezone.make_naive(dttm, self.timezone)
        cron = croniter(self._schedule_interval, dttm)
        following = timezone.make_aware(cron.get_next(datetime), self.timezone)
        return timezone.convert_to_utc(following)
    elif isinstance(self._schedule_interval, timedelta):
        return dttm + self._schedule_interval
def previous_schedule(self, dttm):
    """
    Calculates the previous schedule for this dag in local time

    Mirror of ``following_schedule``; returns None implicitly for
    ``@once`` DAGs.

    :param dttm: utc datetime
    :return: utc datetime (or None when there is no previous schedule)
    """
    if isinstance(self._schedule_interval, six.string_types):
        # step the cron expression backwards in the DAG's local timezone
        dttm = timezone.make_naive(dttm, self.timezone)
        cron = croniter(self._schedule_interval, dttm)
        prev = timezone.make_aware(cron.get_prev(datetime), self.timezone)
        return timezone.convert_to_utc(prev)
    elif isinstance(self._schedule_interval, timedelta):
        return dttm - self._schedule_interval
def get_run_dates(self, start_date, end_date=None):
    """
    Returns a list of dates between the interval received as parameter using this
    dag's schedule interval. Returned dates can be used for execution dates.

    :param start_date: the start date of the interval; when None, the
        earliest task start_date is used
    :type start_date: datetime
    :param end_date: the end date of the interval, defaults to timezone.utcnow()
    :type end_date: datetime
    :return: a list of dates within the interval following the dag's schedule
    :rtype: list
    """
    run_dates = []
    using_start_date = start_date
    using_end_date = end_date
    # dates for dag runs
    using_start_date = using_start_date or min([t.start_date for t in self.tasks])
    using_end_date = using_end_date or timezone.utcnow()
    # next run date for a subdag isn't relevant (schedule_interval for subdags
    # is ignored) so we use the dag run's start date in the case of a subdag
    next_run_date = (self.normalize_schedule(using_start_date)
                     if not self.is_subdag else using_start_date)
    # walk the schedule forward until the end of the interval
    while next_run_date and next_run_date <= using_end_date:
        run_dates.append(next_run_date)
        next_run_date = self.following_schedule(next_run_date)
    return run_dates
def normalize_schedule(self, dttm):
    """
    Return ``dttm`` unchanged when it falls exactly on the schedule (or
    when there is no following schedule, e.g. ``@once``); otherwise
    return the next scheduled time after ``dttm``.
    """
    nxt = self.following_schedule(dttm)
    if not nxt:  # @once: no following schedule exists
        return dttm
    # dttm lies on the schedule iff stepping back from the following
    # schedule lands exactly on dttm
    return dttm if self.previous_schedule(nxt) == dttm else nxt
@provide_session
def get_last_dagrun(self, session=None, include_externally_triggered=False):
    """
    Returns the last dag run for this dag, None if there was none.
    Last dag run can be any type of run eg. scheduled or backfilled.
    Overridden DagRuns are ignored

    :param session: ORM session (injected by ``provide_session``)
    :param include_externally_triggered: when False (default),
        externally triggered runs are filtered out
    """
    DR = DagRun
    qry = session.query(DR).filter(
        DR.dag_id == self.dag_id,
    )
    if not include_externally_triggered:
        qry = qry.filter(DR.external_trigger.__eq__(False))
    # latest execution date first
    qry = qry.order_by(DR.execution_date.desc())
    last = qry.first()
    return last
@property
def dag_id(self):
    """Unique identifier of this DAG."""
    return self._dag_id

@dag_id.setter
def dag_id(self, value):
    self._dag_id = value

@property
def full_filepath(self):
    """Absolute path of the file this DAG was defined in."""
    return self._full_filepath

@full_filepath.setter
def full_filepath(self, value):
    self._full_filepath = value

@property
def concurrency(self):
    """Maximum number of concurrently running task instances allowed."""
    return self._concurrency

@concurrency.setter
def concurrency(self, value):
    self._concurrency = value

@property
def description(self):
    """Human-readable description of the DAG (read-only)."""
    return self._description

@property
def pickle_id(self):
    """Id of the DagPickle row this DAG was last pickled to, if any."""
    return self._pickle_id

@pickle_id.setter
def pickle_id(self, value):
    self._pickle_id = value

@property
def tasks(self):
    """All tasks of this DAG, as a list."""
    return list(self.task_dict.values())

@tasks.setter
def tasks(self, val):
    # deliberately read-only: tasks must go through add_task() so the
    # task <-> dag back-references stay consistent
    raise AttributeError(
        'DAG.tasks can not be modified. Use dag.add_task() instead.')

@property
def task_ids(self):
    """Ids of all tasks of this DAG, as a list."""
    return list(self.task_dict.keys())

@property
def active_task_ids(self):
    """Ids of all non-adhoc tasks of this DAG."""
    return list(k for k, v in self.task_dict.items() if not v.adhoc)

@property
def active_tasks(self):
    """All non-adhoc tasks of this DAG."""
    return [t for t in self.tasks if not t.adhoc]
@property
def filepath(self):
    """
    File location of where the dag object is instantiated,
    made relative by stripping the DAGS_FOLDER prefix (and this
    module's directory, for DAGs defined inside airflow itself).
    """
    fn = self.full_filepath.replace(settings.DAGS_FOLDER + '/', '')
    fn = fn.replace(os.path.dirname(__file__) + '/', '')
    return fn
@property
def folder(self):
    """Directory containing the file where this DAG was instantiated."""
    dag_file = self.full_filepath
    return os.path.dirname(dag_file)
@property
def owner(self):
    """Comma-separated string of the distinct owners of this DAG's tasks."""
    distinct_owners = {task.owner for task in self.tasks}
    return ", ".join(distinct_owners)
@property
@provide_session
def concurrency_reached(self, session=None):
    """
    Returns a boolean indicating whether the concurrency limit for this DAG
    has been reached

    Counts the RUNNING task instances of this dag_id and compares the
    count against ``self.concurrency``.
    """
    TI = TaskInstance
    qry = session.query(func.count(TI.task_id)).filter(
        TI.dag_id == self.dag_id,
        TI.state == State.RUNNING,
    )
    return qry.scalar() >= self.concurrency
@property
@provide_session
def is_paused(self, session=None):
    """
    Returns a boolean indicating whether this DAG is paused

    Reads the ``is_paused`` flag from the DagModel ORM row.
    """
    qry = session.query(DagModel).filter(
        DagModel.dag_id == self.dag_id)
    return qry.value('is_paused')
@provide_session
def handle_callback(self, dagrun, success=True, reason=None, session=None):
    """
    Triggers the appropriate callback depending on the value of success, namely the
    on_failure_callback or on_success_callback. This method gets the context of a
    single TaskInstance part of this DagRun and passes that to the callable along
    with a 'reason', primarily to differentiate DagRun failures.
    .. note::
    The logs end up in $AIRFLOW_HOME/logs/scheduler/latest/PROJECT/DAG_FILE.py.log
    :param dagrun: DagRun object
    :param success: Flag to specify if failure or success callback should be called
    :param reason: Completion reason
    :param session: Database session
    """
    callback = self.on_success_callback if success else self.on_failure_callback
    if callback:
        self.log.info('Executing dag callback function: {}'.format(callback))
        tis = dagrun.get_task_instances(session=session)
        # use the last TaskInstance of the DagRun to build the context
        # (the original comment incorrectly said "first")
        ti = tis[-1]
        ti.task = self.get_task(ti.task_id)
        context = ti.get_template_context(session=session)
        context.update({'reason': reason})
        callback(context)
@provide_session
def get_active_runs(self, session=None):
    """
    Return the execution dates of this DAG's currently RUNNING DagRuns.

    :param session: ORM session (injected by ``provide_session``)
    :return: list of execution dates
    """
    # FIX: pass the injected session through to DagRun.find so it does
    # not silently open (and commit/close) a second session of its own.
    runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING,
                       session=session)
    return [run.execution_date for run in runs]
@provide_session
def get_num_active_runs(self, external_trigger=None, session=None):
    """
    Returns the number of active "running" dag runs

    :param external_trigger: True for externally triggered active dag runs;
        when None (default), both triggered and scheduled runs are counted
    :type external_trigger: bool
    :param session:
    :return: number greater than 0 for active dag runs
    """
    query = (session
             .query(DagRun)
             .filter(DagRun.dag_id == self.dag_id)
             .filter(DagRun.state == State.RUNNING))
    if external_trigger is not None:
        query = query.filter(DagRun.external_trigger == external_trigger)
    return query.count()
@provide_session
def get_dagrun(self, execution_date, session=None):
    """
    Return the DagRun of this DAG at the given execution date, if any.

    :param execution_date: The execution date of the DagRun to find.
    :param session: ORM session (injected by ``provide_session``)
    :return: The DagRun if found, otherwise None.
    """
    query = session.query(DagRun).filter(
        DagRun.dag_id == self.dag_id,
        DagRun.execution_date == execution_date,
    )
    return query.first()
@property
@provide_session
def latest_execution_date(self, session=None):
    """
    Returns the latest date for which at least one dag run exists

    None when this DAG has never had a run.
    """
    execution_date = session.query(func.max(DagRun.execution_date)).filter(
        DagRun.dag_id == self.dag_id
    ).scalar()
    return execution_date
@property
def subdags(self):
    """
    Returns a list of the subdag objects associated to this DAG,
    recursively (subdags of subdags are included).
    """
    # Check SubDag for class but don't check class directly, see
    # https://github.com/airbnb/airflow/issues/1168
    from airflow.operators.subdag_operator import SubDagOperator
    subdag_lst = []
    for task in self.tasks:
        if (isinstance(task, SubDagOperator) or
                # TODO remove in Airflow 2.0
                type(task).__name__ == 'SubDagOperator'):
            subdag_lst.append(task.subdag)
            # recurse into nested subdags
            subdag_lst += task.subdag.subdags
    return subdag_lst
def resolve_template_files(self):
    """Resolve template files for every task in this DAG."""
    for task in self.tasks:
        task.resolve_template_files()
def get_template_env(self):
    """
    Returns a jinja2 Environment while taking into account the DAGs
    template_searchpath, user_defined_macros and user_defined_filters
    """
    # the DAG's own folder is always searched first
    searchpath = [self.folder]
    if self.template_searchpath:
        searchpath += self.template_searchpath
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath),
        extensions=["jinja2.ext.do"],
        cache_size=0)
    if self.user_defined_macros:
        env.globals.update(self.user_defined_macros)
    if self.user_defined_filters:
        env.filters.update(self.user_defined_filters)
    return env
def set_dependency(self, upstream_task_id, downstream_task_id):
    """
    Set a dependency between two tasks that have already been added
    to this DAG via ``add_task()``.
    """
    upstream = self.get_task(upstream_task_id)
    downstream = self.get_task(downstream_task_id)
    upstream.set_downstream(downstream)
def get_task_instances(
        self, session, start_date=None, end_date=None, state=None):
    """
    Return the TaskInstances of this DAG's tasks within a date window.

    :param session: ORM session (required)
    :param start_date: earliest execution date (inclusive); defaults
        to 30 days ago at midnight
    :param end_date: latest execution date (inclusive); defaults to now
    :param state: when given, only instances in this state are returned
    :return: list of TaskInstance ordered by execution date
    """
    TI = TaskInstance
    if not start_date:
        # default window: the last 30 days, anchored at midnight
        start_date = (timezone.utcnow() - timedelta(30)).date()
        start_date = datetime.combine(start_date, datetime.min.time())
    end_date = end_date or timezone.utcnow()
    tis = session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.execution_date >= start_date,
        TI.execution_date <= end_date,
        TI.task_id.in_([t.task_id for t in self.tasks]),
    )
    if state:
        tis = tis.filter(TI.state == state)
    tis = tis.order_by(TI.execution_date).all()
    return tis
@property
def roots(self):
    """Tasks that have no downstream dependencies."""
    return [task for task in self.tasks if not task.downstream_list]
def topological_sort(self):
    """
    Sorts tasks in topographical order, such that a task comes after any of its
    upstream dependencies.
    Heavily inspired by:
    http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/

    :return: tuple of tasks in topological order
    :raises AirflowException: if the dag contains a cycle
    """
    # copy the the tasks so we leave it unmodified
    graph_unsorted = self.tasks[:]
    graph_sorted = []
    # special case
    if len(self.tasks) == 0:
        return tuple(graph_sorted)
    # Run until the unsorted graph is empty.
    while graph_unsorted:
        # Go through each of the node/edges pairs in the unsorted
        # graph. If a set of edges doesn't contain any nodes that
        # haven't been resolved, that is, that are still in the
        # unsorted graph, remove the pair from the unsorted graph,
        # and append it to the sorted graph. Note here that by using
        # using the items() method for iterating, a copy of the
        # unsorted graph is used, allowing us to modify the unsorted
        # graph as we move through it. We also keep a flag for
        # checking that that graph is acyclic, which is true if any
        # nodes are resolved during each pass through the graph. If
        # not, we need to bail out as the graph therefore can't be
        # sorted.
        acyclic = False
        for node in list(graph_unsorted):
            for edge in node.upstream_list:
                if edge in graph_unsorted:
                    break
            # no edges in upstream tasks
            else:
                acyclic = True
                graph_unsorted.remove(node)
                graph_sorted.append(node)
        if not acyclic:
            raise AirflowException("A cyclic dependency occurred in dag: {}"
                                   .format(self.dag_id))
    return tuple(graph_sorted)
@provide_session
def set_dag_runs_state(
        self, state=State.RUNNING, session=None):
    """
    Set the state of every DagRun of this DAG and mark DagStat dirty.

    :param state: target state, defaults to ``State.RUNNING``
    :param session: ORM session (injected by ``provide_session``)
    """
    # BUG FIX: this previously queried DagModel, which carries no
    # ``state`` column, so assigning ``dr.state`` only set a transient
    # Python attribute and never changed any run. DagRun is the table
    # that actually stores a run state.
    drs = session.query(DagRun).filter_by(dag_id=self.dag_id).all()
    dirty_ids = []
    for dr in drs:
        dr.state = state
        dirty_ids.append(dr.dag_id)
    DagStat.update(dirty_ids, session=session)
@provide_session
def clear(
        self, start_date=None, end_date=None,
        only_failed=False,
        only_running=False,
        confirm_prompt=False,
        include_subdags=True,
        reset_dag_runs=True,
        dry_run=False,
        session=None):
    """
    Clears a set of task instances associated with the current dag for
    a specified date range.

    :param start_date: earliest execution date to clear (inclusive)
    :param end_date: latest execution date to clear (inclusive)
    :param only_failed: only clear FAILED task instances
    :param only_running: only clear RUNNING task instances
    :param confirm_prompt: ask the user for confirmation before clearing
    :param include_subdags: also clear task instances of this DAG's subdags
    :param reset_dag_runs: set the matching dag runs back to RUNNING
    :param dry_run: return the matching task instances without clearing
    :param session: ORM session (injected by ``provide_session``)
    :return: the matching TaskInstances when ``dry_run`` is True,
        otherwise the number of cleared instances (0 if the user bailed)
    """
    TI = TaskInstance
    tis = session.query(TI)
    if include_subdags:
        # Crafting the right filter for dag_id and task_ids combo
        conditions = []
        for dag in self.subdags + [self]:
            conditions.append(
                TI.dag_id.like(dag.dag_id) &
                TI.task_id.in_(dag.task_ids)
            )
        tis = tis.filter(or_(*conditions))
    else:
        tis = session.query(TI).filter(TI.dag_id == self.dag_id)
        tis = tis.filter(TI.task_id.in_(self.task_ids))
    if start_date:
        tis = tis.filter(TI.execution_date >= start_date)
    if end_date:
        tis = tis.filter(TI.execution_date <= end_date)
    if only_failed:
        tis = tis.filter(TI.state == State.FAILED)
    if only_running:
        tis = tis.filter(TI.state == State.RUNNING)
    if dry_run:
        # materialize and detach so the caller can inspect without
        # accidentally mutating tracked rows
        tis = tis.all()
        session.expunge_all()
        return tis
    count = tis.count()
    do_it = True
    if count == 0:
        return 0
    if confirm_prompt:
        ti_list = "\n".join([str(t) for t in tis])
        question = (
            "You are about to delete these {count} tasks:\n"
            "{ti_list}\n\n"
            "Are you sure? (yes/no): ").format(**locals())
        do_it = utils.helpers.ask_yesno(question)
    if do_it:
        clear_task_instances(tis.all(), session, dag=self)
        if reset_dag_runs:
            self.set_dag_runs_state(session=session)
    else:
        count = 0
        print("Bail. Nothing was cleared.")
    session.commit()
    return count
@classmethod
def clear_dags(
        cls, dags,
        start_date=None,
        end_date=None,
        only_failed=False,
        only_running=False,
        confirm_prompt=False,
        include_subdags=True,
        reset_dag_runs=True,
        dry_run=False):
    """
    Clear matching task instances across several DAGs at once.

    Performs a dry-run pass over every DAG first to collect the
    affected instances (so a single confirmation prompt can cover all
    of them), then clears each DAG for real unless ``dry_run`` is set
    or the user declines.

    :param dags: iterable of DAG objects to clear
    :return: the matching TaskInstances when ``dry_run`` is True,
        otherwise the number of cleared instances (0 if the user bailed)
    """
    all_tis = []
    for dag in dags:
        # first pass: collect only, never prompt per-dag
        tis = dag.clear(
            start_date=start_date,
            end_date=end_date,
            only_failed=only_failed,
            only_running=only_running,
            confirm_prompt=False,
            include_subdags=include_subdags,
            reset_dag_runs=reset_dag_runs,
            dry_run=True)
        all_tis.extend(tis)
    if dry_run:
        return all_tis
    count = len(all_tis)
    do_it = True
    if count == 0:
        print("Nothing to clear.")
        return 0
    if confirm_prompt:
        ti_list = "\n".join([str(t) for t in all_tis])
        question = (
            "You are about to delete these {} tasks:\n"
            "{}\n\n"
            "Are you sure? (yes/no): ").format(count, ti_list)
        do_it = utils.helpers.ask_yesno(question)
    if do_it:
        # second pass: actually clear
        for dag in dags:
            dag.clear(start_date=start_date,
                      end_date=end_date,
                      only_failed=only_failed,
                      only_running=only_running,
                      confirm_prompt=False,
                      include_subdags=include_subdags,
                      reset_dag_runs=reset_dag_runs,
                      dry_run=False)
    else:
        count = 0
        print("Bail. Nothing was cleared.")
    return count
def __deepcopy__(self, memo):
    """
    Deep-copy the DAG, but share (not copy) the user-supplied
    ``user_defined_macros``, ``user_defined_filters`` and ``params``:
    user callables/objects coming in "through the backdoor" may not be
    deep-copyable.
    """
    cls = self.__class__
    result = cls.__new__(cls)
    memo[id(self)] = result
    for k, v in list(self.__dict__.items()):
        if k not in ('user_defined_macros', 'user_defined_filters', 'params'):
            setattr(result, k, copy.deepcopy(v, memo))
    # shallow references on purpose, see docstring
    result.user_defined_macros = self.user_defined_macros
    result.user_defined_filters = self.user_defined_filters
    result.params = self.params
    return result
def sub_dag(self, task_regex, include_downstream=False,
            include_upstream=True):
    """
    Returns a subset of the current dag as a deep copy of the current dag
    based on a regex that should match one or many tasks, and includes
    upstream and downstream neighbours based on the flag passed.

    :param task_regex: regex matched (via ``re.findall``) against task ids
    :param include_downstream: also keep all downstream relatives
    :param include_upstream: also keep all upstream relatives
    :return: a new DAG containing only the selected tasks; ``partial``
        is set when tasks were dropped
    """
    dag = copy.deepcopy(self)
    regex_match = [
        t for t in dag.tasks if re.findall(task_regex, t.task_id)]
    also_include = []
    for t in regex_match:
        if include_downstream:
            also_include += t.get_flat_relatives(upstream=False)
        if include_upstream:
            also_include += t.get_flat_relatives(upstream=True)
    # Compiling the unique list of tasks that made the cut
    dag.task_dict = {t.task_id: t for t in regex_match + also_include}
    for t in dag.tasks:
        # Removing upstream/downstream references to tasks that did not
        # made the cut
        t._upstream_task_ids = t._upstream_task_ids.intersection(dag.task_dict.keys())
        t._downstream_task_ids = t._downstream_task_ids.intersection(
            dag.task_dict.keys())
    if len(dag.tasks) < len(self.tasks):
        # mark as partial so the scheduler/UI know tasks are missing
        dag.partial = True
    return dag
def has_task(self, task_id):
    """Return True when a task with the given id belongs to this DAG."""
    return any(task.task_id == task_id for task in self.tasks)
def get_task(self, task_id):
    """
    Return the task with the given id.

    :raises AirflowException: when no such task exists in this DAG
    """
    try:
        return self.task_dict[task_id]
    except KeyError:
        raise AirflowException(
            "Task {task_id} not found".format(task_id=task_id))
@provide_session
def pickle_info(self, session=None):
    """
    Probe whether this DAG can be pickled and how long that takes.

    :return: dict with ``is_picklable`` and, on success, ``pickle_len``
        and ``pickling_duration``; on failure, ``stacktrace``
    """
    d = {}
    d['is_picklable'] = True
    try:
        dttm = timezone.utcnow()
        pickled = pickle.dumps(self)
        d['pickle_len'] = len(pickled)
        d['pickling_duration'] = "{}".format(timezone.utcnow() - dttm)
    except Exception as e:
        # best effort: report, never raise
        self.log.debug(e)
        d['is_picklable'] = False
        d['stacktrace'] = traceback.format_exc()
    return d
@provide_session
def pickle(self, session=None):
    """
    Persist a pickled copy of this DAG to the DagPickle table, reusing
    the existing pickle row when it is still current.

    :return: the DagPickle row (new or reused)
    """
    dag = session.query(
        DagModel).filter(DagModel.dag_id == self.dag_id).first()
    dp = None
    if dag and dag.pickle_id:
        dp = session.query(DagPickle).filter(
            DagPickle.id == dag.pickle_id).first()
    if not dp or dp.pickle != self:
        # no pickle yet, or the stored one is stale: write a fresh row
        dp = DagPickle(dag=self)
        session.add(dp)
        self.last_pickled = timezone.utcnow()
        session.commit()
        self.pickle_id = dp.id
    return dp
def tree_view(self):
    """
    Shows an ascii tree representation of the DAG

    Starts from the roots (tasks with no downstream dependencies) and
    walks upstream, indenting one level per hop.
    """
    def get_downstream(task, level=0):
        # print the task at its depth, then recurse into its upstreams
        print((" " * level * 4) + str(task))
        level += 1
        for t in task.upstream_list:
            get_downstream(t, level)
    for t in self.roots:
        get_downstream(t)
def add_task(self, task):
    """
    Add a task to the DAG

    Reconciles the task's start/end dates with the DAG's, registers the
    task in ``task_dict`` and sets the task's ``dag`` back-reference.

    :param task: the task you want to add
    :type task: task
    :raises AirflowException: when neither the DAG nor the task has a
        start_date
    """
    if not self.start_date and not task.start_date:
        raise AirflowException("Task is missing the start_date parameter")
    # if the task has no start date, assign it the same as the DAG
    elif not task.start_date:
        task.start_date = self.start_date
    # otherwise, the task will start on the later of its own start date and
    # the DAG's start date
    elif self.start_date:
        task.start_date = max(task.start_date, self.start_date)
    # if the task has no end date, assign it the same as the dag
    if not task.end_date:
        task.end_date = self.end_date
    # otherwise, the task will end on the earlier of its own end date and
    # the DAG's end date
    elif task.end_date and self.end_date:
        task.end_date = min(task.end_date, self.end_date)
    if task.task_id in self.task_dict:
        # TODO: raise an error in Airflow 2.0
        warnings.warn(
            'The requested task could not be added to the DAG because a '
            'task with task_id {} is already in the DAG. Starting in '
            'Airflow 2.0, trying to overwrite a task will raise an '
            'exception.'.format(task.task_id),
            category=PendingDeprecationWarning)
    else:
        self.task_dict[task.task_id] = task
        task.dag = self
    # keep the cached count in sync with task_dict
    self.task_count = len(self.task_dict)
def add_tasks(self, tasks):
    """
    Add a list of tasks to the DAG

    :param tasks: a list of tasks you want to add
    :type tasks: list of tasks
    """
    for task in tasks:
        self.add_task(task)
@provide_session
def db_merge(self, session=None):
    """
    Replace this DAG's persisted operator rows: delete all existing
    BaseOperator rows for this dag_id, then merge the DAG itself.

    :param session: ORM session (injected by ``provide_session``)
    """
    BO = BaseOperator
    tasks = session.query(BO).filter(BO.dag_id == self.dag_id).all()
    for t in tasks:
        session.delete(t)
    session.commit()
    session.merge(self)
    session.commit()
def run(
        self,
        start_date=None,
        end_date=None,
        mark_success=False,
        local=False,
        executor=None,
        # NOTE(review): this default is read from config once, at class
        # definition time, not per call
        donot_pickle=configuration.conf.getboolean('core', 'donot_pickle'),
        ignore_task_deps=False,
        ignore_first_depends_on_past=False,
        pool=None,
        delay_on_limit_secs=1.0,
        verbose=False,
):
    """
    Runs the DAG.

    :param start_date: the start date of the range to run
    :type start_date: datetime
    :param end_date: the end date of the range to run
    :type end_date: datetime
    :param mark_success: True to mark jobs as succeeded without running them
    :type mark_success: bool
    :param local: True to run the tasks using the LocalExecutor
    :type local: bool
    :param executor: The executor instance to run the tasks
    :type executor: BaseExecutor
    :param donot_pickle: True to avoid pickling DAG object and send to workers
    :type donot_pickle: bool
    :param ignore_task_deps: True to skip upstream tasks
    :type ignore_task_deps: bool
    :param ignore_first_depends_on_past: True to ignore depends_on_past
        dependencies for the first set of tasks only
    :type ignore_first_depends_on_past: bool
    :param pool: Resource pool to use
    :type pool: string
    :param delay_on_limit_secs: Time in seconds to wait before next attempt to run
        dag run when max_active_runs limit has been reached
    :type delay_on_limit_secs: float
    :param verbose: Make logging output more verbose
    :type verbose: boolean
    """
    # local import to avoid a circular dependency with airflow.jobs
    from airflow.jobs import BackfillJob
    if not executor and local:
        executor = LocalExecutor()
    elif not executor:
        executor = GetDefaultExecutor()
    job = BackfillJob(
        self,
        start_date=start_date,
        end_date=end_date,
        mark_success=mark_success,
        executor=executor,
        donot_pickle=donot_pickle,
        ignore_task_deps=ignore_task_deps,
        ignore_first_depends_on_past=ignore_first_depends_on_past,
        pool=pool,
        delay_on_limit_secs=delay_on_limit_secs,
        verbose=verbose,
    )
    job.run()
def cli(self):
    """Expose a command-line interface specific to this DAG."""
    # local import to avoid a circular dependency with airflow.bin
    from airflow.bin import cli
    dag_parser = cli.CLIFactory.get_parser(dag_parser=True)
    parsed_args = dag_parser.parse_args()
    parsed_args.func(parsed_args, self)
@provide_session
def create_dagrun(self,
                  run_id,
                  state,
                  execution_date=None,
                  start_date=None,
                  external_trigger=False,
                  conf=None,
                  session=None):
    """
    Creates a dag run from this dag including the tasks associated with this dag.
    Returns the dag run.

    :param run_id: defines the the run id for this dag run
    :type run_id: string
    :param execution_date: the execution date of this dag run
    :type execution_date: datetime
    :param state: the state of the dag run
    :type state: State
    :param start_date: the date this dag run should be evaluated
    :type start_date: datetime
    :param external_trigger: whether this dag run is externally triggered
    :type external_trigger: bool
    :param conf: configuration dict stored on the dag run
    :param session: database session
    :type session: Session
    """
    run = DagRun(
        dag_id=self.dag_id,
        run_id=run_id,
        execution_date=execution_date,
        start_date=start_date,
        external_trigger=external_trigger,
        conf=conf,
        state=state
    )
    session.add(run)
    # flag the per-dag stats as stale so they get recomputed
    DagStat.set_dirty(dag_id=self.dag_id, session=session)
    session.commit()
    run.dag = self
    # create the associated task instances
    # state is None at the moment of creation
    run.verify_integrity(session=session)
    run.refresh_from_db()
    return run
@provide_session
def sync_to_db(self, owner=None, sync_time=None, session=None):
    """
    Save attributes about this DAG to the DB. Note that this method
    can be called for both DAGs and SubDAGs. A SubDag is actually a
    SubDagOperator.

    :param owner: owners string to persist; defaults to this DAG's owner
    :param sync_time: The time that the DAG should be marked as sync'ed
    :type sync_time: datetime
    :param session: ORM session (injected by ``provide_session``)
    :return: None
    """
    if owner is None:
        owner = self.owner
    if sync_time is None:
        sync_time = timezone.utcnow()
    orm_dag = session.query(
        DagModel).filter(DagModel.dag_id == self.dag_id).first()
    if not orm_dag:
        orm_dag = DagModel(dag_id=self.dag_id)
        self.log.info("Creating ORM DAG for %s", self.dag_id)
    orm_dag.fileloc = self.fileloc
    orm_dag.is_subdag = self.is_subdag
    orm_dag.owners = owner
    orm_dag.is_active = True
    orm_dag.last_scheduler_run = sync_time
    session.merge(orm_dag)
    session.commit()
    # recurse so subdag rows stay consistent with the parent
    for subdag in self.subdags:
        subdag.sync_to_db(owner=owner, sync_time=sync_time, session=session)
@staticmethod
@provide_session
def deactivate_unknown_dags(active_dag_ids, session=None):
    """
    Given a list of known DAGs, deactivate any other DAGs that are
    marked as active in the ORM

    :param active_dag_ids: list of DAG IDs that are active
    :type active_dag_ids: list[unicode]
    :return: None
    """
    # an empty list would deactivate every DAG; treat as no-op
    if len(active_dag_ids) == 0:
        return
    for dag in session.query(
            DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
        dag.is_active = False
        session.merge(dag)
@staticmethod
@provide_session
def deactivate_stale_dags(expiration_date, session=None):
    """
    Deactivate any DAGs that were last touched by the scheduler before
    the expiration date. These DAGs were likely deleted.

    :param expiration_date: set inactive DAGs that were touched before this
        time
    :type expiration_date: datetime
    :return: None
    """
    log = LoggingMixin().log
    for dag in session.query(
            DagModel).filter(DagModel.last_scheduler_run < expiration_date,
                             DagModel.is_active).all():
        log.info(
            "Deactivating DAG ID %s since it was last touched by the scheduler at %s",
            dag.dag_id, dag.last_scheduler_run.isoformat()
        )
        dag.is_active = False
        session.merge(dag)
        session.commit()
@staticmethod
@provide_session
def get_num_task_instances(dag_id, task_ids, states=None, session=None):
    """
    Returns the number of task instances in the given DAG.

    :param session: ORM session
    :param dag_id: ID of the DAG to get the task concurrency of
    :type dag_id: unicode
    :param task_ids: A list of valid task IDs for the given DAG
    :type task_ids: list[unicode]
    :param states: A list of states to filter by if supplied
    :type states: list[state]
    :return: The number of running tasks
    :rtype: int
    """
    qry = session.query(func.count(TaskInstance.task_id)).filter(
        TaskInstance.dag_id == dag_id,
        TaskInstance.task_id.in_(task_ids))
    if states is not None:
        if None in states:
            # SQL NULL never matches IN (...); needs an explicit IS NULL
            qry = qry.filter(or_(
                TaskInstance.state.in_(states),
                TaskInstance.state.is_(None)))
        else:
            qry = qry.filter(TaskInstance.state.in_(states))
    return qry.scalar()
def test_cycle(self):
    """
    Check to see if there are any cycles in the DAG. Returns False if no cycle found,
    otherwise raises exception.

    :raises AirflowDagCycleException: when a cycle is detected
    """
    # default of int is 0 which corresponds to CYCLE_NEW
    visit_map = defaultdict(int)
    for task_id in self.task_dict.keys():
        # only start a DFS from tasks not yet visited
        if visit_map[task_id] == DagBag.CYCLE_NEW:
            self._test_cycle_helper(visit_map, task_id)
    return False
def _test_cycle_helper(self, visit_map, task_id):
    """
    Checks if a cycle exists from the input task using DFS traversal

    ``visit_map`` uses DagBag.CYCLE_NEW / CYCLE_IN_PROGRESS /
    CYCLE_DONE as three-color DFS markers.

    :raises AirflowDagCycleException: when a back-edge is found
    """
    if visit_map[task_id] == DagBag.CYCLE_DONE:
        # subtree already fully explored
        return False
    visit_map[task_id] = DagBag.CYCLE_IN_PROGRESS
    task = self.task_dict[task_id]
    for descendant_id in task.get_direct_relative_ids():
        if visit_map[descendant_id] == DagBag.CYCLE_IN_PROGRESS:
            # back-edge to a node on the current DFS stack: cycle
            msg = "Cycle detected in DAG. Faulty task: {0} to {1}".format(
                task_id, descendant_id)
            raise AirflowDagCycleException(msg)
        else:
            self._test_cycle_helper(visit_map, descendant_id)
    visit_map[task_id] = DagBag.CYCLE_DONE
class Chart(Base):
    """ORM model for a user-defined chart rendered by the webserver."""
    __tablename__ = "chart"
    id = Column(Integer, primary_key=True)
    # display label shown in the UI
    label = Column(String(200))
    # connection used to run the chart's SQL
    conn_id = Column(String(ID_LEN), nullable=False)
    user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)
    chart_type = Column(String(100), default="line")
    sql_layout = Column(String(50), default="series")
    sql = Column(Text, default="SELECT series, x, y FROM table")
    y_log_scale = Column(Boolean)
    show_datatable = Column(Boolean)
    show_sql = Column(Boolean, default=True)
    height = Column(Integer, default=600)
    default_params = Column(String(5000), default="{}")
    owner = relationship(
        "User", cascade=False, cascade_backrefs=False, backref='charts')
    x_is_date = Column(Boolean, default=True)
    iteration_no = Column(Integer, default=0)
    last_modified = Column(UtcDateTime, default=timezone.utcnow)

    def __repr__(self):
        """Represent a chart by its display label."""
        return self.label
class KnownEventType(Base):
    """ORM model for the type/category of a known event."""
    __tablename__ = "known_event_type"
    id = Column(Integer, primary_key=True)
    # NOTE: the "know_" (not "known_") spelling is a historical typo
    # preserved for database schema compatibility
    know_event_type = Column(String(200))

    def __repr__(self):
        """Represent an event type by its name."""
        return self.know_event_type
class KnownEvent(Base):
    """ORM model for a known (externally reported) event with a time window."""
    __tablename__ = "known_event"
    id = Column(Integer, primary_key=True)
    label = Column(String(200))
    # time window covered by the event
    start_date = Column(UtcDateTime)
    end_date = Column(UtcDateTime)
    user_id = Column(Integer(), ForeignKey('users.id'),)
    known_event_type_id = Column(Integer(), ForeignKey('known_event_type.id'),)
    reported_by = relationship(
        "User", cascade=False, cascade_backrefs=False, backref='known_events')
    event_type = relationship(
        "KnownEventType",
        cascade=False,
        cascade_backrefs=False, backref='known_events')
    description = Column(Text)

    def __repr__(self):
        """Represent an event by its display label."""
        return self.label
class Variable(Base, LoggingMixin):
    """
    Key/value store persisted in the metadata database. Values may be
    encrypted at rest with Fernet when a FERNET_KEY is configured.
    """
    __tablename__ = "variable"
    id = Column(Integer, primary_key=True)
    key = Column(String(ID_LEN), unique=True)
    # raw stored value; possibly Fernet-encrypted (see is_encrypted)
    _val = Column('val', Text)
    is_encrypted = Column(Boolean, unique=False, default=False)

    def __repr__(self):
        # Hiding the value
        return '{} : {}'.format(self.key, self._val)

    def get_val(self):
        """Return the decrypted value, or None when decryption fails."""
        log = LoggingMixin().log
        if self._val and self.is_encrypted:
            try:
                fernet = get_fernet()
            except Exception:
                log.error("Can't decrypt _val for key={}, FERNET_KEY "
                          "configuration missing".format(self.key))
                return None
            try:
                return fernet.decrypt(bytes(self._val, 'utf-8')).decode()
            except cryptography.fernet.InvalidToken:
                log.error("Can't decrypt _val for key={}, invalid token "
                          "or value".format(self.key))
                return None
        else:
            return self._val

    def set_val(self, value):
        """Store a value, encrypting it when Fernet is available."""
        if value:
            try:
                fernet = get_fernet()
                self._val = fernet.encrypt(bytes(value, 'utf-8')).decode()
                self.is_encrypted = True
            except AirflowException:
                # deliberate fallback: store plaintext rather than fail
                self.log.exception(
                    "Failed to load fernet while encrypting value, "
                    "using non-encrypted value."
                )
                self._val = value
                self.is_encrypted = False

    @declared_attr
    def val(cls):
        # expose get_val/set_val as the ``val`` attribute while keeping
        # the raw column accessible as ``_val``
        return synonym('_val',
                       descriptor=property(cls.get_val, cls.set_val))

    @classmethod
    def setdefault(cls, key, default, deserialize_json=False):
        """
        Like a Python builtin dict object, setdefault returns the current value
        for a key, and if it isn't there, stores the default value and returns it.

        :param key: Dict key for this Variable
        :type key: String
        :param default: Default value to set and return if the variable
            isn't already in the DB
        :type default: Mixed
        :param deserialize_json: Store this as a JSON encoded value in the DB
            and un-encode it when retrieving a value
        :return: Mixed
        :raises ValueError: when the key is absent and ``default`` is None
        """
        # sentinel distinguishes "not in DB" from a stored None/falsy value
        default_sentinel = object()
        obj = Variable.get(key, default_var=default_sentinel,
                           deserialize_json=deserialize_json)
        if obj is default_sentinel:
            if default is not None:
                Variable.set(key, default, serialize_json=deserialize_json)
                return default
            else:
                raise ValueError('Default Value must be set')
        else:
            return obj

    @classmethod
    @provide_session
    def get(cls, key, default_var=None, deserialize_json=False, session=None):
        """
        Return the value for ``key``, or ``default_var`` when missing.

        :raises KeyError: when the key is absent and no default is given
        """
        obj = session.query(cls).filter(cls.key == key).first()
        if obj is None:
            if default_var is not None:
                return default_var
            else:
                raise KeyError('Variable {} does not exist'.format(key))
        else:
            if deserialize_json:
                return json.loads(obj.val)
            else:
                return obj.val

    @classmethod
    @provide_session
    def set(cls, key, value, serialize_json=False, session=None):
        """
        Store ``value`` under ``key``, replacing any existing entry.

        :param serialize_json: JSON-encode the value before storing
        """
        if serialize_json:
            stored_value = json.dumps(value)
        else:
            stored_value = str(value)
        # delete-then-insert keeps the unique key constraint satisfied
        session.query(cls).filter(cls.key == key).delete()
        session.add(Variable(key=key, val=stored_value))
        session.flush()
class XCom(Base, LoggingMixin):
    """
    Base class for XCom objects.
    """
    __tablename__ = "xcom"

    id = Column(Integer, primary_key=True)
    key = Column(String(512))
    # serialized payload: pickled bytes or UTF-8 encoded JSON, depending on
    # the 'core.enable_xcom_pickling' configuration flag
    value = Column(LargeBinary)
    timestamp = Column(
        DateTime, default=timezone.utcnow, nullable=False)
    execution_date = Column(UtcDateTime, nullable=False)

    # source information
    task_id = Column(String(ID_LEN), nullable=False)
    dag_id = Column(String(ID_LEN), nullable=False)

    __table_args__ = (
        Index('idx_xcom_dag_task_date', dag_id, task_id, execution_date, unique=False),
    )

    """
    TODO: "pickling" has been deprecated and JSON is preferred.
    "pickling" will be removed in Airflow 2.0.
    """
    @reconstructor
    def init_on_load(self):
        # Deserialize ``value`` in place whenever a row is loaded by the ORM.
        enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling')
        if enable_pickling:
            self.value = pickle.loads(self.value)
        else:
            try:
                self.value = json.loads(self.value.decode('UTF-8'))
            except (UnicodeEncodeError, ValueError):
                # For backward-compatibility.
                # Preventing errors in webserver
                # due to XComs mixed with pickled and unpickled.
                self.value = pickle.loads(self.value)

    def __repr__(self):
        return '<XCom "{key}" ({task_id} @ {execution_date})>'.format(
            key=self.key,
            task_id=self.task_id,
            execution_date=self.execution_date)

    @classmethod
    @provide_session
    def set(
            cls,
            key,
            value,
            execution_date,
            task_id,
            dag_id,
            session=None):
        """
        Store an XCom value.
        TODO: "pickling" has been deprecated and JSON is preferred.
        "pickling" will be removed in Airflow 2.0.
        :return: None
        """
        session.expunge_all()

        # serialize according to the configured transport format
        enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling')
        if enable_pickling:
            value = pickle.dumps(value)
        else:
            try:
                value = json.dumps(value).encode('UTF-8')
            except ValueError:
                log = LoggingMixin().log
                log.error("Could not serialize the XCOM value into JSON. "
                          "If you are using pickles instead of JSON "
                          "for XCOM, then you need to enable pickle "
                          "support for XCOM in your airflow config.")
                raise

        # remove any duplicate XComs
        session.query(cls).filter(
            cls.key == key,
            cls.execution_date == execution_date,
            cls.task_id == task_id,
            cls.dag_id == dag_id).delete()

        session.commit()

        # insert new XCom
        session.add(XCom(
            key=key,
            value=value,
            execution_date=execution_date,
            task_id=task_id,
            dag_id=dag_id))

        session.commit()

    @classmethod
    @provide_session
    def get_one(cls,
                execution_date,
                key=None,
                task_id=None,
                dag_id=None,
                include_prior_dates=False,
                session=None):
        """
        Retrieve an XCom value, optionally meeting certain criteria.
        TODO: "pickling" has been deprecated and JSON is preferred.
        "pickling" will be removed in Airflow 2.0.
        :return: XCom value
        """
        # Build AND-ed filters from whichever criteria were supplied;
        # include_prior_dates turns the exact-date match into "on or before".
        filters = []
        if key:
            filters.append(cls.key == key)
        if task_id:
            filters.append(cls.task_id == task_id)
        if dag_id:
            filters.append(cls.dag_id == dag_id)
        if include_prior_dates:
            filters.append(cls.execution_date <= execution_date)
        else:
            filters.append(cls.execution_date == execution_date)

        # most recent match wins
        query = (
            session.query(cls.value).filter(and_(*filters))
            .order_by(cls.execution_date.desc(), cls.timestamp.desc()))

        result = query.first()
        if result:
            enable_pickling = configuration.getboolean('core', 'enable_xcom_pickling')
            if enable_pickling:
                return pickle.loads(result.value)
            else:
                try:
                    return json.loads(result.value.decode('UTF-8'))
                except ValueError:
                    log = LoggingMixin().log
                    log.error("Could not deserialize the XCOM value from JSON. "
                              "If you are using pickles instead of JSON "
                              "for XCOM, then you need to enable pickle "
                              "support for XCOM in your airflow config.")
                    raise

    @classmethod
    @provide_session
    def get_many(cls,
                 execution_date,
                 key=None,
                 task_ids=None,
                 dag_ids=None,
                 include_prior_dates=False,
                 limit=100,
                 session=None):
        """
        Retrieve an XCom value, optionally meeting certain criteria
        TODO: "pickling" has been deprecated and JSON is preferred.
        "pickling" will be removed in Airflow 2.0.
        """
        filters = []
        if key:
            filters.append(cls.key == key)
        if task_ids:
            filters.append(cls.task_id.in_(as_tuple(task_ids)))
        if dag_ids:
            filters.append(cls.dag_id.in_(as_tuple(dag_ids)))
        if include_prior_dates:
            filters.append(cls.execution_date <= execution_date)
        else:
            filters.append(cls.execution_date == execution_date)

        # newest first, capped at ``limit`` rows; values are deserialized by
        # init_on_load when the ORM materializes each row
        query = (
            session.query(cls).filter(and_(*filters))
            .order_by(cls.execution_date.desc(), cls.timestamp.desc())
            .limit(limit))
        results = query.all()
        return results

    @classmethod
    @provide_session
    def delete(cls, xcoms, session=None):
        # Accept a single XCom or an iterable of them; reject anything else.
        if isinstance(xcoms, XCom):
            xcoms = [xcoms]
        for xcom in xcoms:
            if not isinstance(xcom, XCom):
                raise TypeError(
                    'Expected XCom; received {}'.format(xcom.__class__.__name__)
                )
            session.delete(xcom)
        session.commit()
class DagStat(Base):
    # Per-(dag_id, state) counters of DagRuns, maintained lazily: writers
    # mark rows "dirty" and update() recomputes the counts from dag_run.
    __tablename__ = "dag_stats"

    dag_id = Column(String(ID_LEN), primary_key=True)
    state = Column(String(50), primary_key=True)
    count = Column(Integer, default=0)
    dirty = Column(Boolean, default=False)

    def __init__(self, dag_id, state, count=0, dirty=False):
        self.dag_id = dag_id
        self.state = state
        self.count = count
        self.dirty = dirty

    @staticmethod
    @provide_session
    def set_dirty(dag_id, session=None):
        """
        :param dag_id: the dag_id to mark dirty
        :param session: database session
        :return:
        """
        # ensure a row exists for every dag state before flagging them
        DagStat.create(dag_id=dag_id, session=session)

        try:
            stats = session.query(DagStat).filter(
                DagStat.dag_id == dag_id
            ).with_for_update().all()

            for stat in stats:
                stat.dirty = True
            session.commit()
        except Exception as e:
            # stats are advisory: log and roll back rather than propagate
            session.rollback()
            log = LoggingMixin().log
            log.warning("Could not update dag stats for %s", dag_id)
            log.exception(e)

    @staticmethod
    @provide_session
    def update(dag_ids=None, dirty_only=True, session=None):
        """
        Updates the stats for dirty/out-of-sync dags

        :param dag_ids: dag_ids to be updated
        :type dag_ids: list
        :param dirty_only: only updated for marked dirty, defaults to True
        :type dirty_only: bool
        :param session: db session to use
        :type session: Session
        """
        try:
            qry = session.query(DagStat)
            if dag_ids:
                qry = qry.filter(DagStat.dag_id.in_(set(dag_ids)))
            if dirty_only:
                qry = qry.filter(DagStat.dirty == True) # noqa
            # lock the rows being recomputed for the duration of the update
            qry = qry.with_for_update().all()
            ids = set([dag_stat.dag_id for dag_stat in qry])

            # avoid querying with an empty IN clause
            if len(ids) == 0:
                session.commit()
                return

            # recount dag runs grouped by (dag_id, state) and merge the
            # fresh totals back, clearing the dirty flag
            dagstat_states = set(itertools.product(ids, State.dag_states))
            qry = (
                session.query(DagRun.dag_id, DagRun.state, func.count('*'))
                .filter(DagRun.dag_id.in_(ids))
                .group_by(DagRun.dag_id, DagRun.state)
            )
            counts = {(dag_id, state): count for dag_id, state, count in qry}
            for dag_id, state in dagstat_states:
                count = 0
                if (dag_id, state) in counts:
                    count = counts[(dag_id, state)]
                session.merge(
                    DagStat(dag_id=dag_id, state=state, count=count, dirty=False)
                )

            session.commit()
        except Exception as e:
            session.rollback()
            log = LoggingMixin().log
            log.warning("Could not update dag stat table")
            log.exception(e)

    @staticmethod
    @provide_session
    def create(dag_id, session=None):
        """
        Creates the missing states the stats table for the dag specified

        :param dag_id: dag id of the dag to create stats for
        :param session: database session
        :return:
        """
        # unfortunately sqlalchemy does not know upsert
        qry = session.query(DagStat).filter(DagStat.dag_id == dag_id).all()
        states = [dag_stat.state for dag_stat in qry]
        for state in State.dag_states:
            if state not in states:
                try:
                    session.merge(DagStat(dag_id=dag_id, state=state))
                    session.commit()
                except Exception as e:
                    # a concurrent insert may race us; best-effort only
                    session.rollback()
                    log = LoggingMixin().log
                    log.warning("Could not create stat record")
                    log.exception(e)
class DagRun(Base, LoggingMixin):
    """
    DagRun describes an instance of a Dag. It can be created
    by the scheduler (for regular runs) or by an external trigger
    """
    __tablename__ = "dag_run"

    ID_PREFIX = 'scheduled__'
    ID_FORMAT_PREFIX = ID_PREFIX + '{0}'

    id = Column(Integer, primary_key=True)
    dag_id = Column(String(ID_LEN))
    execution_date = Column(UtcDateTime, default=timezone.utcnow)
    start_date = Column(UtcDateTime, default=timezone.utcnow)
    end_date = Column(UtcDateTime)
    _state = Column('state', String(50), default=State.RUNNING)
    run_id = Column(String(ID_LEN))
    external_trigger = Column(Boolean, default=True)
    conf = Column(PickleType)

    # transient reference to the DAG object for this run (not persisted)
    dag = None

    __table_args__ = (
        Index('dr_run_id', dag_id, run_id, unique=True),
    )

    def __repr__(self):
        return (
            '<DagRun {dag_id} @ {execution_date}: {run_id}, '
            'externally triggered: {external_trigger}>'
        ).format(
            dag_id=self.dag_id,
            execution_date=self.execution_date,
            run_id=self.run_id,
            external_trigger=self.external_trigger)

    def get_state(self):
        return self._state

    def set_state(self, state):
        # only act on actual transitions; mark dag stats dirty as a side
        # effect so the counters get recomputed
        if self._state != state:
            self._state = state

            if self.dag_id is not None:
                # FIXME: Due to the scoped_session factor we we don't get a clean
                # session here, so something really weird goes on:
                # if you try to close the session dag runs will end up detached
                session = settings.Session()
                DagStat.set_dirty(self.dag_id, session=session)

    @declared_attr
    def state(self):
        # SQLAlchemy synonym so ``state`` routes through get_state/set_state.
        # NOTE: declared_attr passes the class as the (misnamed) ``self`` arg.
        return synonym('_state',
                       descriptor=property(self.get_state, self.set_state))

    @classmethod
    def id_for_date(cls, date, prefix=ID_FORMAT_PREFIX):
        # run_id for a scheduled run, e.g. 'scheduled__2019-01-01T00:00:00'
        # ([:19] truncates the isoformat to second precision)
        return prefix.format(date.isoformat()[:19])

    @provide_session
    def refresh_from_db(self, session=None):
        """
        Reloads the current dagrun from the database

        :param session: database session
        """
        DR = DagRun

        exec_date = func.cast(self.execution_date, DateTime)

        dr = session.query(DR).filter(
            DR.dag_id == self.dag_id,
            func.cast(DR.execution_date, DateTime) == exec_date,
            DR.run_id == self.run_id
        ).one()

        self.id = dr.id
        self.state = dr.state

    @staticmethod
    @provide_session
    def find(dag_id=None, run_id=None, execution_date=None,
             state=None, external_trigger=None, no_backfills=False,
             session=None):
        """
        Returns a set of dag runs for the given search criteria.

        :param dag_id: the dag_id to find dag runs for
        :type dag_id: integer, list
        :param run_id: defines the the run id for this dag run
        :type run_id: string
        :param execution_date: the execution date
        :type execution_date: datetime
        :param state: the state of the dag run
        :type state: State
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param no_backfills: return no backfills (True), return all (False).
            Defaults to False
        :type no_backfills: bool
        :param session: database session
        :type session: Session
        """
        DR = DagRun

        # every supplied criterion narrows the query; omitted ones are ignored
        qry = session.query(DR)
        if dag_id:
            qry = qry.filter(DR.dag_id == dag_id)
        if run_id:
            qry = qry.filter(DR.run_id == run_id)
        if execution_date:
            if isinstance(execution_date, list):
                qry = qry.filter(DR.execution_date.in_(execution_date))
            else:
                qry = qry.filter(DR.execution_date == execution_date)
        if state:
            qry = qry.filter(DR.state == state)
        if external_trigger is not None:
            qry = qry.filter(DR.external_trigger == external_trigger)
        if no_backfills:
            # in order to prevent a circular dependency
            from airflow.jobs import BackfillJob
            qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))

        dr = qry.order_by(DR.execution_date).all()

        return dr

    @provide_session
    def get_task_instances(self, state=None, session=None):
        """
        Returns the task instances for this dag run
        """
        TI = TaskInstance
        tis = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
        )
        if state:
            if isinstance(state, six.string_types):
                tis = tis.filter(TI.state == state)
            else:
                # this is required to deal with NULL values
                if None in state:
                    tis = tis.filter(
                        or_(TI.state.in_(state),
                            TI.state.is_(None))
                    )
                else:
                    tis = tis.filter(TI.state.in_(state))
        # partial DAGs only contain a subset of tasks; restrict accordingly
        if self.dag and self.dag.partial:
            tis = tis.filter(TI.task_id.in_(self.dag.task_ids))
        return tis.all()

    @provide_session
    def get_task_instance(self, task_id, session=None):
        """
        Returns the task instance specified by task_id for this dag run

        :param task_id: the task id
        """
        TI = TaskInstance
        ti = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
            TI.task_id == task_id
        ).first()

        return ti

    def get_dag(self):
        """
        Returns the Dag associated with this DagRun.

        :return: DAG
        """
        if not self.dag:
            raise AirflowException("The DAG (.dag) for {} needs to be set"
                                   .format(self))

        return self.dag

    @provide_session
    def get_previous_dagrun(self, session=None):
        """The previous DagRun, if there is one"""
        return session.query(DagRun).filter(
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date < self.execution_date
        ).order_by(
            DagRun.execution_date.desc()
        ).first()

    @provide_session
    def get_previous_scheduled_dagrun(self, session=None):
        """The previous, SCHEDULED DagRun, if there is one"""
        dag = self.get_dag()

        return session.query(DagRun).filter(
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date == dag.previous_schedule(self.execution_date)
        ).first()

    @provide_session
    def update_state(self, session=None):
        """
        Determines the overall state of the DagRun based on the state
        of its TaskInstances.

        :return: State
        """
        dag = self.get_dag()

        tis = self.get_task_instances(session=session)
        self.log.info("Updating state for %s considering %s task(s)", self, len(tis))

        # drop REMOVED tasks and attach the task object to the rest
        for ti in list(tis):
            # skip in db?
            if ti.state == State.REMOVED:
                tis.remove(ti)
            else:
                ti.task = dag.get_task(ti.task_id)

        # pre-calculate
        # db is faster
        start_dttm = timezone.utcnow()
        unfinished_tasks = self.get_task_instances(
            state=State.unfinished(),
            session=session
        )
        none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
        none_task_concurrency = all(t.task.task_concurrency is None
                                    for t in unfinished_tasks)
        # small speed up
        # deadlock detection: only meaningful when no task depends on past
        # runs or carries a concurrency cap
        if unfinished_tasks and none_depends_on_past and none_task_concurrency:
            # todo: this can actually get pretty slow: one task costs between 0.01-015s
            no_dependencies_met = True
            for ut in unfinished_tasks:
                # We need to flag upstream and check for changes because upstream
                # failures can result in deadlock false positives
                old_state = ut.state
                deps_met = ut.are_dependencies_met(
                    dep_context=DepContext(
                        flag_upstream_failed=True,
                        ignore_in_retry_period=True),
                    session=session)
                if deps_met or old_state != ut.current_state(session=session):
                    no_dependencies_met = False
                    break

        duration = (timezone.utcnow() - start_dttm).total_seconds() * 1000
        Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration)

        # future: remove the check on adhoc tasks (=active_tasks)
        if len(tis) == len(dag.active_tasks):
            root_ids = [t.task_id for t in dag.roots]
            roots = [t for t in tis if t.task_id in root_ids]

            # if all roots finished and at least one failed, the run failed
            if (not unfinished_tasks and
                    any(r.state in (State.FAILED, State.UPSTREAM_FAILED) for r in roots)):
                self.log.info('Marking run %s failed', self)
                self.state = State.FAILED
                dag.handle_callback(self, success=False, reason='task_failure',
                                    session=session)

            # if all roots succeeded and no unfinished tasks, the run succeeded
            elif not unfinished_tasks and all(r.state in (State.SUCCESS, State.SKIPPED)
                                              for r in roots):
                self.log.info('Marking run %s successful', self)
                self.state = State.SUCCESS
                dag.handle_callback(self, success=True, reason='success', session=session)

            # if *all tasks* are deadlocked, the run failed
            elif (unfinished_tasks and none_depends_on_past and
                    none_task_concurrency and no_dependencies_met):
                self.log.info('Deadlock; marking run %s failed', self)
                self.state = State.FAILED
                dag.handle_callback(self, success=False, reason='all_tasks_deadlocked',
                                    session=session)

            # finally, if the roots aren't done, the dag is still running
            else:
                self.state = State.RUNNING

        # todo: determine we want to use with_for_update to make sure to lock the run
        session.merge(self)
        session.commit()

        return self.state

    @provide_session
    def verify_integrity(self, session=None):
        """
        Verifies the DagRun by checking for removed tasks or tasks that are not in the
        database yet. It will set state to removed or add the task if required.
        """
        dag = self.get_dag()
        tis = self.get_task_instances(session=session)

        # check for removed or restored tasks
        task_ids = []
        for ti in tis:
            task_ids.append(ti.task_id)
            task = None
            try:
                task = dag.get_task(ti.task_id)
            except AirflowException:
                # task no longer exists in the DAG definition
                if ti.state == State.REMOVED:
                    pass  # ti has already been removed, just ignore it
                elif self.state is not State.RUNNING and not dag.partial:
                    self.log.warning("Failed to get task '{}' for dag '{}'. "
                                     "Marking it as removed.".format(ti, dag))
                    Stats.incr(
                        "task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
                    ti.state = State.REMOVED

            # a previously removed task that reappeared in the DAG is restored
            is_task_in_dag = task is not None
            should_restore_task = is_task_in_dag and ti.state == State.REMOVED
            if should_restore_task:
                self.log.info("Restoring task '{}' which was previously "
                              "removed from DAG '{}'".format(ti, dag))
                Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
                ti.state = State.NONE

        # check for missing tasks
        for task in six.itervalues(dag.task_dict):
            if task.adhoc:
                continue

            if task.task_id not in task_ids:
                ti = TaskInstance(task, self.execution_date)
                session.add(ti)

        session.commit()

    @staticmethod
    def get_run(session, dag_id, execution_date):
        """
        :param dag_id: DAG ID
        :type dag_id: unicode
        :param execution_date: execution date
        :type execution_date: datetime
        :return: DagRun corresponding to the given dag_id and execution date
            if one exists. None otherwise.
        :rtype: DagRun
        """
        qry = session.query(DagRun).filter(
            DagRun.dag_id == dag_id,
            DagRun.external_trigger == False, # noqa
            DagRun.execution_date == execution_date,
        )
        return qry.first()

    @property
    def is_backfill(self):
        # backfill runs are identified purely by their run_id prefix
        from airflow.jobs import BackfillJob
        return self.run_id.startswith(BackfillJob.ID_PREFIX)

    @classmethod
    @provide_session
    def get_latest_runs(cls, session):
        """Returns the latest DagRun for each DAG. """
        # subquery picks the max execution_date per dag, the join fetches
        # the matching full rows
        subquery = (
            session
            .query(
                cls.dag_id,
                func.max(cls.execution_date).label('execution_date'))
            .group_by(cls.dag_id)
            .subquery()
        )
        dagruns = (
            session
            .query(cls)
            .join(subquery,
                  and_(cls.dag_id == subquery.c.dag_id,
                       cls.execution_date == subquery.c.execution_date))
            .all()
        )
        return dagruns
class Pool(Base):
    # Named pool of worker slots; task instances assigned to the pool
    # consume slots while RUNNING or QUEUED.
    __tablename__ = "slot_pool"

    id = Column(Integer, primary_key=True)
    pool = Column(String(50), unique=True)
    slots = Column(Integer, default=0)
    description = Column(Text)

    def __repr__(self):
        return self.pool

    def to_json(self):
        # plain-dict representation of the pool row
        return {
            'id': self.id,
            'pool': self.pool,
            'slots': self.slots,
            'description': self.description,
        }

    @provide_session
    def used_slots(self, session):
        """
        Returns the number of slots used at the moment
        """
        # one slot per RUNNING task instance in this pool
        running = (
            session
            .query(TaskInstance)
            .filter(TaskInstance.pool == self.pool)
            .filter(TaskInstance.state == State.RUNNING)
            .count()
        )
        return running

    @provide_session
    def queued_slots(self, session):
        """
        Returns the number of slots queued at the moment
        """
        return (
            session
            .query(TaskInstance)
            .filter(TaskInstance.pool == self.pool)
            .filter(TaskInstance.state == State.QUEUED)
            .count()
        )

    @provide_session
    def open_slots(self, session):
        """
        Returns the number of slots open at the moment
        """
        # open = capacity minus running minus queued (may go negative if
        # the pool is oversubscribed)
        used_slots = self.used_slots(session=session)
        queued_slots = self.queued_slots(session=session)
        return self.slots - used_slots - queued_slots
class SlaMiss(Base):
    """
    Model that stores a history of the SLA that have been missed.
    It is used to keep track of SLA failures over time and to avoid double
    triggering alert emails.
    """
    __tablename__ = "sla_miss"

    # a miss is uniquely identified by (task, dag, execution date)
    task_id = Column(String(ID_LEN), primary_key=True)
    dag_id = Column(String(ID_LEN), primary_key=True)
    execution_date = Column(UtcDateTime, primary_key=True)
    email_sent = Column(Boolean, default=False)
    timestamp = Column(UtcDateTime)
    description = Column(Text)
    notification_sent = Column(Boolean, default=False)

    def __repr__(self):
        return str((
            self.dag_id, self.task_id, self.execution_date.isoformat()))
class ImportError(Base):
    # Records a DAG-file import failure (file plus its stack trace).
    # NOTE(review): this model shadows the builtin ``ImportError`` within
    # this module; refer to the builtin explicitly if it is ever needed here.
    __tablename__ = "import_error"
    id = Column(Integer, primary_key=True)
    timestamp = Column(UtcDateTime)
    filename = Column(String(1024))
    stacktrace = Column(Text)
class KubeResourceVersion(Base):
    # Single-row table (one_row_id is always true) persisting a Kubernetes
    # resource_version string across restarts.
    __tablename__ = "kube_resource_version"
    one_row_id = Column(Boolean, server_default=sqltrue(), primary_key=True)
    resource_version = Column(String(255))

    @staticmethod
    @provide_session
    def get_current_resource_version(session=None):
        # the single-row invariant makes .one() safe here
        (resource_version,) = session.query(KubeResourceVersion.resource_version).one()
        return resource_version

    @staticmethod
    @provide_session
    def checkpoint_resource_version(resource_version, session=None):
        # a falsy resource_version is ignored so the stored value is never
        # clobbered with an empty checkpoint
        if resource_version:
            session.query(KubeResourceVersion).update({
                KubeResourceVersion.resource_version: resource_version
            })
            session.commit()

    @staticmethod
    @provide_session
    def reset_resource_version(session=None):
        # reset the stored version to the sentinel '0' and return it
        session.query(KubeResourceVersion).update({
            KubeResourceVersion.resource_version: '0'
        })
        session.commit()
        return '0'
class KubeWorkerIdentifier(Base):
    # Single-row table holding a persistent worker UUID, generated lazily
    # on first access.
    __tablename__ = "kube_worker_uuid"
    one_row_id = Column(Boolean, server_default=sqltrue(), primary_key=True)
    worker_uuid = Column(String(255))

    @staticmethod
    @provide_session
    def get_or_create_current_kube_worker_uuid(session=None):
        # empty string means "not yet assigned": mint a UUID and persist it
        (worker_uuid,) = session.query(KubeWorkerIdentifier.worker_uuid).one()
        if worker_uuid == '':
            worker_uuid = str(uuid.uuid4())
            KubeWorkerIdentifier.checkpoint_kube_worker_uuid(worker_uuid, session)
        return worker_uuid

    @staticmethod
    @provide_session
    def checkpoint_kube_worker_uuid(worker_uuid, session=None):
        # ignore falsy values to avoid erasing the stored UUID
        if worker_uuid:
            session.query(KubeWorkerIdentifier).update({
                KubeWorkerIdentifier.worker_uuid: worker_uuid
            })
            session.commit()
| 36.742813 | 90 | 0.59353 |
bd45c92f735e8dd8af1506e1ee47af7870a38442 | 5,804 | py | Python | release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewCellCollection.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewCellCollection.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Forms/__init___parts/DataGridViewCellCollection.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | class DataGridViewCellCollection(BaseCollection,ICollection,IEnumerable,IList):
"""
Represents a collection of cells in a System.Windows.Forms.DataGridViewRow.
DataGridViewCellCollection(dataGridViewRow: DataGridViewRow)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return DataGridViewCellCollection()
def Add(self,dataGridViewCell):
"""
Add(self: DataGridViewCellCollection,dataGridViewCell: DataGridViewCell) -> int
Adds a cell to the collection.
dataGridViewCell: A System.Windows.Forms.DataGridViewCell to add to the collection.
Returns: The position in which to insert the new element.
"""
pass
def AddRange(self,dataGridViewCells):
"""
AddRange(self: DataGridViewCellCollection,*dataGridViewCells: Array[DataGridViewCell])
Adds an array of cells to the collection.
dataGridViewCells: The array of System.Windows.Forms.DataGridViewCell objects to add to the collection.
"""
pass
def Clear(self):
"""
Clear(self: DataGridViewCellCollection)
Clears all cells from the collection.
"""
pass
def Contains(self,dataGridViewCell):
"""
Contains(self: DataGridViewCellCollection,dataGridViewCell: DataGridViewCell) -> bool
Determines whether the specified cell is contained in the collection.
dataGridViewCell: A System.Windows.Forms.DataGridViewCell to locate in the collection.
Returns: true if dataGridViewCell is in the collection; otherwise,false.
"""
pass
def CopyTo(self,*__args):
"""
CopyTo(self: DataGridViewCellCollection,array: Array[DataGridViewCell],index: int)
Copies the entire collection of cells into an array at a specified location within the array.
array: The destination array to which the contents will be copied.
index: The index of the element in array at which to start copying.
"""
pass
def IndexOf(self,dataGridViewCell):
"""
IndexOf(self: DataGridViewCellCollection,dataGridViewCell: DataGridViewCell) -> int
Returns the index of the specified cell.
dataGridViewCell: The cell to locate in the collection.
Returns: The zero-based index of the value of dataGridViewCell parameter,if it is found in the collection; otherwise,-1.
"""
pass
def Insert(self,index,dataGridViewCell):
"""
Insert(self: DataGridViewCellCollection,index: int,dataGridViewCell: DataGridViewCell)
Inserts a cell into the collection at the specified index.
index: The zero-based index at which to place dataGridViewCell.
dataGridViewCell: The System.Windows.Forms.DataGridViewCell to insert.
"""
pass
def MemberwiseClone(self,*args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the object to be assigned a new identity when it is marshaled across a remoting boundary. A value of false is usually appropriate. true to copy the current System.MarshalByRefObject object's identity to its clone,which will cause remoting client calls to be routed to the remote server object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def OnCollectionChanged(self,*args):
"""
OnCollectionChanged(self: DataGridViewCellCollection,e: CollectionChangeEventArgs)
Raises the System.Windows.Forms.DataGridViewCellCollection.CollectionChanged event.
e: A System.ComponentModel.CollectionChangeEventArgs that contains the event data.
"""
pass
def Remove(self,cell):
"""
Remove(self: DataGridViewCellCollection,cell: DataGridViewCell)
Removes the specified cell from the collection.
cell: The System.Windows.Forms.DataGridViewCell to remove from the collection.
"""
pass
def RemoveAt(self,index):
"""
RemoveAt(self: DataGridViewCellCollection,index: int)
Removes the cell at the specified index.
index: The zero-based index of the System.Windows.Forms.DataGridViewCell to be removed.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
"""
__contains__(self: IList,value: object) -> bool
Determines whether the System.Collections.IList contains a specific value.
value: The object to locate in the System.Collections.IList.
Returns: true if the System.Object is found in the System.Collections.IList; otherwise,false.
"""
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
@staticmethod
def __new__(self,dataGridViewRow):
""" __new__(cls: type,dataGridViewRow: DataGridViewRow) """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]= """
pass
List=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an System.Collections.ArrayList containing System.Windows.Forms.DataGridViewCellCollection objects.
"""
CollectionChanged=None
| 38.437086 | 402 | 0.721054 |
a98c5a95bd386988dbeeb855bc543196884b4ab5 | 311 | py | Python | twint/debug.py | mehrdad-shokri/twint | 17a1746c9210fc89181f2f773646bbb42fd05d7f | [
"MIT"
] | 3 | 2019-12-17T15:37:35.000Z | 2021-04-15T12:56:12.000Z | twint/debug.py | mehrdad-shokri/twint | 17a1746c9210fc89181f2f773646bbb42fd05d7f | [
"MIT"
] | null | null | null | twint/debug.py | mehrdad-shokri/twint | 17a1746c9210fc89181f2f773646bbb42fd05d7f | [
"MIT"
] | null | null | null | import datetime
import sys
def Write(e, func, output):
    """Append a timestamped debug line for ``func``/``e``.

    The message is appended to ``output`` (when a path is given), always
    appended to ``twint_debug.log`` in the working directory, and echoed
    to stdout.

    :param e: the error/message text (anything str-formattable)
    :param func: name of the function being logged
    :param output: optional path of an extra log file to append to
    """
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    msg = "[{}] {} - {}".format(now, func, e)
    if output:
        # use context managers so handles are closed deterministically; the
        # original leaked one file object per call by passing open()
        # directly to print()
        with open(output, "a", encoding="utf-8") as f:
            print(msg, file=f)
    with open("twint_debug.log", "a", encoding="utf-8") as f:
        print(msg, file=f)
    print(msg)
| 28.272727 | 65 | 0.607717 |
c5621f34dfcf975e034a5ac560697a3b9286a4a7 | 796 | py | Python | components/Reddit.py | tehstun/Reddit-Imgur-scraping | 9261c812bc306412924b59a5b463fd4640779d24 | [
"MIT"
] | 7 | 2016-12-29T13:59:08.000Z | 2019-04-04T10:36:11.000Z | components/Reddit.py | tehstun/Reddit-Imgur-scraping | 9261c812bc306412924b59a5b463fd4640779d24 | [
"MIT"
] | 1 | 2016-12-29T17:37:34.000Z | 2016-12-29T17:56:35.000Z | components/Reddit.py | tehstun/Reddit-Imgur-scraping | 9261c812bc306412924b59a5b463fd4640779d24 | [
"MIT"
] | 2 | 2016-12-29T03:35:40.000Z | 2016-12-29T06:50:31.000Z | # pylint: disable=W1401, C0301
import re
from feedparser import parse as feedparser
class Reddit:
    """ A basic reddit class to do some information gathering """

    def __init__(self):
        self.reddit_core_url = "https://www.reddit.com/"
        self.frontpage = self.gather_frontpage()

    def gather_frontpage(self):
        """ Gather all the front page reddit sub reddits names

        Fetches reddit's front-page RSS feed (100 entries) and extracts the
        subreddit name from each entry summary's /r/ link.

        :return: list of unique subreddit names, in order of first appearance
        """
        subreddits = []
        reddit_rss_feed = feedparser(self.reddit_core_url + ".rss?limit=100")
        for entry in reddit_rss_feed["entries"]:
            entry_sub = re.search("(?P<url>https?://www.reddit.com/r/([A-z0-9\-]+))(\?[[^/]+)?", entry["summary"])
            # BUGFIX: re.search returns None when the summary contains no
            # subreddit link; the original code crashed with AttributeError
            # on entry_sub.group(2) in that case.
            if entry_sub is None:
                continue
            if entry_sub.group(2) not in subreddits:
                subreddits.append(entry_sub.group(2))
        return subreddits
| 36.181818 | 114 | 0.633166 |
32932bb636298cbeab10d3b990d5e50c7a849ca0 | 20,649 | py | Python | Python/python3_version/klampt/model/multipath.py | ipa-rmb-mr/Klampt | 71793b54eead788811b4e62bcf8dadb49b68ff17 | [
"BSD-3-Clause"
] | null | null | null | Python/python3_version/klampt/model/multipath.py | ipa-rmb-mr/Klampt | 71793b54eead788811b4e62bcf8dadb49b68ff17 | [
"BSD-3-Clause"
] | null | null | null | Python/python3_version/klampt/model/multipath.py | ipa-rmb-mr/Klampt | 71793b54eead788811b4e62bcf8dadb49b68ff17 | [
"BSD-3-Clause"
] | null | null | null | """This module defines the MultiPath class, and methods for loading and
saving multipaths from xml files.
"""
from ..model.contact import Hold
from ..io.loader import *
from ..math import vectorops
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from xml.dom import minidom
class MultiPath:
    """A sophisticated path representation that allows timed/untimed paths, attached
    velocity information, as well as making and breaking contact.

    Primarily, a MultiPath consists of a list of Sections, each of which is a path or
    timed trajectory along a fixed stance.

    A Section can either contain the Holds defining its stance, or its stance could be
    defined by indices into the holdSet member of MultiPath. If all Sections have the
    latter structure it is known as an "aggregated" MultiPath.

    Attributes:
        sections (list of Sections): the segments of the multipath, each operating over a
            fixed stance.
        settings (dict mapping str to str): an unstructured propery map 'settings' which
            can contain metadata about which robot this path applies to, how the path
            was created, etc.
        holdSet (dict mapping int to Hold): a set of indexed Holds, which can be referred
            to inside Section objects.
    """

    class Section:
        """Contains a path or time-parameterized trajectory, as well as a
        list of holds and ik constraints

        If the times member is set, this is time parameterized. Otherwise,
        it is just a path.

        Attributes:
            settings (dict mapping str to str): an unstructured property map.
            configs (list of lists of floats): a list of N configurations along the
                path section.
            velocities (list of lists of floats, optional): a list of N joint
                velocities along the path section.
            times (list of floats, optional) a list of N times for each configuration
                along the path section.
            holds (list of Holds, optional): the set of Holds that this section
                is required to meet.
            holdIndices (list of ints, optional): the set of Holds that this section
                is required to meet, indexed into MultiPath.holdSet.
            ikObjectives (list of IKObjectives, optional): the set of extra IKObjectives
                that this section is required to meet.
        """
        def __init__(self):
            # empty section: no configs, untimed, no velocities, no constraints
            self.settings = {}
            self.configs = []
            self.velocities = None
            self.times = None
            self.holds = []
            self.holdIndices = []
            self.ikObjectives = []

    def __init__(self):
        # empty multipath: no sections, no metadata, no shared hold set
        self.sections = []
        self.settings = {}
        self.holdSet = dict()
    def numSections(self):
        # number of fixed-stance segments in the path
        return len(self.sections)

    def startConfig(self):
        # first configuration of the first section
        return self.sections[0].configs[0]

    def endConfig(self):
        # last configuration of the last section
        return self.sections[-1].configs[-1]

    def startTime(self):
        # untimed (or empty) paths conventionally start at pseudo-time 0
        if len(self.sections)==0 or self.sections[0].times==None: return 0
        return self.sections[0].times[0]

    def endTime(self):
        """Returns the final time parameter"""
        if len(self.sections)==0: return 0
        # untimed: pseudo-time advances one unit per movement segment,
        # i.e. len(configs)-1 units per section
        if self.sections[-1].times==None: return sum(len(s.configs)-1 for s in self.sections)
        return self.sections[-1].times[-1]

    def duration(self):
        # total (pseudo-)time spanned by the whole path
        return self.endTime()-self.startTime()

    def hasTiming(self):
        """Returns true if the multipath is timed"""
        # timing is all-or-nothing across sections (see checkValid), so
        # inspecting the first section suffices
        return self.sections[0].times is not None
    def checkValid(self):
        """Checks for validity of the path.

        Raises:
            ValueError: if sections mix timed and untimed data, if a timed
                section's time count does not match its config count, if
                times are not monotonically non-decreasing across the whole
                path, or if any section has fewer than 2 configurations.

        Returns:
            True on success.
        """
        if self.hasTiming():
            t0 = self.startTime()
            for i,s in enumerate(self.sections):
                if s.times is None:
                    raise ValueError("MultiPath section 0 is timed but section "+str(i)+" is not timed")
                if len(s.times) != len(s.configs):
                    raise ValueError("MultiPath section "+str(i)+" has invalid number of times")
                for t in s.times:
                    # times must never decrease, across section boundaries too
                    if t < t0:
                        raise ValueError("MultiPath section "+str(i)+" times are not monotonically increasing")
                    t0 = t
        else:
            # untimed path: no section may carry times
            for i,s in enumerate(self.sections):
                if s.times is not None:
                    raise ValueError("MultiPath section 0 is not timed but section "+str(i)+" is timed")
        for i,s in enumerate(self.sections):
            # each section must define at least one segment
            if len(s.configs) <= 1:
                raise ValueError("Section "+str(i)+" has 0 or 1 configuration, timing may be messed up")
        return True
def isContinuous(self):
"""Returns true if all the sections are continuous (i.e., the last config of each
section matches the start config of the next)."""
for i in range(len(self.sections)-1):
if self.sections[i].configs[-1] != self.sections[i+1].configs[0]:
return False
return True
def getSectionTiming(self,section):
"""Returns a pair (tstart,tend) giving the timing of the section"""
assert section >= 0 and section < len(self.sections)
if self.hasTiming():
return self.sections[section].times[0],self.sections[section].times[-1]
t0 = 0
for i in range(section):
t0 += len(self.sections[i].configs)-1
return (t0,t0+len(self.sections[section].configs))
def getStance(self,section):
"""Returns the list of Holds that the section should satisfy"""
res = self.sections[section].holds[:]
res+=[self.holdSet[ind] for ind in self.sections[section].holdIndices]
for g in self.sections[section].ikObjectives:
h=Hold()
h.link = g.link()
h.ikConstraint = g
res.append(h)
return res
def getIKProblem(self,section):
"""Returns the set of IKObjectives that the section should satisfy"""
res = [h.ikConstraint for h in self.sections[section].holds]
res += [self.holdSet[ind].ikConstraint for ind in self.sections[section].holdIndices]
res += self.sections[section].ikObjectives
return res
def aggregateHolds(self,holdSimilarityThreshold=None):
"""Aggregates holds from all sections to the global holdSet variable, and converts
sections to use holdIndices. If holdSimilarityThreshold is not None, then sections'
holds that are the same within the given tolerance are combined into one hold."""
def sameHold(h1,h2,tol):
from ..math import vectorops,so3
if h1.link != h2.link: return False
if len(h1.contacts) != len(h2.contacts): return False
if h1.ikConstraint.numPosDims() != h2.ikConstraint.numPosDims(): return False
if h1.ikConstraint.numRotDims() != h2.ikConstraint.numRotDims(): return False
if h1.ikConstraint.numPosDims() == 3:
xl1,xw1 = h1.ikConstraint.getPosition()
xl2,xw2 = h2.ikConstraint.getPosition()
if vectorops.distanceSquared(xl1,xl2) > tol**2 or vectorops.distanceSquared(xw1,xw2) > tol**2:
return False
elif h1.ikConstraint.numPosDims() != 0:
raise NotImplementedError("Distance detection for non-point or free holds")
if h1.ikConstraint.numPosDims() == 3:
R1 = h1.ikConstraint.getRotation()
R2 = h2.ikConstraint.getRotation()
if so3.distance(R1,R2) > tol:
return False
for (c1,c2) in zip(h1.contacts,h2.contacts):
if vectorops.distanceSquared(c1,c2) > tol**2:
return False
return True
holdsByLink = dict()
for s in self.sections:
for h in s.holds:
found = False
if holdSimilarityThreshold is not None:
for oldHold,index in holdsByLink.get(h.link,[]):
if sameHold(h,oldHold,holdSimilarityThreshold):
s.holdIndices.append(index)
found = True
break
if not found:
s.holdIndices.append(len(self.holdSet))
self.holdSet[len(self.holdSet)] = h
if holdSimilarityThreshold is not None:
holdsByLink.setdefault(h.link,[]).append((h,s.holdIndices[-1]))
s.holds = []
def deaggregateHolds(self):
"""De-aggregates holds from the global holdSet variable into the sections' holds."""
for s in self.sections:
for h in s.holdIndices:
s.holds.append(self.holdSet[h])
s.holdIndices = []
self.holdSet = dict()
    def setConfig(self,section,configIndex,q,v=None,t=None,maintainContinuity=True):
        """Sets a configuration along the path, maintaining continuity if
        maintainContinuity is True.  Equivalent to
        self.sections[section].configs[configIndex] = q except that adjacent
        sections' shared boundary configurations are also modified.

        Args:
            section (int): section index, in [0, numSections()).
            configIndex (int): milestone index; a negative value selects the
                section's last milestone.
            q (list of floats): the new configuration.
            v (list of floats, optional): new velocity; requires the section
                to have velocities.
            t (float, optional): new time; requires the section to be timed.
            maintainContinuity (bool): if True, also update the matching
                boundary milestones of neighboring sections.
        """
        assert section >= 0 and section < len(self.sections)
        if configIndex < 0:
            # negative index means "last milestone of this section"
            configIndex = len(self.sections[section].configs)-1
        self.sections[section].configs[configIndex] = q
        if v is not None:
            assert self.sections[section].velocities is not None
            self.sections[section].velocities[configIndex] = v
        if t is not None:
            assert self.sections[section].times is not None
            self.sections[section].times[configIndex] = t
        if maintainContinuity:
            section0 = section
            configIndex0 = configIndex
            # Walk backward: while we sit on a section's first milestone, the
            # previous section's last milestone is the same point.
            while section > 0 and configIndex == 0:
                section = section - 1
                configIndex = len(self.sections[section].configs)-1
                self.setConfig(section,configIndex,q,v,t,False)
            section = section0
            configIndex = configIndex0
            # Walk forward: while we sit on a section's last milestone, the
            # next section's first milestone is the same point.
            while section + 1 < len(self.sections) and configIndex + 1 == len(self.sections[section].configs):
                section = section + 1
                configIndex = 0
                self.setConfig(section,configIndex,q,v,t,False)
def concat(self,path):
"""Appends the path, making sure times and holds are appropriately set"""
newSections = path.sections[:]
dt = 0.0
if path.hasTiming() and len(self.sections)>0:
#shift timing
assert(self.hasTiming())
dt = self.endTime()
for s in newSections:
s.times = [t+dt for t in s.times]
#rename global hold set
newholds = path.holdSet.copy()
namemap = dict()
for (name,h) in path.holdSet.items():
if name in self.holdSet:
found = False
for k in range(2,1000):
newname = str(name)+"("+str(k)+")"
if newname not in self.holdSet:
namemap[name] = newname
found = True
if not found:
raise ValueError("Unable to merge hold name "+name)
newholds[newname] = newholds[name]
del newholds[name]
for s in newSections:
inds = []
for h in s.holdIndices:
if h in namemap:
inds.append(namemap[h])
else:
inds.append(h)
s.holdIndices = inds
self.sections += newSections
self.holdSet.update(newholds)
def save(self,fn):
"""Saves this multipath to an xml file."""
tree = self.saveXML()
f = open(fn,'w')
f.write('<?xml version="1.0"?>\n')
f.write(_prettify(tree.getroot()))
#tree.write(fn,pretty_print=True)
f.close()
def load(self,fn):
"""Loads this multipath from a multipath xml file."""
tree = ET.parse(fn)
return self.loadXML(tree)
    def saveXML(self):
        """Saves this multipath to a multipath xml tree
        (xml.etree.ElementTree.ElementTree) and returns it.

        Structure: a <multipath> root whose attributes are the settings,
        containing one <section> per section (with <ikgoal>, <hold> and
        <milestone> children) followed by the global hold set's <hold>
        elements.
        """
        root = ET.Element("multipath")
        root.attrib = self.settings
        for sec in self.sections:
            xs = ET.Element("section")
            root.append(xs)
            xs.attrib = sec.settings
            for ikgoal in sec.ikObjectives:
                xik = ET.SubElement(xs,"ikgoal")
                #xik.text = writeIKObjective(ikgoal)
                xik.text = ikgoal.text
            for h in sec.holds:
                # inline holds are serialized in full
                xh = ET.SubElement(xs,"hold")
                xh.text = writeHold(h)
            for h in sec.holdIndices:
                # referenced holds are serialized as an index (int keys) or
                # a name (any other key type)
                xh = ET.SubElement(xs,"hold")
                if isinstance(h,int):
                    xh.set("index",str(h))
                else:
                    xh.set("name",str(h))
            for i in range(len(sec.configs)):
                xm = ET.Element("milestone")
                xs.append(xm)
                xm.set("config",writeVector(sec.configs[i]))
                if sec.times != None:
                    xm.set("time",str(sec.times[i]))
                if sec.velocities != None:
                    xm.set("velocity",writeVector(sec.velocities[i]))
        for hkey,h in self.holdSet.items():
            xh = ET.Element("hold")
            root.append(xh)
            # integer keys are implicit indices and need no name attribute
            if not isinstance(hkey,int):
                xh.set('name',str(hkey))
            xh.text = writeHold(h)
        return ET.ElementTree(root)
    def loadXML(self,tree):
        """Loads a multipath from a multipath xml tree (ElementTree),
        replacing this object's sections, settings, and hold set.

        Args:
            tree (xml.etree.ElementTree.ElementTree): the parsed document,
                structured as produced by saveXML().

        Raises:
            ValueError: if a <milestone> element lacks a config attribute.
        """
        self.sections = []
        self.holdSet = dict()
        self.settings = dict()
        root = tree.getroot()
        for k,v in root.attrib.items():
            self.settings[k]=v
        for sec in root.findall('section'):
            s = MultiPath.Section()
            for k,v in sec.attrib.items():
                s.settings[k]=v
            milestones = sec.findall('milestone')
            for m in milestones:
                if 'config' not in m.attrib:
                    raise ValueError("Milestone does not contain config attribute")
                s.configs.append(readVector(m.attrib['config']))
                if 'time' in m.attrib:
                    # lazily switch the section from untimed to timed
                    if s.times==None: s.times = []
                    s.times.append(float(m.attrib['time']))
                if 'velocity' in m.attrib:
                    if s.velocities==None: s.velocities = []
                    s.velocities.append(readVector(m.attrib['velocity']))
            for obj in sec.findall('ikgoal'):
                s.ikObjectives.append(readIKObjective(obj.text))
                # keep the raw text so saveXML can round-trip it verbatim
                s.ikObjectives[-1].text = obj.text
            for h in sec.findall('hold'):
                # index/name attributes reference the global hold set;
                # otherwise the hold is stored inline
                if 'index' in h.attrib:
                    s.holdIndices.append(int(h.attrib['index']))
                elif 'name' in h.attrib:
                    s.holdIndices.append(h.attrib['name'])
                else:
                    s.holds.append(readHold(h.text))
            self.sections.append(s)
        #read global hold set
        for h in root.findall('hold'):
            hold = readHold(h.text)
            if 'name' in h.attrib:
                self.holdSet[h.attrib['name']] = hold
            else:
                # unnamed holds get sequential integer keys
                self.holdSet[len(self.holdSet)] = hold
        return
def timeToSection(self,t):
"""Returns the section corresponding to the time parameter t"""
if not self.hasTiming():
return int(math.floor(t*len(self.sections)))
else:
if t < self.startTime(): return -1
for i,s in enumerate(self.sections):
if t < s.times[-1]:
return i
return len(self.sections)
def timeToSegment(self,t):
""" Returns a (section index,milestone index,param) tuple such that
interpolation between the section's milestone and its successor
corresponds to time t."""
s = self.timeToSection(t)
if s < 0: return (-1,0,0)
elif s >= len(self.sections): return (s,0,0)
sec = self.sections[s]
if len(sec.times)==0:
usec = t*len(self.sections)-s
tsec = (len(sec.milestones)-1)*usec
i = int(math.floor(tseg))
u = tseg - i
return (s,i,u)
else:
i = bisect.bisect_left(sec.times,t)
p = i-1
u=(t-sec.times[p])/(sec.times[i]-sec.times[p])
if i==0:
return (s,0,0)
return (s,p,u)
def eval(self,t):
"""Evaluates the MultiPath at time t."""
(s,i,u) = self.getSegment(t)
if s < 0: return self.startConfig()
elif s >= len(self.sections): return self.endConfig()
if u==0: return self.sections[s].milestones[i]
return vectorops.interpolate(self.sections[s].milestones[i],self.sections[s].milestones[i+1],u)
    def getTrajectory(self,robot=None,eps=None):
        """Returns a trajectory representation of this MultiPath. If robot is provided, then a RobotTrajectory
        is returned.  Otherwise, if velocity information is given, then a HermiteTrajectory is returned.
        Otherwise, a Trajectory is returned.

        If robot and eps is given, then the IK constraints along the trajectory are solved and the path is
        discretized at resolution eps.

        Args:
            robot: if given, the robot model used to build a RobotTrajectory
                and to solve IK constraints.
            eps (float, optional): IK discretization resolution; only used
                when robot is also given.
        """
        from . import trajectory
        res = trajectory.Trajectory()
        if robot is not None:
            res = trajectory.RobotTrajectory(robot)
            if self.sections[0].velocities is not None:
                print("MultiPath.getTrajectory: Warning, can't discretize IK constraints with velocities specified")
        elif self.sections[0].velocities is not None:
            res = trajectory.HermiteTrajectory()
        if robot is not None and eps is not None:
            from ..plan.robotcspace import ClosedLoopRobotCSpace
            hastiming = self.hasTiming()
            for i,s in enumerate(self.sections):
                # solve this section's IK constraints and densify each segment
                space = ClosedLoopRobotCSpace(robot,self.getIKProblem(i))
                for j in range(len(s.configs)-1):
                    ikpath = space.interpolationPath(s.configs[j],s.configs[j+1],eps)
                    if hastiming:
                        t0 = s.times[j]
                        t1 = s.times[j+1]
                    else:
                        # untimed: each original segment spans one time unit
                        t0 = len(res.milestones)
                        t1 = t0 + 1
                    # spread the densified milestones uniformly over [t0,t1]
                    iktimes = [t0 + float(k)/float(len(ikpath)-1)*(t1-t0) for k in range(len(ikpath))]
                    res.milestones += ikpath[:-1]
                    res.times += iktimes[:-1]
            res.milestones.append(self.sections[-1].configs[-1])
        else:
            # no IK discretization: concatenate configs, dropping duplicated
            # section-boundary milestones
            for s in self.sections:
                res.milestones += s.configs[:-1]
            res.milestones.append(self.sections[-1].configs[-1])
            if self.sections[0].velocities is not None:
                vels = []
                for s in self.sections:
                    assert s.velocities is not None,"Some sections have velocities, some don't?"
                    vels += s.velocities[:-1]
                vels.append(self.sections[-1].velocities[-1])
                for i,q in enumerate(res.milestones):
                    assert len(vels[i]) == len(q),"Velocities don't have the right size?"
                    # Hermite milestones stack config and velocity
                    res.milestones[i] = q + vels[i]
            if not self.hasTiming():
                res.times = list(range(len(res.milestones)))
            else:
                for s in self.sections:
                    res.times += s.times[:-1]
                res.times.append(self.sections[-1].times[-1])
        return res
def _escape_nl(text):
    """XML-escapes text and encodes newlines as the &#x0A; character
    reference so they survive inside XML attribute values."""
    # Bug fix: the '&#x0A;' replacement string had been entity-decoded into
    # a literal newline, which broke the string literal (and the escaping).
    return escape(text).replace('\n','&#x0A;')
def _prettify(elem,indent_level=0):
    """Return a pretty-printed XML string for the Element.

    Args:
        elem (xml.etree.ElementTree.Element): the element to render.
        indent_level (int): current nesting depth (used for indentation).
    """
    indent = "  "
    # Bug fixes for Python 3: str must not be concatenated with the bytes
    # produced by .encode('utf-8'), and Element.getchildren() was removed --
    # iterate the element directly instead.
    res = indent_level*indent + '<'+elem.tag
    for k in list(elem.keys()):
        res += " "+k+'="'+_escape_nl(elem.get(k))+'"'
    children = list(elem)
    if len(children)==0 and not elem.text:
        res += ' />'
        return res
    res += '>'
    if elem.text:
        res += _escape_nl(elem.text)
    for c in children:
        res += '\n'+_prettify(c,indent_level+1)
    if len(children)>0:
        res += '\n'+indent_level*indent
    res += '</'+elem.tag+'>'
    return res
| 42.400411 | 116 | 0.560221 |
3730eef3e8374b6627aaaf5f260c55571e2bce5f | 1,674 | py | Python | setup.py | priv-kweihmann/ensurecvs | 0fde2ea3d3b37baa8518a499051bd5b7044c7b94 | [
"BSD-2-Clause"
] | 1 | 2019-11-20T19:50:54.000Z | 2019-11-20T19:50:54.000Z | setup.py | priv-kweihmann/ensurecvs | 0fde2ea3d3b37baa8518a499051bd5b7044c7b94 | [
"BSD-2-Clause"
] | 3 | 2019-12-21T15:22:25.000Z | 2020-02-27T19:08:20.000Z | setup.py | priv-kweihmann/ensurecvs | 0fde2ea3d3b37baa8518a499051bd5b7044c7b94 | [
"BSD-2-Clause"
] | null | null | null | import setuptools
import subprocess
# Default long description: a plain-text pointer to the project README.
_long_description = "See https://github.com/priv-kweihmann/ensurecvs for documentation"
_long_description_content_type = "text/plain"
try:
    # Prefer a reST rendering of README.md when pandoc is installed.
    _long_description = subprocess.check_output(
        ["pandoc", "--from", "markdown", "--to", "rst", "README.md"]).decode("utf-8")
    _long_description_content_type = "text/x-rst"
except (subprocess.CalledProcessError, FileNotFoundError):
    # pandoc missing or failed: keep the plain-text fallback above.
    pass
# Runtime dependencies are read from requirements.txt, one per line.
requirements = []
with open('requirements.txt') as f:
    requirements = f.read().splitlines()
setuptools.setup(
    name="ensurecvs",
    version="1.0.3",
    author="Konrad Weihmann",
    author_email="kweihmann@outlook.com",
    description="Find important upstream fixes",
    long_description=_long_description,
    long_description_content_type=_long_description_content_type,
    url="https://github.com/priv-kweihmann/ensurecvs",
    packages=setuptools.find_packages(),
    install_requires=requirements,
    scripts=['bin/ensurecvs'],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Quality Assurance",
        "Topic :: Software Development :: Bug Tracking",
    ],
)
| 36.391304 | 87 | 0.663082 |
06ff546051fe4c9d40b28059b735bd5e56e5a083 | 950 | py | Python | visual-chatbot/visdial/urls.py | hzhucn/Visual_Dialogue.pytorch | 74a4d52f87afae7eafe3e8af6d4dc8809398000f | [
"MIT"
] | null | null | null | visual-chatbot/visdial/urls.py | hzhucn/Visual_Dialogue.pytorch | 74a4d52f87afae7eafe3e8af6d4dc8809398000f | [
"MIT"
] | null | null | null | visual-chatbot/visdial/urls.py | hzhucn/Visual_Dialogue.pytorch | 74a4d52f87afae7eafe3e8af6d4dc8809398000f | [
"MIT"
] | 1 | 2018-09-15T06:00:27.000Z | 2018-09-15T06:00:27.000Z | """visdial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Route /admin/ to the Django admin site and everything else to the chat
# app; media files are served from MEDIA_ROOT via static().
# NOTE(review): static()-based media serving is typically development-only --
# confirm the deployment setup.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('chat.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38 | 79 | 0.717895 |
581fa49403be3481a843bb11c5664df8711af345 | 537 | py | Python | piper/functional/elbo_module.py | branislav1991/piper | 88fede157912445e982bbcc832253c30ec86ca6b | [
"MIT"
] | null | null | null | piper/functional/elbo_module.py | branislav1991/piper | 88fede157912445e982bbcc832253c30ec86ca6b | [
"MIT"
] | 11 | 2020-11-24T21:09:58.000Z | 2020-12-01T15:53:18.000Z | piper/functional/elbo_module.py | branislav1991/piper | 88fede157912445e982bbcc832253c30ec86ca6b | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Branislav Holländer. All rights reserved.
# See the file LICENSE for copying permission.
from piper import tree
def elbo(model_tree: tree.Tree, q_tree: tree.Tree):
    """Calculates the evidence lower bound given the model tree and the
    q (variational) tree.

    Returns the negated ELBO, matching the original sign convention
    (suitable for use as a minimization objective).
    """
    total = 0.
    # log-probability of each q sample under the corresponding model node
    for name, model_node in model_tree.nodes.items():
        sample = q_tree.nodes[name]
        total += model_node.distribution.log_prob(sample.value)
    # subtract the log-probability of each sample under q itself
    for q_node in q_tree.nodes.values():
        total -= q_node.distribution.log_prob(q_node.value)
    return -total
d3f8f94c16f3ca029ae62637e9d68bb4e200971d | 336 | py | Python | tests/test_dates.py | aellerton/gilt-python | a57e7f8f3af5eecb994cabdcd9fb4866c2a3af20 | [
"Apache-2.0"
] | 1 | 2017-04-25T09:10:30.000Z | 2017-04-25T09:10:30.000Z | tests/test_dates.py | aellerton/gilt-python | a57e7f8f3af5eecb994cabdcd9fb4866c2a3af20 | [
"Apache-2.0"
] | 1 | 2016-11-26T02:31:14.000Z | 2016-11-27T07:23:53.000Z | tests/test_dates.py | aellerton/gilt-python | a57e7f8f3af5eecb994cabdcd9fb4866c2a3af20 | [
"Apache-2.0"
] | null | null | null |
# "begins": "2012-02-24T17:00:00Z",
# "ends": "2012-02-26T05:00:00Z",
import iso8601
d = iso8601.parse_date("2012-02-24T17:00:00Z")
print "2012-02-24T17:00:00Z", d
assert str(d)=="2012-02-24 17:00:00+00:00"
d = iso8601.parse_date("2012-02-26T05:00:00Z")
print "2012-02-26T05:00:00Z", d
assert str(d)=="2012-02-26 05:00:00+00:00"
| 22.4 | 46 | 0.66369 |
e72b41de3c7c13b2b87f290b3bb4bf188f1b5e0e | 78,608 | py | Python | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py | timgates42/azure-sdk-for-python | 12dc44eb39f6d1e3e21356cfcfb4f42f1d03fdf1 | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py | timgates42/azure-sdk-for-python | 12dc44eb39f6d1e3e21356cfcfb4f42f1d03fdf1 | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py | timgates42/azure-sdk-for-python | 12dc44eb39f6d1e3e21356cfcfb4f42f1d03fdf1 | [
"MIT"
] | null | null | null | # coding=utf-8 pylint: disable=too-many-lines
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import re
from enum import Enum
from ._generated.models import (
LanguageInput,
MultiLanguageInput,
)
from ._generated.v3_0 import models as _v3_0_models
from ._generated.v3_1_preview_5 import models as _latest_preview_models
def _get_indices(relation):
    """Extract every run of decimal digits in the given reference string
    as an int, in order of appearance."""
    return [int(digits) for digits in re.findall(r"\d+", relation)]
class DictMixin(object):
    """Mixin giving model classes dict-style access (indexing, iteration
    helpers, key/value/item views) over their public attributes.

    Attributes whose names start with an underscore are treated as private
    and are hidden from keys()/values()/items()/str().
    """
    def __setitem__(self, key, item):
        self.__dict__[key] = item
    def __getitem__(self, key):
        return self.__dict__[key]
    def __repr__(self):
        return str(self)
    def __len__(self):
        # counts only public attributes, consistent with keys()
        return len(self.keys())
    def __delitem__(self, key):
        # NOTE: sets the value to None instead of removing the key, so the
        # key remains present for __contains__/keys().
        self.__dict__[key] = None
    def __eq__(self, other):
        """Compare objects by comparing all attributes."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False
    def __ne__(self, other):
        """Compare objects by comparing all attributes."""
        return not self.__eq__(other)
    def __contains__(self, key):
        return key in self.__dict__
    def __str__(self):
        # renders only the public attributes
        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
    def has_key(self, k):
        # dict.has_key-style helper kept for dict API parity
        return k in self.__dict__
    def update(self, *args, **kwargs):
        return self.__dict__.update(*args, **kwargs)
    def keys(self):
        return [k for k in self.__dict__ if not k.startswith('_')]
    def values(self):
        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
    def items(self):
        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
    def get(self, key, default=None):
        if key in self.__dict__:
            return self.__dict__[key]
        return default
class PiiEntityDomainType(str, Enum):
    """The different domains of PII entities that users can filter by"""
    # Restricts PII recognition to health-related entities.
    PROTECTED_HEALTH_INFORMATION = "phi" # See https://aka.ms/tanerpii for more information.
class HealthcareEntityRelationRoleType(str, Enum):
    """Type of roles entities can have in `entity_relations`.

    There may be roles not covered in this enum; values are plain strings,
    so unknown roles can still be compared against their string form.
    """
    ABBREVIATED_TERM = "AbbreviatedTerm"
    FULL_TERM = "FullTerm"
    DIRECTION = "Direction"
    BODY_STRUCTURE = "BodyStructure"
    CONDITION = "Condition"
    EXAMINATION = "Examination"
    TREATMENT = "Treatment"
    DOSAGE = "Dosage"
    MEDICATION = "Medication"
    FORM = "Form"
    FREQUENCY = "Frequency"
    QUALIFIER = "Qualifier"
    RELATION = "Relation"
    ROUTE = "Route"
    TIME = "Time"
    EVENT = "Event"
    UNIT = "Unit"
    VALUE = "Value"
class DetectedLanguage(DictMixin):
    """The predicted language found in text, with its confidence score and
    its ISO 639-1 representation.

    :ivar name: Long name of a detected language (e.g. English, French).
    :vartype name: str
    :ivar iso6391_name: Two-letter ISO 639-1 representation of the detected
        language (e.g. en, fr).
    :vartype iso6391_name: str
    :ivar confidence_score: Confidence score between 0 and 1; values close
        to 1 indicate near certainty that the identified language is true.
    :vartype confidence_score: float
    """
    def __init__(self, **kwargs):
        self.name = kwargs.get("name")
        self.iso6391_name = kwargs.get("iso6391_name")
        self.confidence_score = kwargs.get("confidence_score")
    @classmethod
    def _from_generated(cls, language):
        # Copy the fields of the autogenerated REST model onto this model.
        return cls(
            name=language.name,
            iso6391_name=language.iso6391_name,
            confidence_score=language.confidence_score,
        )
    def __repr__(self):
        rendered = "DetectedLanguage(name={}, iso6391_name={}, confidence_score={})".format(
            self.name, self.iso6391_name, self.confidence_score
        )
        # truncate to 1024 chars, matching the other models
        return rendered[:1024]
class RecognizeEntitiesResult(DictMixin):
    """Result object containing the entities recognized in a single document.

    :ivar str id: Unique, non-empty document identifier matching the document
        id passed in the request (auto-assigned if not specified).
    :ivar entities: Recognized entities in the document.
    :vartype entities: list[~azure.ai.textanalytics.CategorizedEntity]
    :ivar warnings: Warnings encountered while processing the document.
        Results are still returned when warnings occur, but may not be fully
        accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: Information about the document payload, present when
        `show_stats=True` was specified in the request.
    :vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Always False for this class; supports error checks
        when iterating over mixed result lists.
    """
    def __init__(self, **kwargs):
        self.id = kwargs.get("id")
        self.entities = kwargs.get("entities")
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics")
        self.is_error = False
    def __repr__(self):
        template = "RecognizeEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})"
        rendered = template.format(
            self.id, repr(self.entities), repr(self.warnings), repr(self.statistics), self.is_error
        )
        # truncate to 1024 chars, matching the other models
        return rendered[:1024]
class RecognizePiiEntitiesResult(DictMixin):
    """Result object containing the Personally Identifiable Information (PII)
    entities recognized in a single document.

    :ivar str id: Unique, non-empty document identifier matching the document
        id passed in the request (auto-assigned if not specified).
    :ivar entities: Recognized PII entities in the document.
    :vartype entities: list[~azure.ai.textanalytics.PiiEntity]
    :ivar str redacted_text: The input document's text with all PII
        information redacted out.  Only returned for API versions
        v3.1-preview and up.
    :ivar warnings: Warnings encountered while processing the document.
        Results are still returned when warnings occur, but may not be fully
        accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: Information about the document payload, present when
        `show_stats=True` was specified in the request.
    :vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Always False for this class; supports error checks
        when iterating over mixed result lists.

    .. versionadded:: v3.1-preview
        The *redacted_text* parameter.
    """
    def __init__(self, **kwargs):
        self.id = kwargs.get("id")
        self.entities = kwargs.get("entities")
        self.redacted_text = kwargs.get("redacted_text")
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics")
        self.is_error = False
    def __repr__(self):
        template = (
            "RecognizePiiEntitiesResult(id={}, entities={}, redacted_text={}, warnings={}, "
            "statistics={}, is_error={})"
        )
        rendered = template.format(
            self.id,
            repr(self.entities),
            self.redacted_text,
            repr(self.warnings),
            repr(self.statistics),
            self.is_error,
        )
        # truncate to 1024 chars, matching the other models
        return rendered[:1024]
class AnalyzeHealthcareEntitiesResult(DictMixin):
    """Contains the healthcare entities and relations found in a single
    document.

    :ivar str id: Unique, non-empty document identifier matching the document
        id passed in the request (auto-assigned if not specified).
    :ivar entities: Identified healthcare entities in the document, e.g.
        "ibuprofen" in "The subject took ibuprofen".
    :vartype entities: list[~azure.ai.textanalytics.HealthcareEntity]
    :ivar entity_relations: Identified relations between entities, e.g. the
        relationship between the dosage "100mg" and the medication
        "ibuprofen" in "The subject took 100mg of ibuprofen".
    :vartype entity_relations: list[~azure.ai.textanalytics.HealthcareRelation]
    :ivar warnings: Warnings encountered while processing the document.
        Results are still returned when warnings occur, but may not be fully
        accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: Information about the document payload, present when
        show_stats=true was specified in the request.
    :vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Always False for this class; supports error checks
        when iterating over mixed result lists.
    """
    def __init__(self, **kwargs):
        self.id = kwargs.get("id")
        self.entities = kwargs.get("entities")
        self.entity_relations = kwargs.get("entity_relations")
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics")
        self.is_error = False
    @classmethod
    def _from_generated(cls, healthcare_result):
        # Convert the generated entities first: relations reference entities
        # by index, so the list must exist before relations are built.
        entities = [
            HealthcareEntity._from_generated(e)  # pylint: disable=protected-access
            for e in healthcare_result.entities
        ]
        relations = [
            HealthcareRelation._from_generated(r, entities)  # pylint: disable=protected-access
            for r in healthcare_result.relations
        ]
        return cls(
            id=healthcare_result.id,
            entities=entities,
            entity_relations=relations,
            warnings=healthcare_result.warnings,
            statistics=healthcare_result.statistics,
        )
    def __repr__(self):
        template = (
            "AnalyzeHealthcareEntitiesResult(id={}, entities={}, entity_relations={}, warnings={}, "
            "statistics={}, is_error={})"
        )
        rendered = template.format(
            self.id,
            repr(self.entities),
            repr(self.entity_relations),
            repr(self.warnings),
            repr(self.statistics),
            self.is_error,
        )
        # truncate to 1024 chars, matching the other models
        return rendered[:1024]
class HealthcareRelation(DictMixin):
    """A relation detected in a document: an entity graph of a given
    relation type, where all entities are connected and have specific roles
    within the relation context.

    :ivar relation_type: The type of relation, e.g. "DosageOfMedication"
        for the relationship between "100mg" and "ibuprofen" in "The
        subject took 100 mg of ibuprofen".
    :vartype relation_type: str or ~azure.ai.textanalytics.HealthcareEntityRelationType
    :ivar roles: The roles present in this relation, e.g. "Dosage" and
        "Medication" in the document above.
    :vartype roles: list[~azure.ai.textanalytics.HealthcareRelationRole]
    """
    def __init__(self, **kwargs):
        self.relation_type = kwargs.get("relation_type")
        self.roles = kwargs.get("roles")
    @classmethod
    def _from_generated(cls, healthcare_relation_result, entities):
        resolved_roles = [
            HealthcareRelationRole._from_generated(r, entities)  # pylint: disable=protected-access
            for r in healthcare_relation_result.entities
        ]
        return cls(
            relation_type=healthcare_relation_result.relation_type,
            roles=resolved_roles,
        )
    def __repr__(self):
        # truncate to 1024 chars, matching the other models
        return "HealthcareRelation(relation_type={}, roles={})".format(
            self.relation_type,
            repr(self.roles),
        )[:1024]
class HealthcareRelationRole(DictMixin):
    """A model representing a role in a relation.

    For example, in "The subject took 100 mg of ibuprofen",
    "100 mg" is a dosage entity fulfilling the role "Dosage"
    in the extracted relation "DosageOfMedication".

    :ivar name: The role of the entity in the relationship. I.e., in the relation
        "The subject took 100 mg of ibuprofen", the dosage entity "100 mg" has role
        "Dosage".
    :vartype name: str or ~azure.ai.textanalytics.HealthcareEntityRelationRoleType
    :ivar entity: The entity that is present in the relationship. For example, in
        "The subject took 100 mg of ibuprofen", this property holds the dosage entity
        of "100 mg".
    :vartype entity: ~azure.ai.textanalytics.HealthcareEntity
    """
    def __init__(self, **kwargs):
        self.name = kwargs.get("name")
        self.entity = kwargs.get("entity")
    @staticmethod
    def _get_entity(healthcare_role_result, entities):
        # Resolve the service's ref string to a concrete entity object.
        nums = _get_indices(healthcare_role_result.ref)
        entity_index = nums[1]  # first num parsed from index is document #, second is entity index
        return entities[entity_index]
    @classmethod
    def _from_generated(cls, healthcare_role_result, entities):
        return cls(
            name=healthcare_role_result.role,
            entity=HealthcareRelationRole._get_entity(healthcare_role_result, entities)
        )
    def __repr__(self):
        # Bug fix for consistency: truncate to 1024 chars like every other
        # model's __repr__ in this module; this was the only one missing it.
        return "HealthcareRelationRole(name={}, entity={})".format(
            self.name, repr(self.entity)
        )[:1024]
class DetectLanguageResult(DictMixin):
    """Result object containing the detected language of a single document.

    :ivar id: Unique, non-empty document identifier matching the document id
        passed in the request (auto-assigned if not specified).
    :vartype id: str
    :ivar primary_language: The primary language detected in the document.
    :vartype primary_language: ~azure.ai.textanalytics.DetectedLanguage
    :ivar warnings: Warnings encountered while processing the document.
        Results are still returned when warnings occur, but may not be fully
        accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: Information about the document payload, present when
        `show_stats=True` was specified in the request.
    :vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Always False for this class; supports error checks
        when iterating over mixed result lists.
    """
    def __init__(self, **kwargs):
        self.id = kwargs.get("id")
        self.primary_language = kwargs.get("primary_language")
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics")
        self.is_error = False
    def __repr__(self):
        template = "DetectLanguageResult(id={}, primary_language={}, warnings={}, statistics={}, is_error={})"
        rendered = template.format(
            self.id, repr(self.primary_language), repr(self.warnings), repr(self.statistics), self.is_error
        )
        # truncate to 1024 chars, matching the other models
        return rendered[:1024]
class CategorizedEntity(DictMixin):
    """CategorizedEntity contains information about a particular
    entity found in text.
    :ivar text: Entity text as appears in the request.
    :vartype text: str
    :ivar category: Entity category, such as Person/Location/Org/SSN etc
    :vartype category: str
    :ivar subcategory: Entity subcategory, such as Age/Year/TimeRange etc
    :vartype subcategory: str
    :ivar int length: The entity text length. This value depends on the value of the
        `string_index_type` parameter set in the original request, which is UnicodeCodePoints
        by default. Only returned for API versions v3.1-preview and up.
    :ivar int offset: The entity text offset from the start of the document.
        The value depends on the value of the `string_index_type` parameter
        set in the original request, which is UnicodeCodePoints by default. Only returned for
        API versions v3.1-preview and up.
    :ivar confidence_score: Confidence score between 0 and 1 of the extracted
        entity.
    :vartype confidence_score: float
    .. versionadded:: v3.1-preview
        The *offset* property.
    """
    def __init__(self, **kwargs):
        # All attributes default to None when not supplied.
        for attr in ("text", "category", "subcategory", "length", "offset", "confidence_score"):
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, entity):
        # v3.0 responses predate the `string_index_type` contract, so their
        # offset/length values use an unspecified encoding and are suppressed.
        is_v3_0 = isinstance(entity, _v3_0_models.Entity)
        return cls(
            text=entity.text,
            category=entity.category,
            subcategory=entity.subcategory,
            length=None if is_v3_0 else entity.length,
            offset=None if is_v3_0 else entity.offset,
            confidence_score=entity.confidence_score,
        )
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "CategorizedEntity(text=%s, category=%s, subcategory=%s, "
            "length=%s, offset=%s, confidence_score=%s)" % (
                self.text, self.category, self.subcategory,
                self.length, self.offset, self.confidence_score,
            )
        )[:1024]
class PiiEntity(DictMixin):
    """PiiEntity contains information about a Personally Identifiable
    Information (PII) entity found in text.
    :ivar str text: Entity text as appears in the request.
    :ivar str category: Entity category, such as Financial Account
        Identification/Social Security Number/Phone Number, etc.
    :ivar str subcategory: Entity subcategory, such as Credit Card/EU
        Phone number/ABA Routing Numbers, etc.
    :ivar int length: The PII entity text length. This value depends on the value
        of the `string_index_type` parameter specified in the original request, which
        is UnicodeCodePoints by default.
    :ivar int offset: The PII entity text offset from the start of the document.
        This value depends on the value of the `string_index_type` parameter specified
        in the original request, which is UnicodeCodePoints by default.
    :ivar float confidence_score: Confidence score between 0 and 1 of the extracted
        entity.
    """
    _FIELDS = ("text", "category", "subcategory", "length", "offset", "confidence_score")
    def __init__(self, **kwargs):
        # All attributes default to None when not supplied.
        for attr in self._FIELDS:
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, entity):
        # PII is only available on v3.1-preview+ so offsets are always
        # trustworthy; this is a direct field-for-field copy.
        return cls(**{attr: getattr(entity, attr) for attr in cls._FIELDS})
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "PiiEntity(text=%s, category=%s, subcategory=%s, length=%s, "
            "offset=%s, confidence_score=%s)" % (
                self.text, self.category, self.subcategory,
                self.length, self.offset, self.confidence_score,
            )
        )[:1024]
class HealthcareEntity(DictMixin):
    """HealthcareEntity contains information about a Healthcare entity found in text.
    :ivar str text: Entity text as appears in the document.
    :ivar str normalized_text: Optional. Normalized version of the raw `text` we extract
        from the document. Not all `text`s have a normalized version.
    :ivar str category: Entity category, see the following link for health's named
        entity types: https://aka.ms/text-analytics-health-entities
    :ivar str subcategory: Entity subcategory.
    :ivar assertion: Contains various assertions about this entity. For example, if
        an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
        Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
        with another diagnosis?
    :vartype assertion: ~azure.ai.textanalytics.HealthcareEntityAssertion
    :ivar int length: The entity text length. This value depends on the value
        of the `string_index_type` parameter specified in the original request, which is
        UnicodeCodePoints by default.
    :ivar int offset: The entity text offset from the start of the document.
        This value depends on the value of the `string_index_type` parameter specified
        in the original request, which is UnicodeCodePoints by default.
    :ivar float confidence_score: Confidence score between 0 and 1 of the extracted
        entity.
    :ivar data_sources: A collection of entity references in known data sources.
    :vartype data_sources: list[~azure.ai.textanalytics.HealthcareEntityDataSource]
    """
    def __init__(self, **kwargs):
        self.text = kwargs.get("text", None)
        self.normalized_text = kwargs.get("normalized_text", None)
        self.category = kwargs.get("category", None)
        self.subcategory = kwargs.get("subcategory", None)
        self.assertion = kwargs.get("assertion", None)
        self.length = kwargs.get("length", None)
        self.offset = kwargs.get("offset", None)
        self.confidence_score = kwargs.get("confidence_score", None)
        self.data_sources = kwargs.get("data_sources", [])
    @classmethod
    def _from_generated(cls, healthcare_entity):
        # Map an autogenerated REST model onto this public model.
        assertion = None
        try:
            # Guarded: some generated model versions do not expose
            # `assertion`, in which case the attribute access raises.
            if healthcare_entity.assertion:
                assertion = HealthcareEntityAssertion._from_generated( # pylint: disable=protected-access
                    healthcare_entity.assertion
                )
        except AttributeError:
            assertion = None
        return cls(
            text=healthcare_entity.text,
            # The generated model calls the normalized form `name`.
            normalized_text=healthcare_entity.name,
            category=healthcare_entity.category,
            subcategory=healthcare_entity.subcategory,
            assertion=assertion,
            length=healthcare_entity.length,
            offset=healthcare_entity.offset,
            confidence_score=healthcare_entity.confidence_score,
            # NOTE(review): passes None (not []) when there are no links,
            # while __init__'s default is [] -- confirm this asymmetry is intended.
            data_sources=[
                HealthcareEntityDataSource(entity_id=l.id, name=l.data_source) for l in healthcare_entity.links
            ] if healthcare_entity.links else None
        )
    def __hash__(self):
        # Hash on the (truncated) repr so entities can be used in sets/dict
        # keys, e.g. as keys of the entity-relation mapping.
        return hash(repr(self))
    def __repr__(self):
        # Truncated to 1024 characters like the other model reprs.
        return "HealthcareEntity(text={}, normalized_text={}, category={}, subcategory={}, assertion={}, length={}, "\
            "offset={}, confidence_score={}, data_sources={})".format(
                self.text,
                self.normalized_text,
                self.category,
                self.subcategory,
                repr(self.assertion),
                self.length,
                self.offset,
                self.confidence_score,
                repr(self.data_sources),
            )[:1024]
class HealthcareEntityAssertion(DictMixin):
    """Contains various assertions about a `HealthcareEntity`.
    For example, if an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
    Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
    with another diagnosis?
    :ivar str conditionality: Describes whether the healthcare entity it's on is conditional on another entity.
        For example, "If the patient has a fever, he has pneumonia", the diagnosis of pneumonia
        is 'conditional' on whether the patient has a fever. Possible values are "hypothetical" and
        "conditional".
    :ivar str certainty: Describes how certain the healthcare entity it's on is. For example,
        in "The patient may have a fever", the fever entity is not 100% certain, but is instead
        "positivePossible". Possible values are "positive", "positivePossible", "neutralPossible",
        "negativePossible", and "negative".
    :ivar str association: Describes whether the healthcare entity it's on is the subject of the document, or
        if this entity describes someone else in the document. For example, in "The subject's mother has
        a fever", the "fever" entity is not associated with the subject themselves, but with the subject's
        mother. Possible values are "subject" and "other".
    """
    def __init__(self, **kwargs):
        # All attributes default to None when not supplied.
        self.conditionality = kwargs.get("conditionality", None)
        self.certainty = kwargs.get("certainty", None)
        self.association = kwargs.get("association", None)
    @classmethod
    def _from_generated(cls, healthcare_assertion):
        # Direct field-for-field copy from the generated REST model.
        return cls(
            conditionality=healthcare_assertion.conditionality,
            certainty=healthcare_assertion.certainty,
            association=healthcare_assertion.association,
        )
    def __repr__(self):
        # Truncated to 1024 characters for consistency with every other
        # model class in this module (was previously unbounded).
        return "HealthcareEntityAssertion(conditionality={}, certainty={}, association={})".format(
            self.conditionality, self.certainty, self.association
        )[:1024]
class HealthcareEntityDataSource(DictMixin):
    """
    HealthcareEntityDataSource contains information representing an entity reference in a known data source.
    :ivar str entity_id: ID of the entity in the given source catalog.
    :ivar str name: The name of the entity catalog from where the entity was identified, such as UMLS, CHV, MSH, etc.
    """
    def __init__(self, **kwargs):
        # Both attributes default to None when not supplied.
        for attr in ("entity_id", "name"):
            setattr(self, attr, kwargs.get(attr, None))
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return ("HealthcareEntityDataSource(entity_id=%s, name=%s)"
                % (self.entity_id, self.name))[:1024]
class TextAnalyticsError(DictMixin):
    """TextAnalyticsError contains the error code, message, and
    other details that explain why the batch or individual document
    failed to be processed by the service.
    :ivar code: Error code. Possible values include:
        'invalidRequest', 'invalidArgument', 'internalServerError',
        'serviceUnavailable', 'invalidParameterValue', 'invalidRequestBodyFormat',
        'emptyRequest', 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
        'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
    :vartype code: str
    :ivar message: Error message.
    :vartype message: str
    :ivar target: Error target.
    :vartype target: str
    """
    def __init__(self, **kwargs):
        # All attributes default to None when not supplied.
        for attr in ("code", "message", "target"):
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, err):
        # Surface the innermost (most specific) error when the service
        # supplies one; otherwise fall back to the top-level error.
        source = err.innererror or err
        return cls(
            code=source.code,
            message=source.message,
            target=source.target,
        )
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return ("TextAnalyticsError(code=%s, message=%s, target=%s)"
                % (self.code, self.message, self.target))[:1024]
class TextAnalyticsWarning(DictMixin):
    """TextAnalyticsWarning contains the warning code and message that explains why
    the response has a warning.
    :ivar code: Warning code. Possible values include: 'LongWordsInDocument',
        'DocumentTruncated'.
    :vartype code: str
    :ivar message: Warning message.
    :vartype message: str
    """
    def __init__(self, **kwargs):
        # Both attributes default to None when not supplied.
        for attr in ("code", "message"):
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, warning):
        # Direct field-for-field copy from the generated REST model.
        return cls(code=warning.code, message=warning.message)
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return ("TextAnalyticsWarning(code=%s, message=%s)"
                % (self.code, self.message))[:1024]
class ExtractKeyPhrasesResult(DictMixin):
    """ExtractKeyPhrasesResult is a result object which contains
    the key phrases found in a particular document.
    :ivar id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :vartype id: str
    :ivar key_phrases: A list of representative words or phrases.
        The number of key phrases returned is proportional to the number of words
        in the input document.
    :vartype key_phrases: list[str]
    :ivar warnings: Warnings encountered while processing document. Results will still be returned
        if there are warnings, but they may not be fully accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: If `show_stats=True` was specified in the request this
        field will contain information about the document payload.
    :vartype statistics:
        ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always False for an instance of a ExtractKeyPhrasesResult.
    """
    def __init__(self, **kwargs):
        # Attribute bag populated from keyword arguments; `warnings`
        # defaults to an empty list, everything else to None.
        for attr in ("id", "key_phrases"):
            setattr(self, attr, kwargs.get(attr, None))
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics", None)
        # Successful results are never errors (see DocumentError).
        self.is_error = False
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "ExtractKeyPhrasesResult(id=%s, key_phrases=%s, warnings=%r, statistics=%r, is_error=%s)" % (
                self.id, self.key_phrases, self.warnings, self.statistics, self.is_error,
            )
        )[:1024]
class RecognizeLinkedEntitiesResult(DictMixin):
    """RecognizeLinkedEntitiesResult is a result object which contains
    links to a well-known knowledge base, like for example, Wikipedia or Bing.
    :ivar id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :vartype id: str
    :ivar entities: Recognized well-known entities in the document.
    :vartype entities:
        list[~azure.ai.textanalytics.LinkedEntity]
    :ivar warnings: Warnings encountered while processing document. Results will still be returned
        if there are warnings, but they may not be fully accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: If `show_stats=True` was specified in the request this
        field will contain information about the document payload.
    :vartype statistics:
        ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always False for an instance of a RecognizeLinkedEntitiesResult.
    """
    def __init__(self, **kwargs):
        # Attribute bag populated from keyword arguments; `warnings`
        # defaults to an empty list, everything else to None.
        for attr in ("id", "entities"):
            setattr(self, attr, kwargs.get(attr, None))
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics", None)
        # Successful results are never errors (see DocumentError).
        self.is_error = False
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "RecognizeLinkedEntitiesResult(id=%s, entities=%r, warnings=%r, statistics=%r, is_error=%s)" % (
                self.id, self.entities, self.warnings, self.statistics, self.is_error,
            )
        )[:1024]
class AnalyzeSentimentResult(DictMixin):
    """AnalyzeSentimentResult is a result object which contains
    the overall predicted sentiment and confidence scores for your document
    and a per-sentence sentiment prediction with scores.
    :ivar id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :vartype id: str
    :ivar sentiment: Predicted sentiment for document (Negative,
        Neutral, Positive, or Mixed). Possible values include: 'positive',
        'neutral', 'negative', 'mixed'
    :vartype sentiment: str
    :ivar warnings: Warnings encountered while processing document. Results will still be returned
        if there are warnings, but they may not be fully accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: If `show_stats=True` was specified in the request this
        field will contain information about the document payload.
    :vartype statistics:
        ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar confidence_scores: Document level sentiment confidence
        scores between 0 and 1 for each sentiment label.
    :vartype confidence_scores:
        ~azure.ai.textanalytics.SentimentConfidenceScores
    :ivar sentences: Sentence level sentiment analysis.
    :vartype sentences:
        list[~azure.ai.textanalytics.SentenceSentiment]
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always False for an instance of a AnalyzeSentimentResult.
    """
    def __init__(self, **kwargs):
        # Attribute bag populated from keyword arguments; `warnings`
        # defaults to an empty list, everything else to None.
        self.id = kwargs.get("id", None)
        self.sentiment = kwargs.get("sentiment", None)
        self.warnings = kwargs.get("warnings", [])
        for attr in ("statistics", "confidence_scores", "sentences"):
            setattr(self, attr, kwargs.get(attr, None))
        # Successful results are never errors (see DocumentError).
        self.is_error = False
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "AnalyzeSentimentResult(id=%s, sentiment=%s, warnings=%r, statistics=%r, confidence_scores=%r, "
            "sentences=%r, is_error=%s)" % (
                self.id, self.sentiment, self.warnings, self.statistics,
                self.confidence_scores, self.sentences, self.is_error,
            )
        )[:1024]
class TextDocumentStatistics(DictMixin):
    """TextDocumentStatistics contains information about
    the document payload.
    :ivar character_count: Number of text elements recognized in
        the document.
    :vartype character_count: int
    :ivar transaction_count: Number of transactions for the document.
    :vartype transaction_count: int
    """
    def __init__(self, **kwargs):
        # Both attributes default to None when not supplied.
        for attr in ("character_count", "transaction_count"):
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, stats):
        # The service omits statistics unless show_stats=True was requested.
        if stats is None:
            return None
        return cls(character_count=stats.characters_count,
                   transaction_count=stats.transactions_count)
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return ("TextDocumentStatistics(character_count=%s, transaction_count=%s)"
                % (self.character_count, self.transaction_count))[:1024]
class DocumentError(DictMixin):
    """DocumentError is an error object which represents an error on
    the individual document.
    :ivar id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :vartype id: str
    :ivar error: The document error.
    :vartype error: ~azure.ai.textanalytics.TextAnalyticsError
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always True for an instance of a DocumentError.
    """
    def __init__(self, **kwargs):
        self.id = kwargs.get("id", None)
        self.error = kwargs.get("error", None)
        self.is_error = True
    def __getattr__(self, attr):
        # Provide a friendlier AttributeError when a caller accesses an
        # attribute that exists on one of the *successful* result types --
        # that usually means the caller forgot to check `is_error` before
        # treating this item as a result.
        result_set = set()
        # Collect every attribute name exposed by the result model classes
        # (DictMixin.keys() lists public instance attributes).
        result_set.update(
            RecognizeEntitiesResult().keys() + RecognizePiiEntitiesResult().keys()
            + DetectLanguageResult().keys() + RecognizeLinkedEntitiesResult().keys()
            + AnalyzeSentimentResult().keys() + ExtractKeyPhrasesResult().keys()
        )
        # Keep only names unique to result objects, i.e. not also present
        # on DocumentError itself (id/error/is_error).
        result_attrs = result_set.difference(DocumentError().keys())
        if attr in result_attrs:
            raise AttributeError(
                "'DocumentError' object has no attribute '{}'. The service was unable to process this document:\n"
                "Document Id: {}\nError: {} - {}\n".
                format(attr, self.id, self.error.code, self.error.message)
            )
        raise AttributeError("'DocumentError' object has no attribute '{}'".format(attr))
    @classmethod
    def _from_generated(cls, doc_err):
        # Wrap the generated document error and its nested error details.
        return cls(
            id=doc_err.id,
            error=TextAnalyticsError._from_generated(doc_err.error),  # pylint: disable=protected-access
            is_error=True
        )
    def __repr__(self):
        # Truncated to 1024 characters like the other model reprs.
        return "DocumentError(id={}, error={}, is_error={})" \
            .format(self.id, repr(self.error), self.is_error)[:1024]
class DetectLanguageInput(LanguageInput):
    """The input document to be analyzed for detecting language.
    :keyword str id: Unique, non-empty document identifier.
    :keyword str text: The input text to process.
    :keyword str country_hint: A country hint to help better detect
        the language of the text. Accepts two letter country codes
        specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
        in the string "none" to not use a country_hint.
    :ivar id: Required. Unique, non-empty document identifier.
    :vartype id: str
    :ivar text: Required. The input text to process.
    :vartype text: str
    :ivar country_hint: A country hint to help better detect
        the language of the text. Accepts two letter country codes
        specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
        in the string "none" to not use a country_hint.
    :vartype country_hint: str
    """
    def __init__(self, **kwargs):
        # Let the generated LanguageInput base consume the kwargs first,
        # then mirror the values onto this model's public attributes.
        super(DetectLanguageInput, self).__init__(**kwargs)
        for attr in ("id", "text", "country_hint"):
            setattr(self, attr, kwargs.get(attr, None))
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return ("DetectLanguageInput(id=%s, text=%s, country_hint=%s)"
                % (self.id, self.text, self.country_hint))[:1024]
class LinkedEntity(DictMixin):
    """LinkedEntity contains a link to the well-known recognized
    entity in text. The link comes from a data source like Wikipedia
    or Bing. It additionally includes all of the matches of this
    entity found in the document.
    :ivar name: Entity Linking formal name.
    :vartype name: str
    :ivar matches: List of instances this entity appears in the text.
    :vartype matches:
        list[~azure.ai.textanalytics.LinkedEntityMatch]
    :ivar language: Language used in the data source.
    :vartype language: str
    :ivar data_source_entity_id: Unique identifier of the recognized entity from the data
        source.
    :vartype data_source_entity_id: str
    :ivar url: URL to the entity's page from the data source.
    :vartype url: str
    :ivar data_source: Data source used to extract entity linking,
        such as Wiki/Bing etc.
    :vartype data_source: str
    :ivar str bing_entity_search_api_id: Bing Entity Search unique identifier of the recognized entity.
        Use in conjunction with the Bing Entity Search SDK to fetch additional relevant information.
        Only available for API version v3.1-preview and up.
    .. versionadded:: v3.1-preview
        The *bing_entity_search_api_id* property.
    """
    def __init__(self, **kwargs):
        # All attributes default to None when not supplied.
        for attr in ("name", "matches", "language", "data_source_entity_id",
                     "url", "data_source", "bing_entity_search_api_id"):
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, entity):
        # `bing_id` only exists on v3.1-preview+ generated models, so fall
        # back to None on older ones.
        return cls(
            name=entity.name,
            matches=[LinkedEntityMatch._from_generated(e) for e in entity.matches],  # pylint: disable=protected-access
            language=entity.language,
            data_source_entity_id=entity.id,
            url=entity.url,
            data_source=entity.data_source,
            bing_entity_search_api_id=getattr(entity, "bing_id", None),
        )
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "LinkedEntity(name=%s, matches=%r, language=%s, data_source_entity_id=%s, url=%s, "
            "data_source=%s, bing_entity_search_api_id=%s)" % (
                self.name, self.matches, self.language, self.data_source_entity_id,
                self.url, self.data_source, self.bing_entity_search_api_id,
            )
        )[:1024]
class LinkedEntityMatch(DictMixin):
    """A match for the linked entity found in text. Provides
    the confidence score of the prediction and where the entity
    was found in the text.
    :ivar confidence_score: If a well-known item is recognized, a
        decimal number denoting the confidence level between 0 and 1 will be
        returned.
    :vartype confidence_score: float
    :ivar text: Entity text as appears in the request.
    :ivar int length: The linked entity match text length. This value depends on the value of the
        `string_index_type` parameter set in the original request, which is UnicodeCodePoints by default.
        Only returned for API versions v3.1-preview and up.
    :ivar int offset: The linked entity match text offset from the start of the document.
        The value depends on the value of the `string_index_type` parameter
        set in the original request, which is UnicodeCodePoints by default.
        Only returned for API versions v3.1-preview and up.
    :vartype text: str
    .. versionadded:: v3.1-preview
        The *offset* property.
    """
    def __init__(self, **kwargs):
        # All attributes default to None when not supplied.
        for attr in ("confidence_score", "text", "length", "offset"):
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, match):
        # v3.0 responses predate the `string_index_type` contract, so their
        # offset/length values use an unspecified encoding and are suppressed.
        is_v3_0 = isinstance(match, _v3_0_models.Match)
        return cls(
            confidence_score=match.confidence_score,
            text=match.text,
            length=None if is_v3_0 else match.length,
            offset=None if is_v3_0 else match.offset,
        )
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "LinkedEntityMatch(confidence_score=%s, text=%s, length=%s, offset=%s)" % (
                self.confidence_score, self.text, self.length, self.offset,
            )
        )[:1024]
class TextDocumentInput(DictMixin, MultiLanguageInput):
    """The input document to be analyzed by the service.
    :keyword str id: Unique, non-empty document identifier.
    :keyword str text: The input text to process.
    :keyword str language: This is the 2 letter ISO 639-1 representation
        of a language. For example, use "en" for English; "es" for Spanish etc. If
        not set, uses "en" for English as default.
    :ivar id: Required. Unique, non-empty document identifier.
    :vartype id: str
    :ivar text: Required. The input text to process.
    :vartype text: str
    :ivar language: This is the 2 letter ISO 639-1 representation
        of a language. For example, use "en" for English; "es" for Spanish etc. If
        not set, uses "en" for English as default.
    :vartype language: str
    """
    def __init__(self, **kwargs):
        # Let the generated MultiLanguageInput base consume the kwargs
        # first, then mirror the values onto this model's attributes.
        super(TextDocumentInput, self).__init__(**kwargs)
        for attr in ("id", "text", "language"):
            setattr(self, attr, kwargs.get(attr, None))
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return ("TextDocumentInput(id=%s, text=%s, language=%s)"
                % (self.id, self.text, self.language))[:1024]
class TextDocumentBatchStatistics(DictMixin):
    """TextDocumentBatchStatistics contains information about the
    request payload. Note: This object is not returned
    in the response and needs to be retrieved by a response hook.
    :ivar document_count: Number of documents submitted in the request.
    :vartype document_count: int
    :ivar valid_document_count: Number of valid documents. This
        excludes empty, over-size limit or non-supported languages documents.
    :vartype valid_document_count: int
    :ivar erroneous_document_count: Number of invalid documents.
        This includes empty, over-size limit or non-supported languages documents.
    :vartype erroneous_document_count: int
    :ivar transaction_count: Number of transactions for the request.
    :vartype transaction_count: long
    """
    def __init__(self, **kwargs):
        # All attributes default to None when not supplied.
        for attr in ("document_count", "valid_document_count",
                     "erroneous_document_count", "transaction_count"):
            setattr(self, attr, kwargs.get(attr, None))
    @classmethod
    def _from_generated(cls, statistics):
        if statistics is None:
            return None
        # The raw service payload uses camelCase keys; translate each one
        # to its snake_case attribute name.
        key_map = (
            ("document_count", "documentsCount"),
            ("valid_document_count", "validDocumentsCount"),
            ("erroneous_document_count", "erroneousDocumentsCount"),
            ("transaction_count", "transactionsCount"),
        )
        return cls(**{attr: statistics[key] for attr, key in key_map})
    def __repr__(self):
        # Bounded at 1024 characters like the other model reprs.
        return (
            "TextDocumentBatchStatistics(document_count=%s, valid_document_count=%s, erroneous_document_count=%s, "
            "transaction_count=%s)" % (
                self.document_count, self.valid_document_count,
                self.erroneous_document_count, self.transaction_count,
            )
        )[:1024]
class SentenceSentiment(DictMixin):
    """SentenceSentiment contains the predicted sentiment and
    confidence scores for each individual sentence in the document.
    :ivar text: The sentence text.
    :vartype text: str
    :ivar sentiment: The predicted Sentiment for the sentence.
        Possible values include: 'positive', 'neutral', 'negative'
    :vartype sentiment: str
    :ivar confidence_scores: The sentiment confidence score between 0
        and 1 for the sentence for all labels.
    :vartype confidence_scores:
        ~azure.ai.textanalytics.SentimentConfidenceScores
    :ivar int length: The sentence text length. This value depends on the value of the
        `string_index_type` parameter set in the original request, which is UnicodeCodePoints
        by default. Only returned for API versions v3.1-preview and up.
    :ivar int offset: The sentence text offset from the start of the document.
        The value depends on the value of the `string_index_type` parameter
        set in the original request, which is UnicodeCodePoints by default. Only returned for
        API versions v3.1-preview and up.
    :ivar mined_opinions: The list of opinions mined from this sentence.
        For example in the sentence "The food is good, but the service is bad", we would
        mine the two opinions "food is good" and "service is bad". Only returned
        if `show_opinion_mining` is set to True in the call to `analyze_sentiment` and
        api version is v3.1-preview and up.
    :vartype mined_opinions:
        list[~azure.ai.textanalytics.MinedOpinion]
    .. versionadded:: v3.1-preview
        The *offset* and *mined_opinions* properties.
    """
    def __init__(self, **kwargs):
        self.text = kwargs.get("text", None)
        self.sentiment = kwargs.get("sentiment", None)
        self.confidence_scores = kwargs.get("confidence_scores", None)
        self.length = kwargs.get("length", None)
        self.offset = kwargs.get("offset", None)
        self.mined_opinions = kwargs.get("mined_opinions", None)
    @classmethod
    def _from_generated(cls, sentence, results, sentiment):
        offset = sentence.offset
        length = sentence.length
        if isinstance(sentence, _v3_0_models.SentenceSentiment):
            # we do not return offset for v3.0 since
            # the correct encoding was not introduced for v3.0
            offset = None
            length = None
        # `targets` only exists on v3.1-preview+ generated models; on those,
        # each target is resolved into a MinedOpinion (which may need the
        # whole `results`/`sentiment` context to follow assessment refs).
        if hasattr(sentence, "targets"):
            mined_opinions = (
                [MinedOpinion._from_generated(target, results, sentiment) for target in sentence.targets] # pylint: disable=protected-access
                if sentence.targets else []
            )
        else:
            # Pre-opinion-mining API version: attribute stays None rather
            # than [], to distinguish "unsupported" from "none found".
            mined_opinions = None
        return cls(
            text=sentence.text,
            sentiment=sentence.sentiment,
            confidence_scores=SentimentConfidenceScores._from_generated(sentence.confidence_scores), # pylint: disable=protected-access
            length=length,
            offset=offset,
            mined_opinions=mined_opinions
        )
    def __repr__(self):
        # Truncated to 1024 characters like the other model reprs.
        return "SentenceSentiment(text={}, sentiment={}, confidence_scores={}, "\
            "length={}, offset={}, mined_opinions={})".format(
                self.text,
                self.sentiment,
                repr(self.confidence_scores),
                self.length,
                self.offset,
                repr(self.mined_opinions)
            )[:1024]
class MinedOpinion(DictMixin):
    """A mined opinion object represents an opinion we've extracted from a sentence.
    It consists of both a target that these opinions are about, and the assessments
    representing the opinion.
    :ivar target: The target of an opinion about a product/service.
    :vartype target: ~azure.ai.textanalytics.TargetSentiment
    :ivar assessments: The assessments representing the opinion of the target.
    :vartype assessments: list[~azure.ai.textanalytics.AssessmentSentiment]
    """
    def __init__(self, **kwargs):
        self.target = kwargs.get("target", None)
        self.assessments = kwargs.get("assessments", None)
    @staticmethod
    def _get_assessments(relations, results, sentiment):  # pylint: disable=unused-argument
        # Each relation of type "assessment" carries a JSON-pointer-style
        # ref; _get_indices extracts its numeric parts, of which the second
        # is the sentence index and the third the assessment index within
        # that sentence (the first is the document index).
        if not relations:
            return []
        assessment_relations = [r.ref for r in relations if r.relation_type == "assessment"]
        assessments = []
        for assessment_relation in assessment_relations:
            nums = _get_indices(assessment_relation)
            sentence_index = nums[1]
            assessment_index = nums[2]
            assessments.append(
                sentiment.sentences[sentence_index].assessments[assessment_index]
            )
        return assessments
    @classmethod
    def _from_generated(cls, target, results, sentiment):
        # Build the public opinion from a generated target plus all of the
        # assessments its relations point at.
        return cls(
            target=TargetSentiment._from_generated(target),  # pylint: disable=protected-access
            assessments=[
                AssessmentSentiment._from_generated(assessment)  # pylint: disable=protected-access
                for assessment in cls._get_assessments(target.relations, results, sentiment)
            ],
        )
    def __repr__(self):
        # Truncated to 1024 characters like the other model reprs.
        return "MinedOpinion(target={}, assessments={})".format(
            repr(self.target),
            repr(self.assessments)
        )[:1024]
class TargetSentiment(DictMixin):
    """Predicted sentiment, confidence scores and position information for a key
    component (target) of a product/service. For example in "The food at Hotel
    Foo is good", "food" is a key component of "Hotel Foo".

    :ivar str text: The text value of the target.
    :ivar str sentiment: The predicted Sentiment for the target. Possible values
        include 'positive', 'mixed', and 'negative'.
    :ivar confidence_scores: The sentiment confidence score between 0
        and 1 for the target for 'positive' and 'negative' labels. Its score
        for 'neutral' will always be 0.
    :vartype confidence_scores:
        ~azure.ai.textanalytics.SentimentConfidenceScores
    :ivar int length: The target text length. This value depends on the value of the
        `string_index_type` parameter set in the original request, which is
        UnicodeCodePoints by default.
    :ivar int offset: The target text offset from the start of the document.
        The value depends on the value of the `string_index_type` parameter
        set in the original request, which is UnicodeCodePoints by default.
    """

    def __init__(self, **kwargs):
        self.text = kwargs.get("text")
        self.sentiment = kwargs.get("sentiment")
        self.confidence_scores = kwargs.get("confidence_scores")
        self.length = kwargs.get("length")
        self.offset = kwargs.get("offset")

    @classmethod
    def _from_generated(cls, target):
        scores = SentimentConfidenceScores._from_generated(  # pylint: disable=protected-access
            target.confidence_scores
        )
        return cls(
            text=target.text,
            sentiment=target.sentiment,
            confidence_scores=scores,
            length=target.length,
            offset=target.offset,
        )

    def __repr__(self):
        return (
            f"TargetSentiment(text={self.text}, sentiment={self.sentiment}, "
            f"confidence_scores={self.confidence_scores!r}, "
            f"length={self.length}, offset={self.offset})"
        )[:1024]
class AssessmentSentiment(DictMixin):
    """Predicted sentiment, confidence scores and position information for an
    assessment given about a particular target. For example, in the sentence
    "The food is good", the assessment of the target 'food' is 'good'.

    :ivar str text: The assessment text.
    :ivar str sentiment: The predicted Sentiment for the assessment. Possible values
        include 'positive', 'mixed', and 'negative'.
    :ivar confidence_scores: The sentiment confidence score between 0
        and 1 for the assessment for 'positive' and 'negative' labels. Its score
        for 'neutral' will always be 0.
    :vartype confidence_scores:
        ~azure.ai.textanalytics.SentimentConfidenceScores
    :ivar int length: The assessment text length. This value depends on the value of the
        `string_index_type` parameter set in the original request, which is
        UnicodeCodePoints by default.
    :ivar int offset: The assessment text offset from the start of the document.
        The value depends on the value of the `string_index_type` parameter
        set in the original request, which is UnicodeCodePoints by default.
    :ivar bool is_negated: Whether the value of the assessment is negated. For example, in
        "The food is not good", the assessment "good" is negated.
    """

    def __init__(self, **kwargs):
        self.text = kwargs.get("text")
        self.sentiment = kwargs.get("sentiment")
        self.confidence_scores = kwargs.get("confidence_scores")
        self.length = kwargs.get("length")
        self.offset = kwargs.get("offset")
        self.is_negated = kwargs.get("is_negated")

    @classmethod
    def _from_generated(cls, assessment):
        scores = SentimentConfidenceScores._from_generated(  # pylint: disable=protected-access
            assessment.confidence_scores
        )
        return cls(
            text=assessment.text,
            sentiment=assessment.sentiment,
            confidence_scores=scores,
            length=assessment.length,
            offset=assessment.offset,
            is_negated=assessment.is_negated,
        )

    def __repr__(self):
        return (
            f"AssessmentSentiment(text={self.text}, sentiment={self.sentiment}, "
            f"confidence_scores={self.confidence_scores!r}, length={self.length}, "
            f"offset={self.offset}, is_negated={self.is_negated})"
        )[:1024]
class SentimentConfidenceScores(DictMixin):
    """The confidence scores (Softmax scores) between 0 and 1.
    Higher values indicate higher confidence.

    :ivar positive: Positive score.
    :vartype positive: float
    :ivar neutral: Neutral score.
    :vartype neutral: float
    :ivar negative: Negative score.
    :vartype negative: float
    """

    def __init__(self, **kwargs):
        self.positive = kwargs.get("positive", 0.0)
        self.neutral = kwargs.get("neutral", 0.0)
        self.negative = kwargs.get("negative", 0.0)

    @classmethod
    def _from_generated(cls, score):
        # Some generated score models carry no 'neutral' attribute at all;
        # default it to 0.0 in that case.
        neutral = getattr(score, "neutral", 0.0)
        return cls(positive=score.positive, neutral=neutral, negative=score.negative)

    def __repr__(self):
        return (
            f"SentimentConfidenceScores(positive={self.positive}, "
            f"neutral={self.neutral}, negative={self.negative})"
        )[:1024]
class AnalyzeActionsType(str, Enum):
    """The type of action that was applied to the documents.

    Inherits from ``str`` so members compare equal to their string values.
    """
    RECOGNIZE_ENTITIES = "recognize_entities"  #: Entities Recognition action.
    RECOGNIZE_PII_ENTITIES = "recognize_pii_entities"  #: PII Entities Recognition action.
    EXTRACT_KEY_PHRASES = "extract_key_phrases"  #: Key Phrase Extraction action.
    RECOGNIZE_LINKED_ENTITIES = "recognize_linked_entities"  #: Linked Entities Recognition action.
    ANALYZE_SENTIMENT = "analyze_sentiment"  #: Sentiment Analysis action.
class AnalyzeActionsResult(DictMixin):
    """Results of a recognize entities action run over a list of documents.
    Returned by `begin_analyze_actions`.

    :ivar document_results: A list of objects containing results for all Entity Recognition actions
        included in the analysis.
    :vartype document_results: list[~azure.ai.textanalytics.RecognizeEntitiesResult]
    :ivar bool is_error: Boolean check for error item when iterating over list of
        actions. Always False for an instance of a AnalyzeActionsResult.
    :ivar action_type: The type of action this class is a result of.
    :vartype action_type: str or ~azure.ai.textanalytics.AnalyzeActionsType
    :ivar ~datetime.datetime completed_on: Date and time (UTC) when the result completed
        on the service.
    """

    def __init__(self, **kwargs):
        self.document_results = kwargs.get("document_results")
        self.is_error = False
        self.action_type = kwargs.get("action_type")
        self.completed_on = kwargs.get("completed_on")

    def __repr__(self):
        return (
            f"AnalyzeActionsResult(document_results={self.document_results!r}, "
            f"is_error={self.is_error}, action_type={self.action_type}, "
            f"completed_on={self.completed_on})"
        )[:1024]
class AnalyzeActionsError(DictMixin):
    """AnalyzeActionsError is an error object which represents an
    error response for an action.

    :ivar error: The action result error.
    :vartype error: ~azure.ai.textanalytics.TextAnalyticsError
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always True for an instance of a DocumentError.
    """

    def __init__(self, **kwargs):
        self.error = kwargs.get("error")
        self.is_error = True

    def __repr__(self):
        # Bug fix: the format string previously lacked the closing ")" and the
        # [:1024] truncation that every other __repr__ in this module applies.
        return "AnalyzeActionsError(error={}, is_error={})".format(
            repr(self.error), self.is_error
        )[:1024]

    @classmethod
    def _from_generated(cls, error):
        """Build from a generated error model, keeping code/message/target."""
        return cls(
            error=TextAnalyticsError(code=error.code, message=error.message, target=error.target)
        )
class RecognizeEntitiesAction(DictMixin):
    """RecognizeEntitiesAction encapsulates the parameters for starting a long-running Entities
    Recognition operation.

    If you just want to recognize entities in a list of documents, and not perform multiple
    long running actions on the input of documents, call method `recognize_entities` instead
    of interfacing with this model.

    :keyword str model_version: The model version to use for the analysis.
    :keyword str string_index_type: Specifies the method used to interpret string offsets.
        `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
        you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
        see https://aka.ms/text-analytics-offsets
    :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
        logged on the service side for troubleshooting. By default, Text Analytics logs your
        input text for 48 hours, solely to allow for troubleshooting issues in providing you with
        the Text Analytics natural language processing functions. Setting this parameter to true,
        disables input logging and may limit our ability to remediate issues that occur. Please see
        Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
        additional details, and Microsoft Responsible AI principles at
        https://www.microsoft.com/ai/responsible-ai.
    :ivar str model_version: See the keyword argument of the same name.
    :ivar str string_index_type: See the keyword argument of the same name.
    :ivar bool disable_service_logs: See the keyword argument of the same name.
    """

    def __init__(self, **kwargs):
        self.model_version = kwargs.get("model_version", "latest")
        self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
        self.disable_service_logs = kwargs.get("disable_service_logs", False)

    def __repr__(self):
        # Dropped the spurious **kwargs parameter: repr() never passes arguments.
        return "RecognizeEntitiesAction(model_version={}, string_index_type={}, disable_service_logs={})" \
            .format(self.model_version, self.string_index_type, self.disable_service_logs)[:1024]

    def to_generated(self):
        """Convert to the autorest-generated task model."""
        return _latest_preview_models.EntitiesTask(
            parameters=_latest_preview_models.EntitiesTaskParameters(
                model_version=self.model_version,
                string_index_type=self.string_index_type,
                logging_opt_out=self.disable_service_logs,
            )
        )
class AnalyzeSentimentAction(DictMixin):
    """AnalyzeSentimentAction encapsulates the parameters for starting a long-running
    Sentiment Analysis operation.

    If you just want to analyze sentiment in a list of documents, and not perform multiple
    long running actions on the input of documents, call method `analyze_sentiment` instead
    of interfacing with this model.

    :keyword str model_version: The model version to use for the analysis.
    :keyword bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
        granular analysis around the aspects of a product or service (also known as
        aspect-based sentiment analysis). If set to true, the returned
        :class:`~azure.ai.textanalytics.SentenceSentiment` objects
        will have property `mined_opinions` containing the result of this analysis.
    :keyword str string_index_type: Specifies the method used to interpret string offsets.
        `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
        you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
        see https://aka.ms/text-analytics-offsets
    :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
        logged on the service side for troubleshooting. By default, Text Analytics logs your
        input text for 48 hours, solely to allow for troubleshooting issues in providing you with
        the Text Analytics natural language processing functions. Setting this parameter to true,
        disables input logging and may limit our ability to remediate issues that occur. Please see
        Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
        additional details, and Microsoft Responsible AI principles at
        https://www.microsoft.com/ai/responsible-ai.
    :ivar str model_version: See the keyword argument of the same name.
    :ivar bool show_opinion_mining: See the keyword argument of the same name.
    :ivar str string_index_type: See the keyword argument of the same name.
    :ivar bool disable_service_logs: See the keyword argument of the same name.
    """

    def __init__(self, **kwargs):
        self.model_version = kwargs.get('model_version', "latest")
        self.show_opinion_mining = kwargs.get('show_opinion_mining', False)
        self.string_index_type = kwargs.get('string_index_type', None)
        self.disable_service_logs = kwargs.get("disable_service_logs", False)

    def __repr__(self):
        # Bug fix: the format string previously lacked the closing ")".
        # Also dropped the spurious **kwargs parameter on __repr__.
        return "AnalyzeSentimentAction(model_version={}, show_opinion_mining={}, string_index_type={}, " \
            "disable_service_logs={})".format(
                self.model_version,
                self.show_opinion_mining,
                self.string_index_type,
                self.disable_service_logs,
            )[:1024]

    def to_generated(self):
        """Convert to the autorest-generated task model."""
        return _latest_preview_models.SentimentAnalysisTask(
            parameters=_latest_preview_models.SentimentAnalysisTaskParameters(
                model_version=self.model_version,
                opinion_mining=self.show_opinion_mining,
                string_index_type=self.string_index_type,
                logging_opt_out=self.disable_service_logs,
            )
        )
class RecognizePiiEntitiesAction(DictMixin):
    """RecognizePiiEntitiesAction encapsulates the parameters for starting a long-running PII
    Entities Recognition operation.

    If you just want to recognize pii entities in a list of documents, and not perform multiple
    long running actions on the input of documents, call method `recognize_pii_entities` instead
    of interfacing with this model.

    :keyword str model_version: The model version to use for the analysis.
    :keyword str domain_filter: An optional string to set the PII domain to include only a
        subset of the PII entity categories. Possible values include 'phi' or None.
    :keyword str string_index_type: Specifies the method used to interpret string offsets.
        `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
        you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
        see https://aka.ms/text-analytics-offsets
    :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
        logged on the service side for troubleshooting. By default, Text Analytics logs your
        input text for 48 hours, solely to allow for troubleshooting issues in providing you with
        the Text Analytics natural language processing functions. Setting this parameter to true,
        disables input logging and may limit our ability to remediate issues that occur. Please see
        Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
        additional details, and Microsoft Responsible AI principles at
        https://www.microsoft.com/ai/responsible-ai.
    :ivar str model_version: See the keyword argument of the same name.
    :ivar str domain_filter: See the keyword argument of the same name.
    :ivar str string_index_type: See the keyword argument of the same name.
    :ivar bool disable_service_logs: See the keyword argument of the same name.
    """

    def __init__(self, **kwargs):
        self.model_version = kwargs.get("model_version", "latest")
        self.domain_filter = kwargs.get("domain_filter", None)
        self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
        self.disable_service_logs = kwargs.get("disable_service_logs", False)

    def __repr__(self):
        # Bug fix: the format string previously lacked the closing ")".
        # Also dropped the spurious **kwargs parameter on __repr__.
        return "RecognizePiiEntitiesAction(model_version={}, domain_filter={}, string_index_type={}, " \
            "disable_service_logs={})".format(
                self.model_version,
                self.domain_filter,
                self.string_index_type,
                self.disable_service_logs,
            )[:1024]

    def to_generated(self):
        """Convert to the autorest-generated task model."""
        return _latest_preview_models.PiiTask(
            parameters=_latest_preview_models.PiiTaskParameters(
                model_version=self.model_version,
                domain=self.domain_filter,
                string_index_type=self.string_index_type,
                logging_opt_out=self.disable_service_logs
            )
        )
class ExtractKeyPhrasesAction(DictMixin):
    """ExtractKeyPhrasesAction encapsulates the parameters for starting a long-running key phrase
    extraction operation.

    If you just want to extract key phrases from a list of documents, and not perform multiple
    long running actions on the input of documents, call method `extract_key_phrases` instead
    of interfacing with this model.

    :keyword str model_version: The model version to use for the analysis.
    :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
        logged on the service side for troubleshooting. By default, Text Analytics logs your
        input text for 48 hours, solely to allow for troubleshooting issues in providing you with
        the Text Analytics natural language processing functions. Setting this parameter to true,
        disables input logging and may limit our ability to remediate issues that occur. Please see
        Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
        additional details, and Microsoft Responsible AI principles at
        https://www.microsoft.com/ai/responsible-ai.
    :ivar str model_version: See the keyword argument of the same name.
    :ivar bool disable_service_logs: See the keyword argument of the same name.
    """

    def __init__(self, **kwargs):
        self.model_version = kwargs.get("model_version", "latest")
        self.disable_service_logs = kwargs.get("disable_service_logs", False)

    def __repr__(self):
        # Dropped the spurious **kwargs parameter: repr() never passes arguments.
        return "ExtractKeyPhrasesAction(model_version={}, disable_service_logs={})" \
            .format(self.model_version, self.disable_service_logs)[:1024]

    def to_generated(self):
        """Convert to the autorest-generated task model."""
        return _latest_preview_models.KeyPhrasesTask(
            parameters=_latest_preview_models.KeyPhrasesTaskParameters(
                model_version=self.model_version,
                logging_opt_out=self.disable_service_logs,
            )
        )
class RecognizeLinkedEntitiesAction(DictMixin):
    """RecognizeLinkedEntitiesAction encapsulates the parameters for starting a long-running
    Linked Entities Recognition operation.

    If you just want to recognize linked entities in a list of documents, and not perform multiple
    long running actions on the input of documents, call method `recognize_linked_entities` instead
    of interfacing with this model.

    :keyword str model_version: The model version to use for the analysis.
    :keyword str string_index_type: Specifies the method used to interpret string offsets.
        `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
        you can also pass in `Utf16CodePoint` or `TextElement_v8`. For additional information
        see https://aka.ms/text-analytics-offsets
    :keyword bool disable_service_logs: If set to true, you opt-out of having your text input
        logged on the service side for troubleshooting. By default, Text Analytics logs your
        input text for 48 hours, solely to allow for troubleshooting issues in providing you with
        the Text Analytics natural language processing functions. Setting this parameter to true,
        disables input logging and may limit our ability to remediate issues that occur. Please see
        Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
        additional details, and Microsoft Responsible AI principles at
        https://www.microsoft.com/ai/responsible-ai.
    :ivar str model_version: See the keyword argument of the same name.
    :ivar str string_index_type: See the keyword argument of the same name.
    :ivar bool disable_service_logs: See the keyword argument of the same name.
    """

    def __init__(self, **kwargs):
        self.model_version = kwargs.get("model_version", "latest")
        self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint")
        self.disable_service_logs = kwargs.get("disable_service_logs", False)

    def __repr__(self):
        # Bug fix: the ")" was previously misplaced after string_index_type and
        # the format string never closed after disable_service_logs.
        # Also dropped the spurious **kwargs parameter on __repr__.
        return "RecognizeLinkedEntitiesAction(model_version={}, string_index_type={}, " \
            "disable_service_logs={})".format(
                self.model_version, self.string_index_type, self.disable_service_logs
            )[:1024]

    def to_generated(self):
        """Convert to the autorest-generated task model."""
        return _latest_preview_models.EntityLinkingTask(
            parameters=_latest_preview_models.EntityLinkingTaskParameters(
                model_version=self.model_version,
                string_index_type=self.string_index_type,
                logging_opt_out=self.disable_service_logs,
            )
        )
| 45.808858 | 142 | 0.686113 |
0415f2dec54208ad478c239ed38b100a6e3df5fe | 725 | py | Python | peering/fields.py | charlesmonson/peering-manager | b67532ebb245a6384106385f8dbf0d87dbf87a13 | [
"Apache-2.0"
] | null | null | null | peering/fields.py | charlesmonson/peering-manager | b67532ebb245a6384106385f8dbf0d87dbf87a13 | [
"Apache-2.0"
] | null | null | null | peering/fields.py | charlesmonson/peering-manager | b67532ebb245a6384106385f8dbf0d87dbf87a13 | [
"Apache-2.0"
] | 1 | 2020-03-31T02:04:39.000Z | 2020-03-31T02:04:39.000Z | from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator
from django.db import models
class ASNField(models.BigIntegerField):
    """Django model field storing a 32-bit BGP Autonomous System Number."""
    description = "32-bit ASN field"
    # Accepts values 1 through 4294967295 (2**32 - 1).
    default_validators = [MinValueValidator(1), MaxValueValidator(4294967295)]
class CommunityField(models.CharField):
    """Django model field storing a BGP community or large community string."""
    description = "BGP community or large community field"
    default_validators = [
        # BGP community ("asn:value") or BGP large community ("a:b:c:d").
        # Bug fix: the alternation is now grouped so ^ and $ anchor BOTH
        # forms. The previous pattern "^A|B$" anchored only the start of the
        # first alternative and only the end of the second, so strings with
        # trailing (or leading) garbage passed validation.
        RegexValidator(r"^((\d{1,5}:\d{1,5})|(\d{1,10}:\d{1,10}:\d{1,10}:\d{1,10}))$")
    ]
class TTLField(models.PositiveSmallIntegerField):
    """Django model field storing a TTL (time-to-live) value."""
    # Bug fix: the description previously claimed "0 to 255", but the
    # validators below enforce a minimum of 1; the text now matches the
    # enforced range.
    description = "TTL field allowing value from 1 to 255"
    default_validators = [MinValueValidator(1), MaxValueValidator(255)]
| 34.52381 | 87 | 0.731034 |
5eaab3542f7dd884e61c77f8af27b9d8e0da5a84 | 39 | py | Python | job/schedule_funcs/__init__.py | RockFeng0/flask_demo | cf5631935de628f65c37f32aa7875cecc37f707b | [
"MIT"
] | null | null | null | job/schedule_funcs/__init__.py | RockFeng0/flask_demo | cf5631935de628f65c37f32aa7875cecc37f707b | [
"MIT"
] | null | null | null | job/schedule_funcs/__init__.py | RockFeng0/flask_demo | cf5631935de628f65c37f32aa7875cecc37f707b | [
"MIT"
] | 1 | 2020-06-27T14:04:53.000Z | 2020-06-27T14:04:53.000Z | #! python3
# -*- encoding: utf-8 -*-
| 7.8 | 25 | 0.487179 |
4fca296c707fb5408d6ee409dbbf1da27366343e | 2,333 | py | Python | Env/opter.py | hassaanakram/Resource-Allocation-using-deeprl | f36bfb6ba9956a0b072421a8feb8034428571403 | [
"MIT"
] | 33 | 2020-05-19T10:56:45.000Z | 2022-03-08T11:40:53.000Z | Env/opter.py | chisyliu/Resource-Allocation-using-deeprl | fec93a99177115ee32652483fd1005cdbf67ae56 | [
"MIT"
] | 5 | 2020-05-11T12:41:12.000Z | 2022-03-01T10:49:36.000Z | Env/opter.py | chisyliu/Resource-Allocation-using-deeprl | fec93a99177115ee32652483fd1005cdbf67ae56 | [
"MIT"
] | 17 | 2020-02-09T10:50:41.000Z | 2022-03-16T01:57:54.000Z | from cvxopt import matrix, spmatrix, spdiag, solvers
class CvxOpt:
def __init__(self):
self.NUM_RRH = 0
self.NUM_USR = 0
self.H = matrix()
self.__clear__()
def __clear__(self):
self._G = []
self._hq = []
self.c = matrix(0., (self.NUM_RRH * self.NUM_USR + 1, 1))
self.c[0, 0] = 1
self.H = matrix()
def feed(self, h=matrix(), cof=matrix(), p=matrix(), theta=matrix(), num_rrh=0, num_usr=0):
self.NUM_RRH = num_rrh
self.NUM_USR = num_usr
self.__clear__()
self.H = h
for k_ in range(self.NUM_USR):
w = [[0 for i in range(self.NUM_USR + 2)] for k in range(self.NUM_RRH * self.NUM_USR)]
for l in range(self.NUM_RRH):
for k in range(self.NUM_USR):
if k == k_:
w[l * self.NUM_USR + k][0] = - cof[k_] * h[l, k_]
w[l * self.NUM_USR + k][k + 1] = h[l, k_] * -1.
w.insert(0, [0 for i in range(self.NUM_USR + 2)])
self._G += [matrix(w)]
self._hq += [spmatrix([theta[k_]], [self.NUM_USR + 2 - 1], [0], (self.NUM_USR + 2, 1))]
for l_ in range(self.NUM_RRH):
sp_value = []
sp_index_i = []
sp_index_j = []
for k in range(self.NUM_USR):
sp_value.append(-1.)
sp_index_i.append(1 + l_ * self.NUM_USR + k)
sp_index_j.append(1 + k)
P = spmatrix(sp_value, sp_index_i, sp_index_j, size=(self.NUM_RRH * self.NUM_USR + 1, 1 + self.NUM_USR))
self._G += [P.T]
self._hq += [spmatrix([p[l_]], [0], [0], (1 + self.NUM_USR, 1))]
self._hq += [matrix(0., (1, self.NUM_USR * self.NUM_RRH + 1)).T]
d = matrix(-1., (1, self.NUM_USR * self.NUM_RRH + 1))
D = spdiag(d)
self._G += [D.T]
def solve(self):
solvers.options['show_progress'] = False
# [print(i) for i in self._hq]
sol = solvers.socp(self.c, Gq=self._G, hq=self._hq)
return sol
def showParams(self, sol):
print('H:')
print(self.H)
print('C:')
print(self.c)
print('G:')
for i in self._G:
print(i)
print('Hq:')
for i in self._hq:
print(i)
| 32.859155 | 116 | 0.487355 |
a28e087339b66f42a3091bb11e275ccbf3d645c0 | 3,186 | py | Python | misc/python_sealog/misc.py | WHOIGit/ndsf-sealog-server | e57843e3e23a924ccf6fc1ef1e40d92f36a3b612 | [
"MIT"
] | 4 | 2019-10-29T21:53:13.000Z | 2021-12-02T00:38:42.000Z | misc/python_sealog/misc.py | WHOIGit/ndsf-sealog-server | e57843e3e23a924ccf6fc1ef1e40d92f36a3b612 | [
"MIT"
] | 14 | 2020-05-28T16:39:30.000Z | 2021-05-22T06:01:40.000Z | misc/python_sealog/misc.py | WHOIGit/ndsf-sealog-server | e57843e3e23a924ccf6fc1ef1e40d92f36a3b612 | [
"MIT"
] | 1 | 2020-01-31T00:00:42.000Z | 2020-01-31T00:00:42.000Z | #!/usr/bin/env python3
'''
FILE: misc.py
DESCRIPTION: This script contains miscellaneous wrapper functions for the
sealog-server api routes.
BUGS:
NOTES:
AUTHOR: Webb Pinner
COMPANY: OceanDataTools.org
VERSION: 0.1
CREATED: 2021-01-01
REVISION:
LICENSE INFO: This code is licensed under MIT license (see LICENSE.txt for details)
Copyright (C) OceanDataTools.org 2021
'''
import json
import logging
import requests
from .settings import API_SERVER_URL, API_SERVER_FILE_PATH, HEADERS, EVENT_AUX_DATA_API_PATH
# Only aux-data records from these data sources are treated as framegrabs.
DATA_SOURCE_FILTER = ['vehicleRealtimeFramegrabberData']
# Server-side directory containing framegrab image files.
IMAGE_PATH = API_SERVER_FILE_PATH + "/images"
def get_framegrab_list_by_lowering(lowering_uid, api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the framegrab filenames recorded for the given lowering_uid.
    Errors are logged and an empty (or partial) list is returned.
    '''
    logging.debug("Exporting event data")
    # NOTE(review): the query key sent is 'datasource' while the joiner uses
    # '&data_source='; with a single-element DATA_SOURCE_FILTER only the first
    # key ever appears -- confirm the parameter name against the server API.
    source_query = '&data_source='.join(DATA_SOURCE_FILTER)
    filenames = []
    try:
        url = '{}{}/bylowering/{}?datasource={}'.format(
            api_server_url, EVENT_AUX_DATA_API_PATH, lowering_uid, source_query)
        logging.debug("URL: %s", url)
        response = requests.get(url, headers=headers)
        if response.status_code != 404:
            for record in json.loads(response.text):
                filenames.extend(
                    entry['data_value']
                    for entry in record['data_array']
                    if entry['data_name'] == 'filename'
                )
    except Exception as error:
        logging.error(str(error))
    return filenames
def get_framegrab_list_by_cruise(cruise_uid, api_server_url=API_SERVER_URL, headers=HEADERS):
    '''
    Return the framegrab filenames recorded for the given cruise_uid.
    Errors are logged and an empty (or partial) list is returned.
    '''
    logging.debug("Exporting event data")
    # NOTE(review): same 'datasource' vs 'data_source' key mismatch as the
    # by-lowering variant -- confirm the parameter name against the server API.
    source_query = '&data_source='.join(DATA_SOURCE_FILTER)
    filenames = []
    try:
        url = '{}{}/bycruise/{}?datasource={}'.format(
            api_server_url, EVENT_AUX_DATA_API_PATH, cruise_uid, source_query)
        logging.debug("URL: %s", url)
        response = requests.get(url, headers=headers)
        if response.status_code != 404:
            for record in json.loads(response.text):
                filenames.extend(
                    entry['data_value']
                    for entry in record['data_array']
                    if entry['data_name'] == 'filename'
                )
    except Exception as error:
        logging.error(str(error))
    return filenames
def get_framegrab_list_by_file(filename):
    '''
    Return the framegrab filenames found in the given aux-data JSON file.
    Errors are logged and an empty (or partial) list is returned.
    '''
    logging.debug(filename)
    filenames = []
    try:
        with open(filename, 'r') as handle:
            records = json.loads(handle.read())
            for record in records:
                # Skip aux data that did not come from a framegrabber source.
                if record['data_source'] not in DATA_SOURCE_FILTER:
                    continue
                for entry in record['data_array']:
                    if entry['data_name'] == 'filename':
                        filenames.append(entry['data_value'])
    except Exception as error:
        logging.error(str(error))
    return filenames
| 30.342857 | 111 | 0.645951 |
6b03ae40aca0177af209394a1fb16dbf4d54975a | 6,974 | py | Python | tests/adapter/test_file.py | raember/spoofbot | 64575eb515ae9eb293efa7380830e38ead602efa | [
"MIT"
] | null | null | null | tests/adapter/test_file.py | raember/spoofbot | 64575eb515ae9eb293efa7380830e38ead602efa | [
"MIT"
] | null | null | null | tests/adapter/test_file.py | raember/spoofbot | 64575eb515ae9eb293efa7380830e38ead602efa | [
"MIT"
] | null | null | null | import logging
import unittest
from pathlib import Path
from requests import Session
from urllib3.util import parse_url
from spoofbot.adapter import FileCache
from spoofbot.util import to_filepath
# Verbose logging for the tests, but quiet the noisy urllib3 pool logger.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('urllib3.connectionpool').setLevel(logging.INFO)

# Well-known URLs used as request fixtures.
DUCKDUCKGO = parse_url('https://www.duckduckgo.com/')
DUCKDUCKGO_NO_REDIRECT = parse_url('https://duckduckgo.com/')
HTTPBIN = parse_url('https://httpbin.org/')
HTTPBIN_ANYTHING = parse_url('https://httpbin.org/anything')
HTTPBIN_ANYTHING2 = parse_url('https://httpbin.org/anything2')
HTTPBIN_HEADERS = parse_url('https://httpbin.org/headers')

# Repository root: three levels above this test file (used for the cache dir).
p = Path(__file__).parent.parent.parent
class CacheAdapterTest(unittest.TestCase):
# Shared fixtures, populated once in setUpClass: a FileCache adapter
# mounted on a single Session for both http and https.
cache_adapter: FileCache = None
session: Session = None
@classmethod
def setUpClass(cls) -> None:
    """Create the shared FileCache adapter and mount it on one Session."""
    cls.cache_adapter = FileCache(p / 'tests/adapter/.cache')
    cls.session = Session()
    for scheme in ('http://', 'https://'):
        cls.session.mount(scheme, cls.cache_adapter)
def test_request_hit(self):
self.cache_adapter.is_active = True
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
# If we request the same url twice, the second time is bound to be a hit
self.assertIsNotNone(self.session.get(DUCKDUCKGO_NO_REDIRECT))
self.assertIsNotNone(self.session.get(DUCKDUCKGO_NO_REDIRECT))
self.assertTrue(self.cache_adapter.hit)
def test_request_miss(self):
self.cache_adapter.is_active = True
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
self.cache_adapter.delete(DUCKDUCKGO_NO_REDIRECT)
# If we request a url that is not cached, it won't be a hit
self.assertIsNotNone(self.session.get(DUCKDUCKGO_NO_REDIRECT))
self.assertFalse(self.cache_adapter.hit)
def test_request_backup(self):
self.cache_adapter.is_active = False
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
self.cache_adapter.delete(DUCKDUCKGO_NO_REDIRECT)
# If we have a response cached already, but bypass the cache to request data from the remote,
# we expect the cache to create a backup of the original cached response.
backup = self.cache_adapter.backup()
self.assertEqual(backup, self.cache_adapter.backup_data)
self.assertEqual(0, len(backup.requests))
self.assertIsNotNone(self.session.get(DUCKDUCKGO_NO_REDIRECT))
self.assertFalse(self.cache_adapter.hit)
self.assertEqual(1, len(backup.requests))
self.assertIsNotNone(self.session.get(DUCKDUCKGO_NO_REDIRECT))
self.assertFalse(self.cache_adapter.hit)
self.assertEqual(2, len(backup.requests))
self.assertEqual(DUCKDUCKGO_NO_REDIRECT.url, backup.requests[0][0].url)
self.assertTrue(backup.requests[1][1].startswith(b'<!DOCTYPE html>'))
backup.stop_backup()
self.assertIsNone(self.cache_adapter.backup_data)
def test_request_backup_with(self):
self.cache_adapter.is_active = False
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
self.cache_adapter.delete(DUCKDUCKGO_NO_REDIRECT)
# If we have a response cached already, but bypass the cache to request data from the remote,
# we expect the cache to create a backup of the original cached response.
with self.cache_adapter.backup() as backup:
self.assertEqual(backup, self.cache_adapter.backup_data)
self.assertEqual(0, len(backup.requests))
self.assertIsNotNone(self.session.get(DUCKDUCKGO_NO_REDIRECT))
self.assertFalse(self.cache_adapter.hit)
self.assertEqual(1, len(backup.requests))
self.assertIsNotNone(self.session.get(DUCKDUCKGO_NO_REDIRECT))
self.assertFalse(self.cache_adapter.hit)
self.assertEqual(2, len(backup.requests))
self.assertEqual(DUCKDUCKGO_NO_REDIRECT.url, backup.requests[0][0].url)
self.assertTrue(backup.requests[1][1].startswith(b'<!DOCTYPE html>'))
self.assertIsNone(self.cache_adapter.backup_data)
def test_delete(self):
self.cache_adapter.is_active = True
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
self.session.get(HTTPBIN_ANYTHING, headers={'Accept': 'text/json'})
# If we have cached responses, after deleting them and requesting the same url, it will be a miss
self.cache_adapter.delete(HTTPBIN_ANYTHING)
self.session.get(HTTPBIN_ANYTHING, headers={'Accept': 'text/json'})
self.assertFalse(self.cache_adapter.hit)
def test_delete_last(self):
self.cache_adapter.is_active = True
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
resp = self.session.get(HTTPBIN_HEADERS, headers={'Accept': 'text/json'})
# If we delete the last response cached, a new request to that url will be a miss
to_filepath(HTTPBIN_HEADERS, self.cache_adapter.cache_path, self.cache_adapter.ignore_queries).unlink()
self.session.get(HTTPBIN_HEADERS, headers={'Accept': 'text/json'})
self.assertFalse(self.cache_adapter.hit)
def test_would_hit(self):
self.cache_adapter.is_active = True
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
resp = self.session.get(HTTPBIN_ANYTHING, headers={'Accept': 'text/json'})
# If we check for a cached response that we requested earlier, it will show as a hit, unless deleted
self.assertTrue(self.cache_adapter.is_hit(HTTPBIN_ANYTHING))
to_filepath(HTTPBIN_ANYTHING, self.cache_adapter.cache_path, self.cache_adapter.ignore_queries).unlink()
self.assertFalse(self.cache_adapter.is_hit(HTTPBIN_ANYTHING))
def test_with_mode(self):
self.cache_adapter.is_active = True
self.cache_adapter.is_passive = True
self.cache_adapter.is_offline = False
with self.cache_adapter.use_mode(True, False, False):
self.assertTrue(self.cache_adapter.is_active)
self.assertFalse(self.cache_adapter.is_passive)
self.assertFalse(self.cache_adapter.is_offline)
with self.cache_adapter.use_mode(True, False, True):
self.assertTrue(self.cache_adapter.is_active)
self.assertFalse(self.cache_adapter.is_passive)
self.assertTrue(self.cache_adapter.is_offline)
self.assertTrue(self.cache_adapter.is_active)
self.assertFalse(self.cache_adapter.is_passive)
self.assertFalse(self.cache_adapter.is_offline)
self.assertTrue(self.cache_adapter.is_active)
self.assertTrue(self.cache_adapter.is_passive)
self.assertFalse(self.cache_adapter.is_offline)
| 47.767123 | 112 | 0.71107 |
32e4f37e915b98bf0a7f50ac065491effbd5c068 | 60 | py | Python | examples/mappings/__init__.py | diSimplex/tex2tex | f262a6b16b8e41b3c2701f0a85b89f16c12e4fbc | [
"Apache-2.0"
] | null | null | null | examples/mappings/__init__.py | diSimplex/tex2tex | f262a6b16b8e41b3c2701f0a85b89f16c12e4fbc | [
"Apache-2.0"
] | null | null | null | examples/mappings/__init__.py | diSimplex/tex2tex | f262a6b16b8e41b3c2701f0a85b89f16c12e4fbc | [
"Apache-2.0"
] | null | null | null | # This file simply marks this directory as a Python package
| 30 | 59 | 0.8 |
4cd987b81c272ac7bcd121c6420fc0c02f87b2cb | 77,263 | py | Python | meerk40t/gui/rasterwizard.py | joerlane/meerk40t | a75d78848ff1682640e112111fb6ac4e23e08616 | [
"MIT"
] | null | null | null | meerk40t/gui/rasterwizard.py | joerlane/meerk40t | a75d78848ff1682640e112111fb6ac4e23e08616 | [
"MIT"
] | null | null | null | meerk40t/gui/rasterwizard.py | joerlane/meerk40t | a75d78848ff1682640e112111fb6ac4e23e08616 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.9.6 on Wed Jul 22 19:31:45 2020
#
import threading
from copy import deepcopy
from math import ceil
import wx
from meerk40t.kernel import signal_listener
from ..core.units import (
UNITS_CM,
UNITS_INCH,
UNITS_MM,
UNITS_PER_CM,
UNITS_PER_INCH,
UNITS_PER_MM,
UNITS_PERCENT,
)
from ..image.imagetools import RasterScripts
from ..svgelements import Matrix
from .icons import icons8_fantasy_50
from .laserrender import LaserRender
from .mwindow import MWindow
from .zmatrix import ZMatrix
# Alias used throughout this module to mark user-visible strings for translation.
_ = wx.GetTranslation

# Mils (thousandths of an inch) per millimetre: 1 mm = 39.3701 mil.
MILS_IN_MM = 39.3701
class RasterWizardPanel(wx.Panel):
    """Interactive preview panel for the Raster Wizard.

    Shows the currently emphasized image element, applies the selected
    raster-script operations to it in a background thread (``wiz_img``),
    and lets the user pan/zoom the processed preview with the mouse.
    The list box on the left selects which operation's settings panel
    (crop, resample, dither, ...) is shown on the right.

    Threading model: ``wiz_img`` runs off the GUI thread; it communicates
    back via "RasterWizard-Refresh" signals, which are marshalled onto
    the GUI thread in ``on_raster_wizard_refresh_signal``.  Access to
    ``wizard_thread``/``needs_update`` is guarded by ``thread_update_lock``.

    NOTE(review): several methods read ``self.pil_image``, but no code in
    this class ever assigns that attribute — presumably it is expected to
    come from elsewhere or this path is dead; verify before relying on it
    (``on_buttons_operations`` is already marked "TODO: broken").
    """

    def __init__(self, *args, context=None, script=None, **kwds):
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.context = context
        if script is None:
            script = "Gravy"  # default raster script name
        self._preview_panel_buffer = None  # offscreen wx.Bitmap for the preview
        self.matrix = Matrix()  # scene-to-window view transform (pan/zoom)
        self.previous_window_position = None  # last drag position, None when not dragging
        self.previous_scene_position = None
        self.node = None  # the "elem image" node being processed
        self.wx_bitmap_image = None  # cached thumbnail; None forces re-render
        self.image_width, self.image_height = None, None
        # NOTE(review): initialized as a dict but later indexed with an int
        # and iterated as a list of operation dicts (see panel_select_op /
        # set_wizard_script) — a list literal would match actual usage.
        self.ops = dict()
        self.script = script
        self.selected_op = None
        self.wizard_thread = None  # background thread running wiz_img, or None
        self.focus_factor = None  # pending zoom factor from a step change
        self.needs_centering = True  # center viewport on image after next process
        self.needs_update = True  # set when ops/image changed; consumed by wiz_img
        self.sizer_operation_panels = None
        self.panel_preview = wx.Panel(self, wx.ID_ANY)
        # self.panel_preview = ScenePanel(
        #     self.context,
        #     self,
        #     scene_name="rasterwizard",
        #     style=wx.EXPAND | wx.WANTS_CHARS,
        # )
        # self.widget_scene = self.panel_preview.scene
        list_choices = []
        if self.ops is not None:
            list_choices = [_(op["name"]) for op in self.ops]
        self.list_operation = wx.ListBox(self, wx.ID_ANY, choices=list_choices)
        self.button_operations = wx.BitmapButton(
            self, wx.ID_ANY, icons8_fantasy_50.GetBitmap()
        )
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_LISTBOX, self.on_list_operation, self.list_operation)
        self.Bind(wx.EVT_BUTTON, self.on_buttons_operations, self.button_operations)
        # end wxGlade
        # Mouse/paint handlers implementing pan (drag), zoom (wheel) and
        # the script context menu on the preview surface.
        self.panel_preview.Bind(wx.EVT_PAINT, self.on_preview_panel_paint)
        self.panel_preview.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
        self.panel_preview.Bind(wx.EVT_MOTION, self.on_preview_mouse_move)
        self.panel_preview.Bind(wx.EVT_MOUSEWHEEL, self.on_preview_mousewheel)
        self.panel_preview.Bind(wx.EVT_MIDDLE_UP, self.on_preview_mouse_middle_up)
        self.panel_preview.Bind(wx.EVT_MIDDLE_DOWN, self.on_preview_mouse_middle_down)
        self.panel_preview.Bind(wx.EVT_LEFT_DOWN, self.on_preview_mouse_left_down)
        self.panel_preview.Bind(wx.EVT_LEFT_UP, self.on_preview_mouse_left_up)
        self.panel_preview.Bind(wx.EVT_RIGHT_DOWN, self.on_preview_mouse_right_down)
        self.panel_preview.Bind(
            wx.EVT_ENTER_WINDOW, lambda event: self.panel_preview.SetFocus()
        )  # Focus follows mouse.
        self.on_size(None)
        self.Bind(wx.EVT_SIZE, self.on_size, self)
        self.thread_update_lock = threading.Lock()

    def set_wizard_script(self, name=None, ops=None):
        """Load a raster script by name (or take an ops list directly) and reprocess.

        @param name: registered raster-script name to deep-copy from the kernel lookup.
        @param ops: explicit operations list; used only when name is None.
        """
        if name is None:
            if ops is None:
                return
            self.ops = ops
        else:
            # Deep copy so edits in the settings panels don't mutate the registry.
            self.ops = deepcopy(self.context.lookup("raster_script", name))
        self.list_operation.Clear()
        if self.ops is not None:
            list_choices = [_(op["name"]) for op in self.ops]
            for c in list_choices:
                self.list_operation.Append(c)
        self.needs_update = True
        # NOTE(review): called synchronously here, not via the wizard thread.
        self.wiz_img()
        self.Layout()
        self.Refresh()
        self.on_size()

    def pane_show(self):
        """Called when the pane becomes visible; loads the configured script."""
        if self.script is not None:
            self.set_wizard_script(self.script)
        self.context.signal("RasterWizard-Image")
        if self.list_operation.GetCount() > 0:
            self.list_operation.SetSelection(0)

    def pane_hide(self):
        """Called when the pane is hidden; nothing to tear down."""
        pass

    def __set_properties(self):
        # Tooltips and button styling (originally wxGlade-generated).
        self.panel_preview.SetToolTip(_("Processed image preview"))
        self.list_operation.SetToolTip(
            _("Image operations applied in order to generate a raster image.")
        )
        self.button_operations.SetBackgroundColour(wx.Colour(0, 255, 0))
        self.button_operations.SetToolTip(_("Process Image and Export"))
        self.button_operations.SetSize(self.button_operations.GetBestSize())
        self.panel_select_op()
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: RasterWizard.__do_layout
        sizer_frame = wx.BoxSizer(wx.VERTICAL)
        sizer_main = wx.BoxSizer(wx.HORIZONTAL)
        self.sizer_operation_panels = wx.BoxSizer(wx.VERTICAL)
        sizer_list = wx.BoxSizer(wx.VERTICAL)
        sizer_frame.Add(self.panel_preview, 2, wx.EXPAND, 0)
        sizer_list.Add(self.list_operation, 1, wx.EXPAND, 0)
        sizer_list.Add(self.button_operations, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
        w, h = self.button_operations.Size
        self.list_operation.SetMinSize(
            (-1, 301 - h)
        )  # 301 is the height of Tone Panel.
        self.list_operation.SetMaxSize((-1, 301 - h))
        sizer_main.Add(sizer_list, 0, wx.EXPAND, 0)
        sizer_main.Add(self.sizer_operation_panels, 1, wx.EXPAND, 0)
        sizer_frame.Add(sizer_main, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_frame)
        self.Layout()
        # end wxGlade

    def wiz_img(self):
        """Worker body: apply the wizard operations to the node's image.

        Loops while ``needs_update`` is set (new updates may arrive while
        processing), emits "RasterWizard-Refresh" after each pass, applies
        any pending refocus/centering, and finally clears ``wizard_thread``
        under the lock so a new worker can be started.
        """
        if self.node is None:
            with self.thread_update_lock:
                self.wizard_thread = None
            return
        while self.needs_update:
            self.needs_update = False
            if self.ops is None:
                pass
            else:
                self.node.image, self.node.matrix, step = RasterScripts.wizard_image(
                    self.node, self.ops
                )
                if step is not None:
                    self.node.step_x = step
                    self.node.step_y = step
                # Invalidate the cached thumbnail so the preview re-renders.
                self.wx_bitmap_image = None
            if self.context is None:
                with self.thread_update_lock:
                    self.wizard_thread = None
                return
            self.context.signal("RasterWizard-Refresh")
        if self.focus_factor is not None:
            # A resample step change requested a proportional re-zoom.
            scale = self.focus_factor
            self.scene_post_scale(
                scale, scale, self.matrix.value_trans_x(), self.matrix.value_trans_y()
            )
            self.focus_factor = None
        # NOTE(review): self.pil_image is never assigned in this class — confirm.
        if self.pil_image is not None and self.needs_centering:
            box = self.pil_image.getbbox()
            if box is not None:
                self.focus_viewport_scene(box, self._preview_panel_buffer.Size)
            self.needs_centering = False
        with self.thread_update_lock:
            self.wizard_thread = None

    @signal_listener("emphasized")
    def on_emphasis_change(self, origin, *args):
        """Track the first emphasized image element as the wizard's subject."""
        for node in self.context.elements.elems(emphasized=True):
            if node.type == "elem image":
                self.node = node
                self.context.signal("RasterWizard-Image")
                if self.ops is not None:
                    self.panel_select_op()
                self.needs_centering = True
                break

    def panel_select_op(self):
        """Swap in the settings panel matching the selected operation's name."""
        if self.sizer_operation_panels is None:
            return
        self.sizer_operation_panels.Clear(True)
        select = self.list_operation.GetSelection()
        try:
            self.selected_op = self.ops[select]
        # NOTE(review): list indexing raises IndexError, not KeyError — an
        # out-of-range selection would not be caught here; confirm intent.
        except KeyError:
            pass
        op = self.selected_op
        name_op = op["name"]
        if name_op == "crop":
            panel = CropPanel(self, wx.ID_ANY)
        elif name_op == "resample":
            panel = ResamplePanel(self, wx.ID_ANY)
        elif name_op == "grayscale":
            panel = GrayscalePanel(self, wx.ID_ANY)
        elif name_op == "tone":
            panel = ToneCurvePanel(self, wx.ID_ANY)
        elif name_op == "gamma":
            panel = GammaPanel(self, wx.ID_ANY)
        elif name_op == "unsharp_mask":
            panel = SharpenPanel(self, wx.ID_ANY)
        elif name_op == "dither":
            panel = DitherPanel(self, wx.ID_ANY)
        elif name_op == "output":
            panel = OutputPanel(self, wx.ID_ANY)
        elif name_op == "contrast":
            panel = ContrastPanel(self, wx.ID_ANY)
        elif name_op == "halftone":
            panel = HalftonePanel(self, wx.ID_ANY)
        else:
            panel = BasicPanel(self, wx.ID_ANY)
        if panel is None:
            return
        self.sizer_operation_panels.Add(panel, 1, wx.EXPAND, 0)
        panel.set_operation(self.context, op, node=self.node)
        self.Layout()

    def on_size(self, event=None):
        """Reallocate the offscreen preview buffer to the panel's new size."""
        self.Layout()
        width, height = self.panel_preview.Size
        # wx.Bitmap cannot be zero-sized; clamp to 1x1.
        if width <= 0:
            width = 1
        if height <= 0:
            height = 1
        self._preview_panel_buffer = wx.Bitmap(width, height)
        self.update_in_gui_thread()
        self.Layout()
        self.Refresh()
        self.Update()

    def on_preview_panel_paint(self, event=None):
        """Blit the prepared offscreen buffer onto the preview panel."""
        try:
            wx.BufferedPaintDC(self.panel_preview, self._preview_panel_buffer)
        except RuntimeError:
            # Window may already be dying during shutdown.
            pass

    def update_in_gui_thread(self):
        """Redraw the buffer and refresh the widget; must run on the GUI thread."""
        self.on_update_buffer()
        try:
            self.Refresh(True)
            self.Update()
        except RuntimeError:
            pass

    def on_update_buffer(self, event=None):
        """Render the processed image (with view transform) into the buffer.

        Draws "Processing..."/"No image..." overlays depending on whether a
        wizard thread is running and whether an image is available.
        """
        dc = wx.MemoryDC()
        dc.SelectObject(self._preview_panel_buffer)
        dc.Clear()
        gc = wx.GraphicsContext.Create(dc)
        gc.SetBrush(wx.WHITE_BRUSH)
        w, h = self._preview_panel_buffer.GetSize()
        gc.DrawRectangle(0, 0, w, h)
        if self.context is None or self.node is None or self.pil_image is None:
            font = wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)
            gc.SetFont(font, wx.BLACK)
            if self.wizard_thread is not None and self.wizard_thread.is_alive():
                gc.DrawText(_("Processing..."), 0, 0)
            else:
                gc.DrawText(_("No image..."), 0, 0)
            gc.Destroy()
            return
        gc.PushState()
        gc.SetTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(self.matrix)))
        wx_bitmap = self.wx_bitmap_image
        if wx_bitmap is None:
            # Lazily (re)build the thumbnail after processing invalidated it.
            renderer = LaserRender(self.context)
            self.wx_bitmap_image = renderer.make_thumbnail(self.pil_image)
        width, height = self.pil_image.size
        gc.DrawBitmap(self.wx_bitmap_image, 0, 0, width, height)
        gc.PopState()
        font = wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)
        gc.SetFont(font, wx.BLACK)
        if self.wizard_thread is not None:
            gc.DrawText(_("Processing..."), 0, 0)
        gc.Destroy()
        dc.SelectObject(wx.NullBitmap)
        del dc

    def convert_scene_to_window(self, position):
        """Map a scene (image) coordinate to a window (pixel) coordinate."""
        point = self.matrix.point_in_matrix_space(position)
        return point[0], point[1]

    def convert_window_to_scene(self, position):
        """Map a window (pixel) coordinate back to a scene coordinate."""
        point = self.matrix.point_in_inverse_space(position)
        return point[0], point[1]

    def on_preview_mouse_move(self, event):
        """Pan the view while a left/middle drag is in progress."""
        if self.previous_window_position is None:
            return
        pos = event.GetPosition()
        window_position = pos.x, pos.y
        scene_position = self.convert_window_to_scene(
            [window_position[0], window_position[1]]
        )
        # sdx = scene_position[0] - self.previous_scene_position[0]
        # sdy = scene_position[1] - self.previous_scene_position[1]
        wdx = window_position[0] - self.previous_window_position[0]
        wdy = window_position[1] - self.previous_window_position[1]
        self.scene_post_pan(wdx, wdy)
        self.previous_window_position = window_position
        self.previous_scene_position = scene_position

    def on_preview_mousewheel(self, event):
        """Zoom in/out around the cursor position on wheel rotation."""
        rotation = event.GetWheelRotation()
        mouse = event.GetPosition()
        if self.context.mouse_zoom_invert:
            rotation = -rotation
        if rotation > 1:
            self.scene_post_scale(1.1, 1.1, mouse[0], mouse[1])
        elif rotation < -1:
            self.scene_post_scale(0.9, 0.9, mouse[0], mouse[1])

    def on_preview_mouse_right_down(self, event=None):
        """Pop up a context menu listing the registered raster scripts."""
        gui = self
        menu = wx.Menu()
        sub_menu = wx.Menu()
        try:
            for script_name in self.context.match("raster_script", suffix=True):
                gui.Bind(
                    wx.EVT_MENU,
                    self.set_script(script_name),
                    sub_menu.Append(wx.ID_ANY, script_name, "", wx.ITEM_NORMAL),
                )
            menu.Append(wx.ID_ANY, _("Raster Script"), sub_menu)
        except KeyError:
            pass
        if menu.MenuItemCount != 0:
            gui.PopupMenu(menu)
        menu.Destroy()

    def set_script(self, name):
        """Return a menu-event handler that loads the named raster script."""
        def script(event=None):
            self.set_wizard_script(name)

        return script

    def on_preview_mouse_left_down(self, event):
        # Start a drag: remember where it began in both coordinate spaces.
        self.previous_window_position = event.GetPosition()
        self.previous_scene_position = self.convert_window_to_scene(
            self.previous_window_position
        )

    def on_preview_mouse_left_up(self, event=None):
        # End the drag.
        self.previous_window_position = None
        self.previous_scene_position = None

    def on_preview_mouse_middle_down(self, event):
        # Middle-button drag pans exactly like a left drag.
        self.previous_window_position = event.GetPosition()
        self.previous_scene_position = self.convert_window_to_scene(
            self.previous_window_position
        )

    def on_preview_mouse_middle_up(self, event=None):
        self.previous_window_position = None
        self.previous_scene_position = None

    def focus_viewport_scene(
        self, new_scene_viewport, scene_size, buffer=0.0, lock=True
    ):
        """
        Focus on the given viewport in the scene.

        Rebuilds ``self.matrix`` so that the (optionally buffered) viewport
        box is centered and scaled to fit the window.

        @param new_scene_viewport: Viewport to have after this process within the scene.
        @param scene_size: Size of the scene in which this viewport is active.
        @param buffer: Amount of buffer around the edge of the new viewport.
        @param lock: lock the scalex, scaley (uniform scale when True).
        @return:
        """
        window_width, window_height = scene_size
        left = new_scene_viewport[0]
        top = new_scene_viewport[1]
        right = new_scene_viewport[2]
        bottom = new_scene_viewport[3]
        viewport_width = right - left
        viewport_height = bottom - top
        left -= viewport_width * buffer
        right += viewport_width * buffer
        top -= viewport_height * buffer
        bottom += viewport_height * buffer
        # Degenerate (zero-extent) axes fall back to a fixed scale of 100.
        if right == left:
            scale_x = 100
        else:
            scale_x = window_width / float(right - left)
        if bottom == top:
            scale_y = 100
        else:
            scale_y = window_height / float(bottom - top)
        cx = (right + left) / 2
        cy = (top + bottom) / 2
        self.matrix.reset()
        self.matrix.post_translate(-cx, -cy)
        if lock:
            scale = min(scale_x, scale_y)
            if scale != 0:
                self.matrix.post_scale(scale)
        else:
            if scale_x != 0 and scale_y != 0:
                self.matrix.post_scale(scale_x, scale_y)
        self.matrix.post_translate(window_width / 2.0, window_height / 2.0)

    def scene_post_pan(self, px, py):
        """Translate the view by window-pixel deltas and request a redraw."""
        self.matrix.post_translate(px, py)
        self.context.signal("RasterWizard-Refresh")

    def scene_post_scale(self, sx, sy=None, ax=0, ay=0):
        """Scale the view about anchor (ax, ay) and request a redraw."""
        self.matrix.post_scale(sx, sy, ax, ay)
        self.context.signal("RasterWizard-Refresh")

    @signal_listener("RasterWizard-Refocus")
    def on_raster_wizard_refocus_signal(self, origin, factor, *args):
        """Processes the image signal but flags this as needing refocusing."""
        self.focus_factor = factor
        self.on_raster_wizard_image_signal(origin, *args)

    @signal_listener("RasterWizard-Image")
    def on_raster_wizard_image_signal(self, origin, *args):
        """Processes the refresh. Runs through a signal to prevent mass refresh stacking."""
        with self.thread_update_lock:
            self.needs_update = True
            # Start a worker only if one is not already running; the running
            # worker will pick up needs_update in its loop.
            if self.wizard_thread is None:
                self.wizard_thread = self.context.threaded(self.wiz_img)
                self.context.signal("RasterWizard-Refresh")

    @signal_listener("RasterWizard-Refresh")
    def on_raster_wizard_refresh_signal(self, origin, *args):
        """Processes the refresh. Runs through a signal to prevent mass refresh stacking."""
        if wx.IsMainThread():
            self.update_in_gui_thread()
        else:
            # Marshal onto the GUI thread; wx widgets are not thread-safe.
            wx.CallAfter(self.update_in_gui_thread)

    def on_list_operation(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """Selection changed in the operations list: show its settings panel."""
        self.panel_select_op()

    def on_buttons_operations(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        # TODO: broken
        # NOTE(review): self.pil_image, self.step_image and self.matrix_image
        # are never assigned anywhere in this class.
        if self.wizard_thread is not None:
            return
        if self.node is not None:
            self.node.image = self.pil_image
            self.node.values["raster_step"] = self.step_image
            self.node.transform = self.matrix_image
            (
                self.node.image_width,
                self.node.image_height,
            ) = self.pil_image.size
            self.node.lock = True
            try:
                self.node.node.object = self.node
                self.node.node.altered()
            except AttributeError:
                pass
        try:
            self.GetParent().Close()
        except (TypeError, AttributeError):
            pass
# end of class RasterWizard
class DitherPanel(wx.Panel):
    """Settings panel for the "dither" raster-wizard operation.

    Exposes an enable checkbox and a combo box of dithering algorithms.
    Every change is written into the shared operation dict and a
    "RasterWizard-Image" signal is raised so the preview reprocesses.
    """

    def __init__(self, *args, **kwds):
        # begin wxGlade: DitherPanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.check_enable_dither = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        # Algorithm names; combo selection indices map back through this list.
        self.choices = [
            "Floyd-Steinberg",
            "Atkinson",
            "Jarvis-Judice-Ninke",
            "Stucki",
            "Burkes",
            "Sierra3",
            "Sierra2",
            "Sierra-2-4a",
        ]
        self.combo_dither = wx.ComboBox(
            self,
            wx.ID_ANY,
            choices=self.choices,
            style=wx.CB_DROPDOWN,
        )
        self.__set_properties()
        self.__do_layout()
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_enable_dither, self.check_enable_dither
        )
        self.Bind(wx.EVT_COMBOBOX, self.on_combo_dither_type, self.combo_dither)
        self.Bind(wx.EVT_TEXT_ENTER, self.on_combo_dither_type, self.combo_dither)
        # Bound in set_operation() once the wizard hands over the op dict.
        self.context = None
        self.op = None
        self.original_op = None
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: DitherPanel.__set_properties
        self.check_enable_dither.SetToolTip(_("Enable Dither"))
        self.check_enable_dither.SetValue(1)
        self.combo_dither.SetToolTip(_("Select dither algorithm to use"))
        self.combo_dither.SetSelection(0)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: DitherPanel.__do_layout
        sizer_dither_main = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Dither")), wx.VERTICAL
        )
        sizer_dither_main.Add(self.check_enable_dither, 0, 0, 0)
        sizer_dither_main.Add(self.combo_dither, 0, 0, 0)
        self.SetSizer(sizer_dither_main)
        sizer_dither_main.Fit(self)
        self.Layout()
        # end wxGlade

    def set_operation(self, context, op, node=None):
        """Bind this panel to an operation dict and mirror its state in the UI.

        A deep copy of the incoming op is kept in ``original_op`` so edits
        can be compared against (or reset to) the initial values.
        """
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.check_enable_dither.SetValue(op["enable"])
        self.combo_dither.SetSelection(self.choices.index(self.op["type"]))

    def on_check_enable_dither(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Toggle the operation on/off and reprocess the preview."""
        self.op["enable"] = self.check_enable_dither.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_combo_dither_type(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """Store the chosen algorithm name and reprocess the preview."""
        self.op["type"] = self.choices[self.combo_dither.GetSelection()]
        self.context.signal("RasterWizard-Image")
# end of class DitherPanel
class CropPanel(wx.Panel):
    """Settings panel for the "crop" raster-wizard operation.

    Provides an enable checkbox, a reset button restoring the original
    crop bounds, and a (currently passive) image view panel intended to
    show the crop field.
    """

    def __init__(self, *args, **kwds):
        # begin wxGlade: CropPanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self._crop_panel_buffer = None  # offscreen bitmap for the crop view
        self.check_enable_crop = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.button_reset_crop = wx.Button(self, wx.ID_ANY, _("Reset"))
        self.image_view_panel = wx.Panel(self, wx.ID_ANY)
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_CHECKBOX, self.on_check_enable_crop, self.check_enable_crop)
        self.Bind(wx.EVT_BUTTON, self.on_button_reset_crop, self.button_reset_crop)
        # end wxGlade
        # Bound in set_operation() once the wizard hands over the op dict.
        self.context = None
        self.op = None
        self.original_op = None

    def __set_properties(self):
        # begin wxGlade: CropPanel.__set_properties
        self.check_enable_crop.SetToolTip(_("Enable Cropping"))
        self.check_enable_crop.SetValue(1)
        self.button_reset_crop.SetToolTip(_("Reset Cropping"))
        self.image_view_panel.SetToolTip(_("Crop field"))
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: CropPanel.__do_layout
        sizer_crop = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Crop")), wx.VERTICAL
        )
        sizer_crop_main = wx.BoxSizer(wx.HORIZONTAL)
        sizer_crop_main.Add(self.check_enable_crop, 0, 0, 0)
        sizer_crop_main.Add(self.button_reset_crop, 0, 0, 0)
        sizer_crop.Add(sizer_crop_main, 0, wx.EXPAND, 0)
        sizer_crop.Add(self.image_view_panel, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_crop)
        sizer_crop.Fit(self)
        self.Layout()
        # end wxGlade

    def update_in_gui_thread(self):
        """Refresh the panel; drawing hooks below are currently disabled."""
        # self.on_update_buffer()
        # self.on_update_tone()
        try:
            self.Refresh(True)
            self.Update()
        except RuntimeError:
            # Window may already be dying during shutdown.
            pass

    def set_operation(self, context, op, node=None):
        """Bind this panel to a crop operation dict and size its view buffer."""
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.check_enable_crop.SetValue(op["enable"])
        self.Layout()
        width, height = self.image_view_panel.Size
        # wx.Bitmap cannot be zero-sized; clamp to 1x1.
        if width <= 0:
            width = 1
        if height <= 0:
            height = 1
        self._crop_panel_buffer = wx.Bitmap(width, height)
        self.update_in_gui_thread()
        # NOTE(review): duplicate of the SetValue call above — likely redundant.
        self.check_enable_crop.SetValue(op["enable"])

    def on_check_enable_crop(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """Toggle cropping on/off and reprocess the preview."""
        self.op["enable"] = self.check_enable_crop.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_button_reset_crop(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """Restore the crop bounds captured when the panel was bound."""
        self.op["bounds"] = self.original_op["bounds"]
        self.context.signal("RasterWizard-Image")
# end of class CropPanel
class ResamplePanel(wx.Panel):
    """Settings panel for the "resample" raster-wizard operation.

    Shows the image's rendered width/height (read-only) in the selected
    unit, the pixel dimensions, and a step/DPI pair of combo boxes that
    are kept in lock-step (step n <-> 1000/n DPI).  Changing the step
    emits "RasterWizard-Refocus" with the proportional zoom factor so the
    preview rescales to match.

    Fix: ``refresh_dims`` previously wrote the computed width into the
    height field and vice versa; the assignments are now matched to the
    correct controls.
    """

    def __init__(self, *args, **kwds):
        # begin wxGlade: ResamplePanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.check_enable_resample = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.text_resample_width = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.check_resample_maintain_aspect = wx.CheckBox(
            self, wx.ID_ANY, _("Maintain Aspect Ratio")
        )
        self.text_resample_height = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        # Index into this combo is stored directly as op["units"]
        # (pixels / percent / inches / mm / cm -> UNITS_* constants).
        self.combo_resample_units = wx.ComboBox(
            self,
            wx.ID_ANY,
            choices=[_("pixels"), _("percent"), _("inches"), _("mm"), _("cm")],
            style=wx.CB_DROPDOWN,
        )
        self.text_resample_pixels = wx.TextCtrl(
            self, wx.ID_ANY, "1000 x 1000 pixels", style=wx.TE_READONLY
        )
        self.combo_resample_step = wx.ComboBox(
            self,
            wx.ID_ANY,
            choices=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
            style=wx.CB_DROPDOWN,
        )
        # DPI list mirrors the step list: entry i is round(1000 / (i + 1)).
        self.combo_resample_dpi = wx.ComboBox(
            self,
            wx.ID_ANY,
            choices=[
                "1000",
                "500",
                "333",
                "250",
                "200",
                "167",
                "143",
                "125",
                "111",
                "100",
            ],
            style=wx.CB_DROPDOWN,
        )
        self.__set_properties()
        self.__do_layout()
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_enable_resample, self.check_enable_resample
        )
        self.Bind(wx.EVT_TEXT, self.on_text_resample_width, self.text_resample_width)
        self.Bind(
            wx.EVT_TEXT_ENTER, self.on_text_resample_width, self.text_resample_width
        )
        self.Bind(
            wx.EVT_CHECKBOX,
            self.on_check_resample_maintain_aspect,
            self.check_resample_maintain_aspect,
        )
        self.Bind(wx.EVT_TEXT, self.on_text_resample_height, self.text_resample_height)
        self.Bind(
            wx.EVT_TEXT_ENTER, self.on_text_resample_height, self.text_resample_height
        )
        self.Bind(
            wx.EVT_COMBOBOX, self.on_combo_resample_units, self.combo_resample_units
        )
        self.Bind(
            wx.EVT_COMBOBOX, self.on_combo_resample_step, self.combo_resample_step
        )
        self.Bind(
            wx.EVT_TEXT_ENTER, self.on_combo_resample_step, self.combo_resample_step
        )
        self.Bind(wx.EVT_COMBOBOX, self.on_combo_resample_dpi, self.combo_resample_dpi)
        self.Bind(
            wx.EVT_TEXT_ENTER, self.on_combo_resample_dpi, self.combo_resample_dpi
        )
        # end wxGlade
        # Bound in set_operation() once the wizard hands over the op dict.
        self.context = None
        self.op = None
        self.original_op = None
        self.node = None

    def __set_properties(self):
        # begin wxGlade: ResamplePanel.__set_properties
        self.check_enable_resample.SetToolTip(_("Enable Resampling"))
        self.check_enable_resample.SetValue(1)
        self.text_resample_width.SetToolTip(_("Image Width"))
        self.check_resample_maintain_aspect.SetToolTip(
            _("Maintain Aspect Ratio for Resample")
        )
        self.check_resample_maintain_aspect.SetValue(1)
        self.text_resample_height.SetToolTip(_("Image Height"))
        self.combo_resample_units.SetSelection(0)
        self.combo_resample_step.SetToolTip(_("Image resample step"))
        self.combo_resample_step.SetSelection(1)
        self.combo_resample_dpi.SetToolTip(_("Image resample DPI at given step"))
        self.combo_resample_dpi.SetSelection(1)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: ResamplePanel.__do_layout
        sizer_resample = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Resample")), wx.VERTICAL
        )
        sizer_resample_step = wx.BoxSizer(wx.HORIZONTAL)
        sizer_resample_height = wx.BoxSizer(wx.HORIZONTAL)
        sizer_resample_width = wx.BoxSizer(wx.HORIZONTAL)
        sizer_resample_main = wx.BoxSizer(wx.HORIZONTAL)
        sizer_resample_main.Add(self.check_enable_resample, 0, 0, 0)
        sizer_resample.Add(sizer_resample_main, 0, wx.EXPAND, 0)
        label_width = wx.StaticText(self, wx.ID_ANY, _("Width"))
        label_width.SetMinSize((50, 16))
        sizer_resample_width.Add(label_width, 0, 0, 0)
        sizer_resample_width.Add(self.text_resample_width, 0, 0, 0)
        sizer_resample_width.Add(self.check_resample_maintain_aspect, 0, 0, 0)
        sizer_resample.Add(sizer_resample_width, 0, wx.EXPAND, 0)
        label_height = wx.StaticText(self, wx.ID_ANY, _("Height"))
        label_height.SetMinSize((50, 16))
        sizer_resample_height.Add(label_height, 0, 0, 0)
        sizer_resample_height.Add(self.text_resample_height, 0, 0, 0)
        sizer_resample_height.Add(self.combo_resample_units, 0, 0, 0)
        sizer_resample.Add(sizer_resample_height, 0, wx.EXPAND, 0)
        sizer_resample.Add(self.text_resample_pixels, 0, 0, 0)
        sizer_resample.Add((20, 20), 0, 0, 0)
        label_step = wx.StaticText(self, wx.ID_ANY, _("Step"))
        label_step.SetMinSize((50, 16))
        sizer_resample_step.Add(label_step, 0, 0, 0)
        sizer_resample_step.Add(self.combo_resample_step, 0, 0, 0)
        sizer_resample_step.Add(self.combo_resample_dpi, 0, 0, 0)
        label_ppi = wx.StaticText(self, wx.ID_ANY, _("pixels/in"))
        # NOTE(review): proportion 11 looks like a typo for 1, but it is
        # harmless (the label absorbs the remaining space) — left as-is.
        sizer_resample_step.Add(label_ppi, 11, 0, 0)
        sizer_resample.Add(sizer_resample_step, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_resample)
        sizer_resample.Fit(self)
        self.Layout()
        # end wxGlade

    def set_operation(self, context, op, node=None):
        """Bind this panel to a resample operation dict and mirror its state.

        @param context: kernel context used for signalling.
        @param op: operation dict with keys "enable", "step", "units", "aspect".
        @param node: image node whose dimensions are displayed (may be None).
        """
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.node = node
        self.check_enable_resample.SetValue(op["enable"])
        # Combos are 0-indexed; step values start at 1.
        self.combo_resample_step.SetSelection(op["step"] - 1)
        self.combo_resample_dpi.SetSelection(op["step"] - 1)
        self.combo_resample_units.SetSelection(op["units"])
        self.check_resample_maintain_aspect.SetValue(op["aspect"])
        self.refresh_dims()
        if node is not None:
            self.text_resample_pixels.SetValue(
                _("%d x %d pixels") % (node.image.width, node.image.height)
            )

    def refresh_dims(self):
        """
        This routine is responsible for setting the width and height values in the resample
        pane of the raster wizard.

        The size of that value is the result matrix and the image size.
        Reduced by any amount of truncated edge.
        """
        if self.node is None:
            return
        image = self.node.image
        matrix = Matrix(self.node.matrix)
        boundary_points = []
        box = None
        try:
            # Invert a grayscale copy so getbbox() finds the non-white content.
            rev = image.convert("L").point(lambda e: 255 - e)
            box = rev.getbbox()
        except TypeError:
            pass
        if box is None:
            # Fully empty (or unconvertible) image: use the full extent.
            box = (0, 0, image.width, image.height)
        # Transform all four corners; the matrix may rotate/skew the image.
        top_left = matrix.point_in_matrix_space([box[0], box[1]])
        top_right = matrix.point_in_matrix_space([box[2], box[1]])
        bottom_left = matrix.point_in_matrix_space([box[0], box[3]])
        bottom_right = matrix.point_in_matrix_space([box[2], box[3]])
        boundary_points.append(top_left)
        boundary_points.append(top_right)
        boundary_points.append(bottom_left)
        boundary_points.append(bottom_right)
        xmin = min(e[0] for e in boundary_points)
        ymin = min(e[1] for e in boundary_points)
        xmax = max(e[0] for e in boundary_points)
        ymax = max(e[1] for e in boundary_points)
        bbox = xmin, ymin, xmax, ymax
        width = int(ceil(bbox[2] - bbox[0]))
        height = int(ceil(bbox[3] - bbox[1]))
        units = self.op["units"]
        # TODO: OP UNITS CONVERTS
        if units == UNITS_PERCENT:
            width = 100
            height = 100
        elif units == UNITS_INCH:
            width /= UNITS_PER_INCH
            height /= UNITS_PER_INCH
        elif units == UNITS_MM:
            width /= UNITS_PER_MM
            height /= UNITS_PER_MM
        elif units == UNITS_CM:
            width /= UNITS_PER_CM
            height /= UNITS_PER_CM
        # Bugfix: these two assignments were swapped (width was shown in the
        # Height field and vice versa).
        self.text_resample_width.SetValue(str(width))
        self.text_resample_height.SetValue(str(height))

    def on_check_enable_resample(
        self, event=None
    ):  # wxGlade: ResamplePanel.<event_handler>
        """Toggle resampling on/off and reprocess the preview."""
        self.op["enable"] = self.check_enable_resample.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_text_resample_width(
        self, event=None
    ):  # wxGlade: ResamplePanel.<event_handler>
        # Field is read-only; editing is not implemented.
        pass
        # self.kernel.signal("RasterWizard-Image")

    def on_check_resample_maintain_aspect(
        self, event=None
    ):  # wxGlade: ResamplePanel.<event_handler>
        """Store the aspect-ratio preference and reprocess the preview."""
        self.op["aspect"] = self.check_resample_maintain_aspect.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_text_resample_height(
        self, event=None
    ):  # wxGlade: ResamplePanel.<event_handler>
        # Field is read-only; editing is not implemented.
        pass
        # self.kernel.signal("RasterWizard-Image")

    def on_combo_resample_units(
        self, event=None
    ):  # wxGlade: ResamplePanel.<event_handler>
        """Store the display unit and refresh the width/height readouts."""
        self.op["units"] = self.combo_resample_units.GetSelection()
        self.refresh_dims()

    def on_combo_resample_step(
        self, event=None
    ):  # wxGlade: ResamplePanel.<event_handler>
        """Change the resample step; keep the DPI combo in lock-step and refocus."""
        selected = self.combo_resample_step.GetSelection() + 1
        current = self.op["step"]
        if selected != current:
            self.combo_resample_dpi.SetSelection(
                self.combo_resample_step.GetSelection()
            )
            self.op["step"] = self.combo_resample_step.GetSelection() + 1
            if current == 0:
                current = 1
            # Zoom the preview proportionally to the step change.
            self.context.signal(
                "RasterWizard-Refocus", float(selected) / float(current)
            )

    def on_combo_resample_dpi(
        self, event=None
    ):  # wxGlade: ResamplePanel.<event_handler>
        """Change the DPI; keep the step combo in lock-step and refocus."""
        selected = self.combo_resample_dpi.GetSelection() + 1
        current = self.op["step"]
        if selected != current:
            self.combo_resample_step.SetSelection(
                self.combo_resample_dpi.GetSelection()
            )
            self.op["step"] = self.combo_resample_dpi.GetSelection() + 1
            if current == 0:
                current = 1
            # Zoom the preview proportionally to the step change.
            self.context.signal(
                "RasterWizard-Refocus", float(selected) / float(current)
            )
# end of class ResamplePanel
class GammaPanel(wx.Panel):
    """RasterWizard stage panel for the gamma-shift operation.

    Exposes an enable checkbox, a reset button, and a gamma factor slider.
    The slider runs 0..500 and represents gamma factors 0.00..5.00, i.e.
    100 slider ticks per 1.0 of factor.
    """

    def __init__(self, *args, **kwds):
        """Build widgets, apply properties/layout, and bind event handlers."""
        # begin wxGlade: GammaPanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.check_enable_gamma = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.button_reset_gamma = wx.Button(self, wx.ID_ANY, _("Reset"))
        self.slider_gamma_factor = wx.Slider(
            self, wx.ID_ANY, 100, 0, 500, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_gamma_factor = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_READONLY)
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_CHECKBOX, self.on_check_enable_gamma, self.check_enable_gamma)
        self.Bind(wx.EVT_BUTTON, self.on_button_reset_gamma, self.button_reset_gamma)
        self.Bind(wx.EVT_SLIDER, self.on_slider_gamma_factor, self.slider_gamma_factor)
        self.Bind(wx.EVT_TEXT, self.on_text_gamma_factor, self.text_gamma_factor)
        # end wxGlade
        # Populated later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None
        self.last_x = None

    def __set_properties(self):
        """Set tooltips and default the enable checkbox to checked."""
        # begin wxGlade: GammaPanel.__set_properties
        self.check_enable_gamma.SetToolTip(_("Enable Gamma Shift"))
        self.check_enable_gamma.SetValue(1)
        self.button_reset_gamma.SetToolTip(_("Reset Gamma Shift"))
        self.slider_gamma_factor.SetToolTip(_("Gamma factor slider"))
        self.text_gamma_factor.SetToolTip(_("Amount of gamma factor"))
        # end wxGlade

    def __do_layout(self):
        """Arrange controls: enable/reset row on top, factor slider below."""
        # begin wxGlade: GammaPanel.__do_layout
        sizer_gamma = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Gamma")), wx.VERTICAL
        )
        sizer_gamma_factor = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Gamma Factor")), wx.HORIZONTAL
        )
        sizer_gamma_main = wx.BoxSizer(wx.HORIZONTAL)
        sizer_gamma_main.Add(self.check_enable_gamma, 0, 0, 0)
        sizer_gamma_main.Add(self.button_reset_gamma, 0, 0, 0)
        sizer_gamma.Add(sizer_gamma_main, 0, wx.EXPAND, 0)
        sizer_gamma_factor.Add(self.slider_gamma_factor, 5, wx.EXPAND, 0)
        sizer_gamma_factor.Add(self.text_gamma_factor, 1, 0, 0)
        sizer_gamma.Add(sizer_gamma_factor, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_gamma)
        sizer_gamma.Fit(self)
        self.Layout()
        # end wxGlade

    def set_operation(self, context, op, svg_image=None):
        """Bind a gamma op dict to this panel and mirror its values in the UI.

        :param context: kernel context used to emit refresh signals.
        :param op: mutable operation dict with "factor" and "enable" keys.
        :param svg_image: unused here; kept for a uniform panel interface.
        """
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.text_gamma_factor.SetValue(str(op["factor"]))
        # FIX: wx.Slider.SetValue requires an int; passing the raw float
        # (factor * 100.0) raises TypeError in wxPython Phoenix. All sibling
        # panels (e.g. GrayscalePanel) convert explicitly as well.
        self.slider_gamma_factor.SetValue(int(op["factor"] * 100.0))
        self.check_enable_gamma.SetValue(op["enable"])

    def on_check_enable_gamma(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Toggle the gamma stage on/off and refresh the preview image."""
        self.op["enable"] = self.check_enable_gamma.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_button_reset_gamma(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Restore the factor captured at set_operation() time and refresh."""
        self.op["factor"] = self.original_op["factor"]
        # FIX: int conversion required by wx.Slider.SetValue (see set_operation).
        self.slider_gamma_factor.SetValue(int(self.op["factor"] * 100.0))
        self.text_gamma_factor.SetValue(str(self.op["factor"]))
        self.context.signal("RasterWizard-Image")

    def on_slider_gamma_factor(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Map slider ticks back to a float factor (ticks / 100) and refresh."""
        self.op["factor"] = self.slider_gamma_factor.GetValue() / 100.0
        self.text_gamma_factor.SetValue(str(self.op["factor"]))
        self.context.signal("RasterWizard-Image")

    def on_text_gamma_factor(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """No-op: the factor text field is read-only."""
        pass
# end of class GammaPanel
class GrayscalePanel(wx.Panel):
    """RasterWizard stage panel for the grayscale-conversion operation.

    Exposes per-channel weight sliders (red/green/blue), a lightness slider,
    an invert toggle, and an enable toggle. Slider positions map to op values
    at 500 slider ticks per 1.0 of weight.
    """

    def __init__(self, *args, **kwds):
        """Build widgets, apply properties/layout, and bind event handlers."""
        # begin wxGlade: GrayscalePanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.check_enable_grayscale = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.check_invert_grayscale = wx.CheckBox(self, wx.ID_ANY, _("Invert"))
        # Channel sliders span -1000..1000, i.e. weights -2.0..2.0.
        self.slider_grayscale_red = wx.Slider(
            self, wx.ID_ANY, 0, -1000, 1000, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_grayscale_red = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_READONLY)
        self.slider_grayscale_green = wx.Slider(
            self, wx.ID_ANY, 0, -1000, 1000, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_grayscale_green = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.slider_grayscale_blue = wx.Slider(
            self, wx.ID_ANY, 0, -1000, 1000, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_grayscale_blue = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        # Lightness spans 0..1000 with a midpoint default of 500 (= 1.0).
        self.slider_grayscale_lightness = wx.Slider(
            self, wx.ID_ANY, 500, 0, 1000, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_grayscale_lightness = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.__set_properties()
        self.__do_layout()
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_enable_grayscale, self.check_enable_grayscale
        )
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_invert_grayscale, self.check_invert_grayscale
        )
        # All four sliders share one handler that re-reads every component.
        self.Bind(
            wx.EVT_SLIDER,
            self.on_slider_grayscale_component,
            self.slider_grayscale_lightness,
        )
        self.Bind(
            wx.EVT_SLIDER, self.on_slider_grayscale_component, self.slider_grayscale_red
        )
        self.Bind(
            wx.EVT_SLIDER,
            self.on_slider_grayscale_component,
            self.slider_grayscale_green,
        )
        self.Bind(
            wx.EVT_SLIDER,
            self.on_slider_grayscale_component,
            self.slider_grayscale_blue,
        )
        # end wxGlade
        # Populated later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None

    def __set_properties(self):
        """Set tooltips and default the enable checkbox to checked."""
        # begin wxGlade: GrayscalePanel.__set_properties
        self.check_enable_grayscale.SetToolTip(_("Enable Grayscale Convert"))
        self.check_enable_grayscale.SetValue(1)
        self.check_invert_grayscale.SetToolTip(_("Invert Grayscale"))
        self.slider_grayscale_red.SetToolTip(_("Red component amount"))
        self.text_grayscale_red.SetToolTip(_("Red Factor"))
        self.slider_grayscale_green.SetToolTip(_("Green component control"))
        self.text_grayscale_green.SetToolTip(_("Green Factor"))
        self.slider_grayscale_blue.SetToolTip(_("Blue component control"))
        self.text_grayscale_blue.SetToolTip(_("Blue Factor"))
        self.slider_grayscale_lightness.SetToolTip(_("Lightness control"))
        self.text_grayscale_lightness.SetToolTip(_("Lightness"))
        # end wxGlade

    def __do_layout(self):
        """Stack checkboxes then one labeled slider row per component."""
        sizer_grayscale = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Grayscale")), wx.VERTICAL
        )
        sizer_grayscale_lightness = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Lightness")), wx.HORIZONTAL
        )
        sizer_grayscale_blue = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Blue")), wx.HORIZONTAL
        )
        sizer_grayscale_green = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Green")), wx.HORIZONTAL
        )
        sizer_grayscale_red = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Red")), wx.HORIZONTAL
        )
        sizer_grayscale.Add(self.check_enable_grayscale, 0, 0, 0)
        sizer_grayscale.Add(self.check_invert_grayscale, 0, 0, 0)
        sizer_grayscale_red.Add(self.slider_grayscale_red, 5, wx.EXPAND, 0)
        sizer_grayscale_red.Add(self.text_grayscale_red, 1, 0, 0)
        sizer_grayscale.Add(sizer_grayscale_red, 0, wx.EXPAND, 0)
        sizer_grayscale_green.Add(self.slider_grayscale_green, 5, wx.EXPAND, 0)
        sizer_grayscale_green.Add(self.text_grayscale_green, 1, 0, 0)
        sizer_grayscale.Add(sizer_grayscale_green, 0, wx.EXPAND, 0)
        sizer_grayscale_blue.Add(self.slider_grayscale_blue, 5, wx.EXPAND, 0)
        sizer_grayscale_blue.Add(self.text_grayscale_blue, 1, 0, 0)
        sizer_grayscale.Add(sizer_grayscale_blue, 0, wx.EXPAND, 0)
        sizer_grayscale_lightness.Add(self.slider_grayscale_lightness, 5, wx.EXPAND, 0)
        sizer_grayscale_lightness.Add(self.text_grayscale_lightness, 1, 0, 0)
        sizer_grayscale.Add(sizer_grayscale_lightness, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_grayscale)
        sizer_grayscale.Fit(self)
        self.Layout()

    def set_operation(self, context, op, svg_image=None):
        """Bind a grayscale op dict to this panel and mirror it in the UI.

        :param context: kernel context used to emit refresh signals.
        :param op: mutable operation dict with "enable", "invert", "red",
            "green", "blue" and "lightness" keys.
        :param svg_image: unused here; kept for a uniform panel interface.
        """
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.check_enable_grayscale.SetValue(op["enable"])
        self.check_invert_grayscale.SetValue(op["invert"])
        # Op values are floats; sliders use 500 ticks per 1.0 of weight.
        self.slider_grayscale_red.SetValue(int(op["red"] * 500.0))
        self.text_grayscale_red.SetValue(str(self.op["red"]))
        self.slider_grayscale_green.SetValue(int(op["green"] * 500.0))
        self.text_grayscale_green.SetValue(str(self.op["green"]))
        self.slider_grayscale_blue.SetValue(int(op["blue"] * 500.0))
        self.text_grayscale_blue.SetValue(str(self.op["blue"]))
        self.slider_grayscale_lightness.SetValue(int(op["lightness"] * 500.0))
        self.text_grayscale_lightness.SetValue(str(self.op["lightness"]))

    def on_check_enable_grayscale(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Toggle the grayscale stage on/off and refresh the preview image."""
        self.op["enable"] = self.check_enable_grayscale.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_check_invert_grayscale(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Toggle output inversion and refresh the preview image."""
        self.op["invert"] = self.check_invert_grayscale.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_slider_grayscale_component(
        self, event=None
    ):  # wxGlade: GrayscalePanel.<event_handler>
        """Re-read every component slider into the op (ticks / 500) and refresh.

        Shared by all four sliders, so all values are refreshed regardless of
        which slider actually moved.
        """
        self.op["red"] = float(int(self.slider_grayscale_red.GetValue()) / 500.0)
        self.text_grayscale_red.SetValue(str(self.op["red"]))
        self.op["green"] = float(int(self.slider_grayscale_green.GetValue()) / 500.0)
        self.text_grayscale_green.SetValue(str(self.op["green"]))
        self.op["blue"] = float(int(self.slider_grayscale_blue.GetValue()) / 500.0)
        self.text_grayscale_blue.SetValue(str(self.op["blue"]))
        self.op["lightness"] = float(
            int(self.slider_grayscale_lightness.GetValue()) / 500.0
        )
        self.text_grayscale_lightness.SetValue(str(self.op["lightness"]))
        self.context.signal("RasterWizard-Image")
# end of class GrayscalePanel
class ToneCurvePanel(wx.Panel):
    """RasterWizard stage panel for the tone-curve operation.

    Draws an editable 256x256 tone curve into an off-screen bitmap buffer.
    The op's "values" list holds (x, y) control points; "type" selects
    "point" (per-column edits) or "spline" (control-point curve) rendering
    via RasterScripts.
    """

    def __init__(self, *args, **kwds):
        """Build widgets, wire wxGlade bindings, then bind curve-panel mouse/paint events."""
        # begin wxGlade: ToneCurvePanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        # Off-screen bitmap for flicker-free curve drawing; created in set_operation().
        self._tone_panel_buffer = None
        self.check_enable_tone = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.button_reset_tone = wx.Button(self, wx.ID_ANY, _("Reset"))
        self.curve_panel = wx.Panel(self, wx.ID_ANY)
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_CHECKBOX, self.on_check_enable_tone, self.check_enable_tone)
        self.Bind(wx.EVT_BUTTON, self.on_button_reset_tone, self.button_reset_tone)
        # end wxGlade
        self.curve_panel.Bind(wx.EVT_PAINT, self.on_tone_panel_paint)
        # Suppress background erase to avoid flicker; painting is fully buffered.
        self.curve_panel.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None)
        self.curve_panel.Bind(wx.EVT_MOTION, self.on_curve_mouse_move)
        self.curve_panel.Bind(wx.EVT_LEFT_DOWN, self.on_curve_mouse_left_down)
        self.curve_panel.Bind(wx.EVT_LEFT_UP, self.on_curve_mouse_left_up)
        self.curve_panel.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.on_curve_mouse_lost)
        # Populated later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None
        # Index of the control point currently being dragged (-1 = none).
        self.point = -1

    def __set_properties(self):
        """Set tooltips, defaults, and the fixed 256x256 curve canvas."""
        # begin wxGlade: ToneCurvePanel.__set_properties
        self.check_enable_tone.SetToolTip(_("Enable Tone Curve"))
        self.check_enable_tone.SetValue(1)
        self.button_reset_tone.SetToolTip(_("Reset Tone Curve"))
        self.curve_panel.SetMinSize((256, 256))
        self.curve_panel.SetBackgroundColour(wx.Colour(255, 255, 255))
        # end wxGlade

    def __do_layout(self):
        """Arrange controls: enable/reset row on top, curve canvas below."""
        # begin wxGlade: ToneCurvePanel.__do_layout
        sizer_tone = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Tone Curve")), wx.VERTICAL
        )
        sizer_tone_curve = wx.BoxSizer(wx.HORIZONTAL)
        sizer_tone_curve.Add(self.check_enable_tone, 0, 0, 0)
        sizer_tone_curve.Add(self.button_reset_tone, 0, 0, 0)
        sizer_tone.Add(sizer_tone_curve, 0, wx.EXPAND, 0)
        sizer_tone.Add(self.curve_panel, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_tone)
        sizer_tone.Fit(self)
        self.Layout()
        # end wxGlade

    def set_operation(self, context, op, svg_image=None):
        """Bind a tone op dict, allocate the draw buffer, and render the curve.

        :param context: kernel context used to emit refresh signals.
        :param op: mutable operation dict with "enable", "type" and "values".
        :param svg_image: unused here; kept for a uniform panel interface.
        """
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.check_enable_tone.SetValue(op["enable"])
        self.Layout()
        width, height = self.curve_panel.Size
        # Clamp to at least 1x1: wx.Bitmap rejects non-positive dimensions.
        if width <= 0:
            width = 1
        if height <= 0:
            height = 1
        self._tone_panel_buffer = wx.Bitmap(width, height)
        self.update_in_gui_thread()

    def update_in_gui_thread(self):
        """Redraw the buffered curve and refresh the on-screen panel."""
        self.on_update_tone()
        try:
            self.Refresh(True)
            self.Update()
        except RuntimeError:
            # The underlying C++ window may already be destroyed during teardown.
            pass

    def on_tone_panel_paint(self, event=None):
        """Blit the pre-rendered buffer to the curve panel on paint."""
        try:
            wx.BufferedPaintDC(self.curve_panel, self._tone_panel_buffer)
        except RuntimeError:
            pass

    def on_curve_mouse_move(self, event):
        """Drag handler: move the active control point while the mouse is captured.

        The y axis is flipped (255 - y) so the curve origin is bottom-left.
        In "point" mode the column under the cursor is set directly; otherwise
        the control point selected on left-down is moved.
        """
        if self.curve_panel.HasCapture():
            pos = event.GetPosition()
            try:
                v = 255 - pos[1]
                if self.op["type"] == "point":
                    current_x = pos[0]
                    if 0 <= current_x <= 255:
                        self.op["values"][pos[0]] = (pos[0], v)
                else:
                    self.op["values"][self.point] = (pos[0], v)
                self.context.signal("RasterWizard-Image")
                self.update_in_gui_thread()
            except (KeyError, IndexError):
                # Cursor outside the curve or op not fully populated; ignore.
                pass

    def on_curve_mouse_left_down(self, event):
        """Begin a drag: capture the mouse and pick the point to edit.

        In "point" mode the clicked column is set immediately; otherwise the
        nearest existing control point (squared-distance search) becomes the
        drag target for subsequent motion events.
        """
        if not self.curve_panel.HasCapture():
            self.curve_panel.CaptureMouse()
        distance = float("inf")
        pos = event.GetPosition()
        if self.op["type"] == "point":
            v = 255 - pos[1]
            self.point = pos[0]
            self.op["values"][pos[0]] = (pos[0], v)
            self.update_in_gui_thread()
        else:
            for i, q in enumerate(self.op["values"]):
                dx = pos[0] - q[0]
                dy = (255 - pos[1]) - q[1]
                d = dx * dx + dy * dy
                if d < distance:
                    distance = d
                    self.point = i

    def on_curve_mouse_left_up(self, event=None):
        """End a drag by releasing mouse capture."""
        if self.curve_panel.HasCapture():
            self.curve_panel.ReleaseMouse()

    def on_curve_mouse_lost(self, event=None):
        """Capture-lost handler; wx requires it to be bound, nothing to clean up."""
        pass

    def on_update_tone(self, event=None):
        """Render the current tone curve into the off-screen buffer.

        Uses RasterScripts.spline() or RasterScripts.line() to expand the
        control points into 256 samples, then strokes the polyline with
        y flipped so the curve reads bottom-up.
        """
        if self._tone_panel_buffer is None:
            return
        dc = wx.MemoryDC()
        dc.SelectObject(self._tone_panel_buffer)
        dc.Clear()
        dc.SetBackground(wx.GREEN_BRUSH)
        gc = wx.GraphicsContext.Create(dc)
        gc.PushState()
        gc.SetPen(wx.BLACK_PEN)
        tone_values = self.op["values"]
        if self.op["type"] == "spline":
            spline = RasterScripts.spline(tone_values)
            starts = [(i, 255 - spline[i]) for i in range(255)]
            ends = [(i, 255 - spline[i]) for i in range(1, 256)]
        else:
            # Point mode may contain None placeholders; drop them before lining.
            tone_values = [q for q in tone_values if q is not None]
            spline = RasterScripts.line(tone_values)
            starts = [(i, 255 - spline[i]) for i in range(255)]
            ends = [(i, 255 - spline[i]) for i in range(1, 256)]
        gc.StrokeLineSegments(starts, ends)
        gc.PopState()
        gc.Destroy()
        del dc

    def on_check_enable_tone(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """Toggle the tone stage on/off and refresh the preview image."""
        self.op["enable"] = self.check_enable_tone.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_button_reset_tone(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """Restore the curve captured at set_operation() time and redraw.

        The "values" list is mutated in place (clear + extend) so any other
        holder of the same list sees the reset too.
        """
        self.op["enable"] = self.original_op["enable"]
        self.op["type"] = self.original_op["type"]
        self.op["values"].clear()
        self.op["values"].extend(self.original_op["values"])
        self.context.signal("RasterWizard-Image")
        self.on_update_tone()
        self.update_in_gui_thread()
# end of class ToneCurvePanel
class SharpenPanel(wx.Panel):
    """RasterWizard stage panel for the unsharp-mask sharpen operation.

    Exposes sliders for strength (percent), blur radius, and threshold,
    each mirrored into a read-only text field.
    """

    def __init__(self, *args, **kwds):
        """Build widgets, apply properties/layout, and bind event handlers."""
        # begin wxGlade: SharpenPanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.check_enable_sharpen = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.button_reset_sharpen = wx.Button(self, wx.ID_ANY, _("Reset"))
        self.slider_sharpen_percent = wx.Slider(
            self, wx.ID_ANY, 500, 0, 1000, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_sharpen_percent = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.slider_sharpen_radius = wx.Slider(
            self, wx.ID_ANY, 20, 0, 50, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_sharpen_radius = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.slider_sharpen_threshold = wx.Slider(
            self, wx.ID_ANY, 6, 0, 50, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_sharpen_threshold = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.__set_properties()
        self.__do_layout()
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_enable_sharpen, self.check_enable_sharpen
        )
        self.Bind(
            wx.EVT_BUTTON, self.on_button_reset_sharpen, self.button_reset_sharpen
        )
        self.Bind(
            wx.EVT_SLIDER, self.on_slider_sharpen_percent, self.slider_sharpen_percent
        )
        self.Bind(wx.EVT_TEXT, self.on_text_sharpen_percent, self.text_sharpen_percent)
        self.Bind(
            wx.EVT_SLIDER, self.on_slider_sharpen_radius, self.slider_sharpen_radius
        )
        self.Bind(wx.EVT_TEXT, self.on_text_sharpen_radius, self.text_sharpen_radius)
        self.Bind(
            wx.EVT_SLIDER,
            self.on_slider_sharpen_threshold,
            self.slider_sharpen_threshold,
        )
        self.Bind(
            wx.EVT_TEXT, self.on_text_sharpen_threshold, self.text_sharpen_threshold
        )
        # end wxGlade
        # Populated later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None

    def __set_properties(self):
        """Set tooltips and default the enable checkbox to checked."""
        # begin wxGlade: SharpenPanel.__set_properties
        self.check_enable_sharpen.SetToolTip(_("Enable Sharpen"))
        self.check_enable_sharpen.SetValue(1)
        self.button_reset_sharpen.SetToolTip(_("Sharpen Reset"))
        self.slider_sharpen_percent.SetToolTip(_("Strength of sharpening in percent"))
        self.text_sharpen_percent.SetToolTip(_("amount of sharpening in %"))
        self.slider_sharpen_radius.SetToolTip(
            _("Blur radius for the sharpening operation")
        )
        self.text_sharpen_radius.SetToolTip(_("Sharpen radius amount"))
        # FIX: corrected tooltip typo "brighteness" -> "brightness".
        self.slider_sharpen_threshold.SetToolTip(
            _("Threshold controls the minimum brightness change to be sharpened.")
        )
        self.text_sharpen_threshold.SetToolTip(_("Sharpen Threshold Amount"))
        # end wxGlade

    def __do_layout(self):
        """Stack enable/reset row then percent, radius, and threshold rows."""
        # begin wxGlade: SharpenPanel.__do_layout
        sizer_sharpen = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Sharpen")), wx.VERTICAL
        )
        sizer_sharpen_threshold = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Threshold")), wx.HORIZONTAL
        )
        sizer_sharpen_radius = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Radius")), wx.HORIZONTAL
        )
        sizer_sharpen_percent = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Percent")), wx.HORIZONTAL
        )
        sizer_sharpen_main = wx.BoxSizer(wx.HORIZONTAL)
        sizer_sharpen_main.Add(self.check_enable_sharpen, 0, 0, 0)
        sizer_sharpen_main.Add(self.button_reset_sharpen, 0, 0, 0)
        sizer_sharpen.Add(sizer_sharpen_main, 0, wx.EXPAND, 0)
        sizer_sharpen_percent.Add(self.slider_sharpen_percent, 5, wx.EXPAND, 0)
        sizer_sharpen_percent.Add(self.text_sharpen_percent, 1, 0, 0)
        sizer_sharpen.Add(sizer_sharpen_percent, 0, wx.EXPAND, 0)
        sizer_sharpen_radius.Add(self.slider_sharpen_radius, 5, wx.EXPAND, 0)
        sizer_sharpen_radius.Add(self.text_sharpen_radius, 1, 0, 0)
        sizer_sharpen.Add(sizer_sharpen_radius, 0, wx.EXPAND, 0)
        sizer_sharpen_threshold.Add(self.slider_sharpen_threshold, 5, wx.EXPAND, 0)
        sizer_sharpen_threshold.Add(self.text_sharpen_threshold, 1, 0, 0)
        sizer_sharpen.Add(sizer_sharpen_threshold, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_sharpen)
        sizer_sharpen.Fit(self)
        self.Layout()
        # end wxGlade

    def set_operation(self, context, op, svg_image=None):
        """Bind a sharpen op dict to this panel and mirror it in the UI.

        :param context: kernel context used to emit refresh signals.
        :param op: mutable operation dict with "enable", "percent",
            "radius" and "threshold" keys.
        :param svg_image: unused here; kept for a uniform panel interface.
        """
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.check_enable_sharpen.SetValue(op["enable"])
        self.slider_sharpen_percent.SetValue(op["percent"])
        self.slider_sharpen_radius.SetValue(op["radius"])
        self.slider_sharpen_threshold.SetValue(op["threshold"])
        self.text_sharpen_percent.SetValue(str(op["percent"]))
        self.text_sharpen_radius.SetValue(str(op["radius"]))
        self.text_sharpen_threshold.SetValue(str(op["threshold"]))

    def on_check_enable_sharpen(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Toggle the sharpen stage on/off and refresh the preview image."""
        self.op["enable"] = self.check_enable_sharpen.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_button_reset_sharpen(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Restore values captured at set_operation() time and refresh."""
        self.op["percent"] = self.original_op["percent"]
        self.op["radius"] = self.original_op["radius"]
        self.op["threshold"] = self.original_op["threshold"]
        self.slider_sharpen_percent.SetValue(self.op["percent"])
        self.slider_sharpen_radius.SetValue(self.op["radius"])
        self.slider_sharpen_threshold.SetValue(self.op["threshold"])
        self.text_sharpen_percent.SetValue(str(self.op["percent"]))
        self.text_sharpen_radius.SetValue(str(self.op["radius"]))
        self.text_sharpen_threshold.SetValue(str(self.op["threshold"]))
        self.context.signal("RasterWizard-Image")

    def on_slider_sharpen_percent(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Store the percent slider value and refresh the preview."""
        self.op["percent"] = int(self.slider_sharpen_percent.GetValue())
        self.text_sharpen_percent.SetValue(str(self.op["percent"]))
        self.context.signal("RasterWizard-Image")

    def on_text_sharpen_percent(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """No-op: the percent text field is read-only."""
        pass

    # CONSISTENCY: event gains a None default to match the sibling handlers,
    # allowing programmatic invocation; existing wx dispatch is unaffected.
    def on_slider_sharpen_radius(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """Store the radius slider value and refresh the preview."""
        self.op["radius"] = int(self.slider_sharpen_radius.GetValue())
        self.text_sharpen_radius.SetValue(str(self.op["radius"]))
        self.context.signal("RasterWizard-Image")

    def on_text_sharpen_radius(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """No-op: the radius text field is read-only."""
        pass

    def on_slider_sharpen_threshold(
        self, event=None
    ):  # wxGlade: RasterWizard.<event_handler>
        """Store the threshold slider value and refresh the preview."""
        self.op["threshold"] = int(self.slider_sharpen_threshold.GetValue())
        self.text_sharpen_threshold.SetValue(str(self.op["threshold"]))
        self.context.signal("RasterWizard-Image")

    # CONSISTENCY: event gains a None default to match the sibling handlers.
    def on_text_sharpen_threshold(self, event=None):  # wxGlade: RasterWizard.<event_handler>
        """No-op: the threshold text field is read-only."""
        pass
# end of class SharpenPanel
class OutputPanel(wx.Panel):
    """Final RasterWizard stage: output enable and image replacement flags."""

    def __init__(self, *args, **kwds):
        """Build the two checkboxes, configure them, lay them out, and bind."""
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        # Filled in later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None
        self.check_enable_output = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.check_replace_output = wx.CheckBox(self, wx.ID_ANY, _("Replace Image"))
        self.__set_properties()
        self.__do_layout()
        for handler, widget in (
            (self.on_check_enable_output, self.check_enable_output),
            (self.on_check_replace_output, self.check_replace_output),
        ):
            self.Bind(wx.EVT_CHECKBOX, handler, widget)

    def __set_properties(self):
        """Tooltips and defaults; both boxes start checked."""
        self.check_enable_output.SetToolTip(_("Enable Output"))
        self.check_replace_output.SetToolTip(_("Replace image with this output"))
        self.check_enable_output.SetValue(1)
        self.check_replace_output.SetValue(1)
        # Replacement is currently mandatory, so the control stays disabled.
        self.check_replace_output.Enable(False)

    def __do_layout(self):
        """Single vertical static box containing both checkboxes."""
        output_box = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Output")), wx.VERTICAL
        )
        output_box.Add(self.check_enable_output, 0, 0, 0)
        output_box.Add(self.check_replace_output, 0, 0, 0)
        self.SetSizer(output_box)
        output_box.Fit(self)
        self.Layout()

    def set_operation(self, context, op, svg_image=None):
        """Bind an output op dict and reflect its flags in the checkboxes."""
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        try:
            self.check_enable_output.SetValue(op["enable"])
            self.check_replace_output.SetValue(op["replace"])
        except KeyError:
            # Older op dicts may lack these keys; keep the UI defaults.
            pass

    def on_check_enable_output(
        self, event=None
    ):  # wxGlade: OutputPanel.<event_handler>
        """Copy the enable checkbox state into the op."""
        self.op["enable"] = self.check_enable_output.GetValue()

    def on_check_replace_output(
        self, event=None
    ):  # wxGlade: OutputPanel.<event_handler>
        """Copy the replace checkbox state into the op."""
        self.op["replace"] = self.check_replace_output.GetValue()
# end of class OutputPanel
class BasicPanel(wx.Panel):
    """Minimal RasterWizard stage panel: a single enable toggle for an op."""

    def __init__(self, *args, **kwds):
        """Build the enable checkbox, configure it, lay it out, and bind."""
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        # Filled in later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None
        self.check_enable = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_CHECKBOX, self.on_check_enable, self.check_enable)

    def __set_properties(self):
        """Default the toggle to checked with an explanatory tooltip."""
        self.check_enable.SetToolTip(_("Enable Operation"))
        self.check_enable.SetValue(1)

    def __do_layout(self):
        """One vertical static box holding the enable checkbox."""
        enable_box = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Enable")), wx.VERTICAL
        )
        enable_box.Add(self.check_enable, 0, 0, 0)
        self.SetSizer(enable_box)
        enable_box.Fit(self)
        self.Layout()

    def set_operation(self, context, op, svg_image=None):
        """Bind an op dict; the toggle label shows the operation's name."""
        self.check_enable.SetLabel(_("Enable %s") % op["name"])
        self.check_enable.SetValue(op["enable"])
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)

    def on_check_enable(self, event=None):
        """Propagate the toggle into the op and refresh the preview image."""
        self.op["enable"] = self.check_enable.GetValue()
        self.context.signal("RasterWizard-Image")
# end of class OutputPanel
class ContrastPanel(wx.Panel):
    """RasterWizard stage panel for the contrast/brightness operation.

    Exposes two sliders, each spanning -127..127, mirrored into read-only
    text fields, plus enable and reset controls.
    """

    def __init__(self, *args, **kwds):
        """Build widgets, apply properties/layout, and bind event handlers."""
        # begin wxGlade: ContrastPanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        self.check_enable_contrast = wx.CheckBox(self, wx.ID_ANY, _("Enable"))
        self.button_reset_contrast = wx.Button(self, wx.ID_ANY, _("Reset"))
        self.slider_contrast_contrast = wx.Slider(
            self, wx.ID_ANY, 0, -127, 127, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_contrast_contrast = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.slider_contrast_brightness = wx.Slider(
            self, wx.ID_ANY, 0, -127, 127, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_contrast_brightness = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.__set_properties()
        self.__do_layout()
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_enable_contrast, self.check_enable_contrast
        )
        self.Bind(
            wx.EVT_BUTTON, self.on_button_reset_contrast, self.button_reset_contrast
        )
        self.Bind(
            wx.EVT_SLIDER,
            self.on_slider_contrast_contrast,
            self.slider_contrast_contrast,
        )
        self.Bind(
            wx.EVT_SLIDER,
            self.on_slider_contrast_brightness,
            self.slider_contrast_brightness,
        )
        # end wxGlade
        # Populated later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None

    def __set_properties(self):
        """Set tooltips and default the enable checkbox to checked."""
        # begin wxGlade: ContrastPanel.__set_properties
        self.check_enable_contrast.SetToolTip(_("Enable Contrast"))
        self.check_enable_contrast.SetValue(1)
        self.button_reset_contrast.SetToolTip(_("Reset Contrast"))
        self.slider_contrast_contrast.SetToolTip(_("Contrast amount"))
        self.text_contrast_contrast.SetToolTip(
            _("Contrast the lights and darks by how much?")
        )
        self.slider_contrast_brightness.SetToolTip(_("Brightness amount"))
        self.text_contrast_brightness.SetToolTip(
            _("Make the image how much more bright?")
        )
        # end wxGlade

    def __do_layout(self):
        """Stack enable/reset row then contrast and brightness slider rows."""
        # begin wxGlade: ContrastPanel.__do_layout
        sizer_contrast = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Contrast")), wx.VERTICAL
        )
        sizer_contrast_brightness = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Brightness Amount")), wx.HORIZONTAL
        )
        sizer_contrast_contrast = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Contrast Amount")), wx.HORIZONTAL
        )
        sizer_contrast_main = wx.BoxSizer(wx.HORIZONTAL)
        sizer_contrast_main.Add(self.check_enable_contrast, 0, 0, 0)
        sizer_contrast_main.Add(self.button_reset_contrast, 0, 0, 0)
        sizer_contrast.Add(sizer_contrast_main, 0, wx.EXPAND, 0)
        sizer_contrast_contrast.Add(self.slider_contrast_contrast, 5, wx.EXPAND, 0)
        sizer_contrast_contrast.Add(self.text_contrast_contrast, 1, 0, 0)
        sizer_contrast.Add(sizer_contrast_contrast, 0, wx.EXPAND, 0)
        sizer_contrast_brightness.Add(self.slider_contrast_brightness, 5, wx.EXPAND, 0)
        sizer_contrast_brightness.Add(self.text_contrast_brightness, 1, 0, 0)
        sizer_contrast.Add(sizer_contrast_brightness, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_contrast)
        sizer_contrast.Fit(self)
        self.Layout()
        # end wxGlade

    def set_operation(self, context, op, svg_image=None):
        """Bind a contrast op dict to this panel and mirror it in the UI.

        :param context: kernel context used to emit refresh signals.
        :param op: mutable operation dict with "enable", "contrast" and
            "brightness" keys (integer values within slider range — assumed;
            confirm against the op defaults defined elsewhere).
        :param svg_image: unused here; kept for a uniform panel interface.
        """
        self.context = context
        self.op = op
        self.original_op = deepcopy(op)
        self.check_enable_contrast.SetValue(self.op["enable"])
        self.text_contrast_contrast.SetValue(str(self.op["contrast"]))
        self.text_contrast_brightness.SetValue(str(self.op["brightness"]))
        self.slider_contrast_contrast.SetValue(self.op["contrast"])
        self.slider_contrast_brightness.SetValue(self.op["brightness"])

    def on_check_enable_contrast(
        self, event=None
    ):  # wxGlade: ContrastPanel.<event_handler>
        """Toggle the contrast stage on/off and refresh the preview image."""
        self.op["enable"] = self.check_enable_contrast.GetValue()
        self.context.signal("RasterWizard-Image")

    def on_button_reset_contrast(
        self, event=None
    ):  # wxGlade: ContrastPanel.<event_handler>
        """Restore values captured at set_operation() time and refresh."""
        self.op["contrast"] = self.original_op["contrast"]
        self.op["brightness"] = self.original_op["brightness"]
        self.text_contrast_contrast.SetValue(str(self.op["contrast"]))
        self.text_contrast_brightness.SetValue(str(self.op["brightness"]))
        self.slider_contrast_contrast.SetValue(self.op["contrast"])
        self.slider_contrast_brightness.SetValue(self.op["brightness"])
        self.context.signal("RasterWizard-Image")

    def on_slider_contrast_contrast(
        self, event=None
    ):  # wxGlade: ContrastPanel.<event_handler>
        """Store the contrast slider value and refresh the preview."""
        self.op["contrast"] = int(self.slider_contrast_contrast.GetValue())
        self.text_contrast_contrast.SetValue(str(self.op["contrast"]))
        self.context.signal("RasterWizard-Image")

    def on_slider_contrast_brightness(
        self, event=None
    ):  # wxGlade: ContrastPanel.<event_handler>
        """Store the brightness slider value and refresh the preview."""
        self.op["brightness"] = int(self.slider_contrast_brightness.GetValue())
        self.text_contrast_brightness.SetValue(str(self.op["brightness"]))
        self.context.signal("RasterWizard-Image")
# end of class ContrastPanel
class HalftonePanel(wx.Panel):
    def __init__(self, *args, **kwds):
        """Build halftone widgets, apply properties/layout, and bind handlers.

        Sliders: sample 0..50 (default 10), angle 0..90 degrees (default 22),
        oversample 0..50 (default 2), each mirrored into a read-only text field.
        """
        # begin wxGlade: HalftonePanel.__init__
        kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
        wx.Panel.__init__(self, *args, **kwds)
        # NOTE(review): these labels are not wrapped in _() unlike the other
        # panels — presumably an oversight; confirm before changing, since it
        # would alter the displayed strings in translated locales.
        self.check_enable_halftone = wx.CheckBox(self, wx.ID_ANY, "Enable")
        self.button_reset_halftone = wx.Button(self, wx.ID_ANY, "Reset")
        self.check_halftone_black = wx.CheckBox(self, wx.ID_ANY, "Black")
        self.slider_halftone_sample = wx.Slider(
            self, wx.ID_ANY, 10, 0, 50, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_halftone_sample = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.slider_halftone_angle = wx.Slider(
            self, wx.ID_ANY, 22, 0, 90, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_halftone_angle = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.slider_halftone_oversample = wx.Slider(
            self, wx.ID_ANY, 2, 0, 50, style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL
        )
        self.text_halftone_oversample = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )
        self.__set_properties()
        self.__do_layout()
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_enable_halftone, self.check_enable_halftone
        )
        self.Bind(
            wx.EVT_BUTTON, self.on_button_reset_halftone, self.button_reset_halftone
        )
        self.Bind(
            wx.EVT_CHECKBOX, self.on_check_halftone_black, self.check_halftone_black
        )
        self.Bind(
            wx.EVT_SLIDER, self.on_slider_halftone_sample, self.slider_halftone_sample
        )
        self.Bind(
            wx.EVT_SLIDER, self.on_slider_halftone_angle, self.slider_halftone_angle
        )
        self.Bind(
            wx.EVT_SLIDER,
            self.on_slider_halftone_oversample,
            self.slider_halftone_oversample,
        )
        # end wxGlade
        # Populated later by set_operation().
        self.context = None
        self.op = None
        self.original_op = None
def __set_properties(self):
# begin wxGlade: HalftonePanel.__set_properties
self.check_enable_halftone.SetToolTip(_("Enable Halftone"))
self.check_enable_halftone.SetValue(1)
self.button_reset_halftone.SetToolTip(_("Halftone Reset"))
self.check_halftone_black.SetToolTip(_("Use black rather than white dots"))
self.slider_halftone_sample.SetToolTip(_("Sample size for halftone dots"))
self.text_halftone_sample.SetToolTip(_("Halftone dot size"))
self.slider_halftone_angle.SetToolTip(_("Angle for halftone dots"))
self.text_halftone_angle.SetToolTip(_("Halftone dot angle"))
self.slider_halftone_oversample.SetToolTip(
_("Oversampling amount for halftone-dots")
)
self.text_halftone_oversample.SetToolTip(_("Halftone dot oversampling amount"))
# end wxGlade
    def __do_layout(self):
        """Stack the control row then sample, angle, and oversample rows."""
        # begin wxGlade: HalftonePanel.__do_layout
        sizer_halftone = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Halftone")), wx.VERTICAL
        )
        sizer_halftone_oversample = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Oversample")), wx.HORIZONTAL
        )
        sizer_halftone_angle = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Angle")), wx.HORIZONTAL
        )
        sizer_halftone_sample = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, _("Sample")), wx.HORIZONTAL
        )
        sizer_halftone_main = wx.BoxSizer(wx.HORIZONTAL)
        sizer_halftone_main.Add(self.check_enable_halftone, 0, 0, 0)
        sizer_halftone_main.Add(self.button_reset_halftone, 0, 0, 0)
        # (20, 20) spacer separates reset from the black-dot toggle.
        sizer_halftone_main.Add((20, 20), 0, 0, 0)
        sizer_halftone_main.Add(self.check_halftone_black, 0, 0, 0)
        sizer_halftone.Add(sizer_halftone_main, 0, wx.EXPAND, 0)
        sizer_halftone_sample.Add(self.slider_halftone_sample, 5, wx.EXPAND, 0)
        sizer_halftone_sample.Add(self.text_halftone_sample, 1, 0, 0)
        sizer_halftone.Add(sizer_halftone_sample, 0, wx.EXPAND, 0)
        sizer_halftone_angle.Add(self.slider_halftone_angle, 5, wx.EXPAND, 0)
        sizer_halftone_angle.Add(self.text_halftone_angle, 1, 0, 0)
        sizer_halftone.Add(sizer_halftone_angle, 0, wx.EXPAND, 0)
        sizer_halftone_oversample.Add(self.slider_halftone_oversample, 5, wx.EXPAND, 0)
        sizer_halftone_oversample.Add(self.text_halftone_oversample, 1, 0, 0)
        sizer_halftone.Add(sizer_halftone_oversample, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_halftone)
        sizer_halftone.Fit(self)
        self.Layout()
        # end wxGlade
def set_operation(self, context, op, svg_image=None):
    """Attach a halftone operation dict and sync every widget to it.

    A deep copy of *op* is kept as ``original_op`` so the reset button can
    restore the starting parameter values later.  *svg_image* is accepted
    for interface compatibility and is not used here.
    """
    self.context = context
    self.op = op
    self.original_op = deepcopy(op)
    self.check_enable_halftone.SetValue(self.op["enable"])
    self.check_halftone_black.SetValue(self.op["black"])
    # text first, then slider, for each numeric parameter in turn
    for key in ("sample", "angle", "oversample"):
        value = self.op[key]
        getattr(self, "text_halftone_" + key).SetValue(str(value))
        getattr(self, "slider_halftone_" + key).SetValue(value)
def on_check_enable_halftone(self, event=None):
    # wxGlade: HalftonePanel.<event_handler>
    """Copy the enable-checkbox state into the operation and re-render."""
    enabled = self.check_enable_halftone.GetValue()
    self.op["enable"] = enabled
    self.context.signal("RasterWizard-Image")
def on_button_reset_halftone(self, event=None):
    # wxGlade: HalftonePanel.<event_handler>
    """Restore black/sample/angle/oversample from the saved original op
    (the "enable" flag is not reset), resync every widget, and request a
    re-render."""
    for key in ("black", "sample", "angle", "oversample"):
        self.op[key] = self.original_op[key]
    self.check_enable_halftone.SetValue(self.op["enable"])
    self.check_halftone_black.SetValue(self.op["black"])
    for key in ("sample", "angle", "oversample"):
        value = self.op[key]
        getattr(self, "text_halftone_" + key).SetValue(str(value))
        getattr(self, "slider_halftone_" + key).SetValue(value)
    self.context.signal("RasterWizard-Image")
def on_check_halftone_black(self, event=None):
    # wxGlade: HalftonePanel.<event_handler>
    """Copy the black-dots checkbox state into the operation and re-render."""
    black_dots = self.check_halftone_black.GetValue()
    self.op["black"] = black_dots
    self.context.signal("RasterWizard-Image")
def on_slider_halftone_sample(self, event=None):
    # wxGlade: HalftonePanel.<event_handler>
    """Mirror the sample slider into op["sample"] and the text field."""
    sample = int(self.slider_halftone_sample.GetValue())
    self.op["sample"] = sample
    self.text_halftone_sample.SetValue(str(sample))
    self.context.signal("RasterWizard-Image")
def on_slider_halftone_angle(self, event=None):
    # wxGlade: HalftonePanel.<event_handler>
    """Mirror the angle slider into op["angle"] and the text field."""
    angle = int(self.slider_halftone_angle.GetValue())
    self.op["angle"] = angle
    self.text_halftone_angle.SetValue(str(angle))
    self.context.signal("RasterWizard-Image")
def on_slider_halftone_oversample(self, event=None):
    # wxGlade: HalftonePanel.<event_handler>
    """Mirror the oversample slider into op["oversample"] and the text field."""
    oversample = int(self.slider_halftone_oversample.GetValue())
    self.op["oversample"] = oversample
    self.text_halftone_oversample.SetValue(str(oversample))
    self.context.signal("RasterWizard-Image")
# end of class HalftonePanel
class RasterWizard(MWindow):
    """Top-level window hosting the RasterWizardPanel.

    MWindow supplies the generic window plumbing; this class creates the
    panel, registers it as a module delegate and sets the icon and title.
    """

    def __init__(self, *args, script=None, **kwds):
        # 605 x 636 are presumably the initial window dimensions expected
        # by MWindow -- TODO confirm against MWindow's signature.
        super().__init__(605, 636, *args, **kwds)
        self.panel = RasterWizardPanel(
            self, wx.ID_ANY, context=self.context, script=script
        )
        self.add_module_delegate(self.panel)
        _icon = wx.NullIcon
        _icon.CopyFromBitmap(icons8_fantasy_50.GetBitmap())
        self.SetIcon(_icon)
        self.SetTitle(_("Raster Wizard"))

    @staticmethod
    def sub_register(kernel):
        """Register a project-toolbar button that toggles this window."""
        kernel.register(
            "button/project/RasterWizard",
            {
                "label": _("RasterWizard"),
                "icon": icons8_fantasy_50,
                "tip": _("Run RasterWizard"),
                "action": lambda v: kernel.console("window toggle RasterWizard\n"),
            },
        )

    def restore(self, *args, script=None, **kwargs):
        """Update the panel's wizard script when the window is restored
        with a new one; a None script leaves the panel untouched."""
        if script is not None:
            self.panel.script = script
            self.panel.set_wizard_script(script)

    def window_open(self):
        """Window lifecycle hook: show the panel's pane."""
        self.panel.pane_show()

    def window_close(self):
        """Window lifecycle hook: hide the panel's pane."""
        self.panel.pane_hide()
| 38.962683 | 92 | 0.632929 |
d71b1e55267b749fd6db1d5dd0e80b07ff2a0278 | 666 | py | Python | google/cloud/__init__.py | tmdiep/python-pubsublite | 8edef6708fab60ce29c040f3de60783fe31b55ae | [
"Apache-2.0"
] | 15 | 2020-11-10T15:36:52.000Z | 2022-03-06T15:00:25.000Z | google/cloud/__init__.py | tmdiep/python-pubsublite | 8edef6708fab60ce29c040f3de60783fe31b55ae | [
"Apache-2.0"
] | 110 | 2020-11-11T18:14:31.000Z | 2022-03-30T22:42:17.000Z | google/cloud/__init__.py | tmdiep/python-pubsublite | 8edef6708fab60ce29c040f3de60783fe31b55ae | [
"Apache-2.0"
] | 6 | 2020-11-13T19:24:27.000Z | 2022-01-29T08:13:14.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Declare ``google.cloud`` as a pkg_resources-style namespace package so
# that multiple google-cloud-* distributions can install subpackages
# under the same import prefix.
import pkg_resources
pkg_resources.declare_namespace(__name__)
| 33.3 | 74 | 0.75976 |
3a88cd407e6271bd09e3362616f570b23579f0dd | 2,466 | py | Python | PyScripts/New-KeeperSharedFolder.py | tonylanglet/keepersecurity-powershell | 5446e6ff789372d92bc7e7c9e22e4179dcec6c0f | [
"MIT"
] | 2 | 2021-09-08T03:00:24.000Z | 2022-02-07T17:52:28.000Z | PyScripts/New-KeeperSharedFolder.py | tonylanglet/keepersecurity-powershell | 5446e6ff789372d92bc7e7c9e22e4179dcec6c0f | [
"MIT"
] | 1 | 2022-02-07T13:49:38.000Z | 2022-02-07T13:49:38.000Z | PyScripts/New-KeeperSharedFolder.py | tonylanglet/keepersecurity-powershell | 5446e6ff789372d92bc7e7c9e22e4179dcec6c0f | [
"MIT"
] | 1 | 2021-12-09T23:29:22.000Z | 2021-12-09T23:29:22.000Z | import sys
import getopt
import getpass
import string
import argparse
from keepercommander.record import Record
from keepercommander.commands.folder import FolderMakeCommand
from keepercommander.params import KeeperParams
from keepercommander import display, api
my_params = KeeperParams()
# MAIN FUNCTION
def main(argv):
# Authentication credentials
authUsername = None
authPassword = None
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--all', type=bool, help='anyone has all permissions by default')
parser.add_argument('--manage-users', dest='manage_users', type=bool, help='anyone can manage users by default')
parser.add_argument('--manage-records', dest='manage_records', type=bool, help='anyone can manage records by default')
parser.add_argument('--can-share', dest='can_share', type=bool, help='anyone can share records by default')
parser.add_argument('--can-edit', dest='can_edit', type=bool, help='anyone can edit records by default')
parser.add_argument('--name', nargs='?', type=str, action='store', help='folder path folderUID')
parser.add_argument('-auser', '--ausername', type=str, help='Authentication username', required=True)
parser.add_argument('-apass', '--apassword', type=str, help='Authentication password', required=True)
args = parser.parse_args()
Parameters = dict()
Parameters.update({'shared_folder':True})
if args.all is not None:
Parameters.update({'all':args.all})
if args.manage_users is not None:
Parameters.update({'manage_users':args.manage_users})
if args.manage_records is not None:
Parameters.update({'manage_records':args.manage_records})
if args.can_share is not None:
Parameters.update({'can_share':args.can_share})
if args.can_edit is not None:
Parameters.update({'can_edit':args.can_edit})
if args.name is not None:
Parameters.update({'folder':args.name})
if args.ausername:
authUsername = args.ausername
if args.apassword:
authPassword = args.apassword
while not my_params.user:
my_params.user = authUsername
while not my_params.password:
my_params.password = authPassword
api.sync_down(my_params)
# KEEPER COMMAND
command = FolderMakeCommand()
result = command.execute(my_params, **Parameters)
print(result)
return result
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 36.80597 | 122 | 0.710462 |
be214f87ef07a5ec226cf71b88fd79b84aba8677 | 6,537 | py | Python | py/src/cfn/api.py | mpaulweeks/fgc | 2fa77896a59f55be0a68a7dffef76e3dc29f2497 | [
"MIT"
] | 20 | 2016-06-30T05:48:30.000Z | 2021-06-05T21:42:41.000Z | py/src/cfn/api.py | mpaulweeks/fgc | 2fa77896a59f55be0a68a7dffef76e3dc29f2497 | [
"MIT"
] | null | null | null | py/src/cfn/api.py | mpaulweeks/fgc | 2fa77896a59f55be0a68a7dffef76e3dc29f2497 | [
"MIT"
] | 9 | 2016-07-04T04:44:07.000Z | 2019-10-12T21:20:35.000Z | #!/usr/bin/env python3
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import json
from py.src.logger import (
log,
log_exception,
)
from py.src.settings import (
DATABASE,
)
from py.src.error import (
CookieInvalidException,
MatchQueryException,
)
from py.src.message import (
send_error_message,
)
from py.src.cfn.cookie import (
wd_cookie,
load_cookie_status,
save_cookie_status,
)
from py.src.store import (
save_match_list,
save_ranking_results,
set_player_updated_at,
subscribe_to_new_player,
Player,
)
from py.src.match.model.cache import (
MatchCache,
)
from py.src.match.model.cfn import (
CFNPlayerSearchModel,
)
# Silence urllib3's InsecureRequestWarning: every request below is made
# with verify=False (see query_cfn), which would otherwise spam the log.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Endpoint templates.  The first %s is a placeholder for the session's
# request number, filled in by query_cfn(); any remaining %s (player id,
# page number, search string) is substituted by the caller beforehand.
LICENSE_URL = '/bentov2/sf5/myinfo/%s/fighterlicense/%s'
RANKING_URL = '/bentov2/sf5/contents/%s/ranking;page=%s'
MATCH_URL = '/bentov2/sf5/myinfo/%s/match/%s;opponentfull:matchtype=0'  # this is just ranked
RIVAL_URL = '/bentov2/sf5/myinfo/%s/searchrival/fightersid;id=%s:sort=lp:page=1:sortdir=d'
def create_session(cookie=None):
    """Return a requests.Session wired for the CFN production API.

    *cookie* defaults to the module-level ``wd_cookie``.  HTTPS requests
    get a single retry, and the headers mimic the game client.
    """
    session = requests.Session()
    session.mount("https://", requests.adapters.HTTPAdapter(max_retries=1))
    session.headers.update(
        {
            'Cookie': cookie or wd_cookie,
            'User-Agent': 'game=KiwiGame, engine=UE4, version=0',
            'Host': 'api.prod.capcomfighters.net',
            'Connection': 'Keep-Alive',
            'Cache-Control': 'no-cache',
        }
    )
    return session
def query_cookie_status(session):
    """Query the fighter-license endpoint and persist the cookie status.

    Returns a ``(cookie_status, response)`` tuple.
    """
    response = query_cfn(session, LICENSE_URL % ("%s", ""))
    cookie_status = save_cookie_status(response)
    return cookie_status, response
def get_cookie_status(session):
    """Return the cached cookie status, re-querying CFN when stale/absent."""
    status = load_cookie_status()
    if status is None or status.is_old():
        status, _ = query_cookie_status(session)
    return status
def test_cookie_status(session):
    """Alert and raise CookieInvalidException when the cookie is rejected."""
    status = get_cookie_status(session)
    if not status.is_invalid:
        return
    send_error_message("test_cookie_status failed")
    raise CookieInvalidException
def query_cfn(session, url):
    """GET a CFN endpoint, substituting the session's request number.

    The Cookie header looks like ``<key>=<number>%3A...``; the number is
    extracted and spliced into the first ``%s`` of *url* before the GET
    against the production API host (TLS verification disabled).
    """
    cookie = session.headers['Cookie']
    req_number = cookie.split('%3A')[0].split('=')[1]
    full_url = 'https://api.prod.capcomfighters.net%s' % (url % req_number)
    return session.get(full_url, verify=False)
def record_top_players(session):
    """Fetch ranking pages 1-10 from CFN and persist the combined results."""
    ranking_results = []
    for page in range(1, 11):
        url = RANKING_URL % ('%s', page)
        rank_response = query_cfn(session, url)
        if rank_response.status_code != 200:
            # Keep the generic Exception type so existing callers still
            # catch it, but include context instead of a bare, message-less
            # raise.
            raise Exception(
                'ranking page %s returned HTTP %s'
                % (page, rank_response.status_code)
            )
        rank_data = rank_response.json()['response'][0]['rankingresult']
        ranking_results.extend(rank_data)
    save_ranking_results(ranking_results)
def _query_player_match_history(session, pid):
    """Fetch the ranked match results for one player id.

    Raises MatchQueryException on a non-200 response or when the payload
    cannot be parsed; both cases log the raw response body first so the
    failure can be diagnosed later.
    """
    url = MATCH_URL % ('%s', pid)
    res = query_cfn(session, url)
    if res.status_code != 200:
        log("got a non-200 response:\n%s" % res.text)
        raise MatchQueryException()
    try:
        match_results = res.json()['response'][0]['matchresults']
    except Exception as e:
        # Broad catch on purpose: any malformed payload (bad JSON, missing
        # keys) is treated the same as an HTTP failure.
        log_exception(e)
        log('failed to extract json, dumping res:\n%s' % res.text)
        raise MatchQueryException()
    else:
        return match_results
def _bulk_query_match_history(session, pids):
    """Fetch match history for every player id in *pids*.

    Failed queries are tolerated: the player is recorded with an empty
    match list (so it still gets marked as "updated" and a few bad players
    cannot starve the rest) and the error flag is set.

    Returns ``(is_error, match_list, player_matches)`` where *match_list*
    is the flat list of all matches and *player_matches* maps each pid to
    its own matches.
    """
    match_list = []
    player_matches = {}
    is_error = False
    total = len(pids)
    # enumerate replaces the original's hand-maintained counter.
    for count, pid in enumerate(pids, start=1):
        log("Fetching match history for %s (%s/%s)" % (pid, count, total))
        try:
            matches = _query_player_match_history(session, pid)
        except MatchQueryException:
            # Pretend we got 0 matches so the player still counts as updated.
            matches = []
            is_error = True
        match_list.extend(matches)
        player_matches[pid] = matches
    return is_error, match_list, player_matches
def batch_query_match_history(session, pids):
    """Query match history for *pids* in batches of 50, persisting as we go.

    Each batch's matches are saved immediately; afterwards every queried
    player is stamped as updated and all matches are folded into the
    MatchCache inside a single database transaction.  Returns True if any
    batch hit an error.
    """
    pids_list = list(pids)
    total = len(pids_list)
    batch = 50
    player_matches = {}
    any_error = False
    log("Begin querying %s players" % total)
    for idx in range(0, total, batch):
        bound = min(idx + batch, total)
        log('Attempting to query players %s-%s of %s' % (idx, bound, total))
        is_error, matches, chunk_matches = _bulk_query_match_history(
            session, pids_list[idx:bound]
        )
        any_error = any_error or is_error
        save_match_list(matches)
        player_matches.update(chunk_matches)
    set_player_updated_at(player_matches.keys())
    with DATABASE.atomic():
        cache = MatchCache()
        for pid, matches in player_matches.items():
            cache.process_matches(pid, matches)
        cache.save()
    return any_error
def fix_player_names(session):
    """Backfill display names for Player rows stored with an empty name.

    For each such player the fighter-license endpoint is queried and the
    returned ``displayid`` is written back to that Player row.
    """
    players = (
        Player
        .select()
        .where(Player.name == '')
    )
    player_ids = [p.cfn_id for p in players]
    log("found %s players with missing names" % len(player_ids))
    if len(player_ids) == 0:
        return
    for pid in player_ids:
        url = LICENSE_URL % ('%s', pid)
        res = query_cfn(session, url)
        try:
            new_name = res.json()['response'][0]['displayid']
        except json.decoder.JSONDecodeError as e:
            # Dump the raw body before re-raising so the bad response can
            # be inspected.
            log(res.text)
            raise e
        log('%s -> %s' % (pid, new_name))
        (
            Player
            .update(name=new_name)
            .where(Player.cfn_id == pid)
            .execute()
        )
# Characters that apparently confuse CFN's name search -- names containing
# them are additionally queried piece by piece (see add_player_by_name).
PROBLEM_CHARS = frozenset('_')
def _query_rival(session, query_name, player_name):
    """Search CFN for *query_name* and subscribe to the exact-name match.

    Each candidate is logged; on a case-insensitive name match the player
    is subscribed to and their canonical CFN name returned, else None.
    """
    response = query_cfn(session, RIVAL_URL % ('%s', query_name))
    candidates = response.json()['response'][0]['searchresult']
    target = player_name.lower()
    for candidate in candidates:
        cfn_model = CFNPlayerSearchModel(candidate)
        log('logging cfn player')
        log(cfn_model.__dict__)
        if cfn_model.name.lower() == target:
            subscribe_to_new_player(cfn_model)
            return cfn_model.name
    return None
def add_player_by_name(session, player_name):
    """Look up *player_name* on CFN and subscribe to the first exact match.

    Names containing problem characters are additionally searched piece by
    piece.  Returns the canonical CFN name, or None when nothing matched.
    Raises CookieInvalidException when the session cookie is invalid.
    """
    test_cookie_status(session)  # might raise
    attempts = [player_name]
    for sep in PROBLEM_CHARS:
        if sep in player_name:
            attempts.extend(player_name.split(sep))
    for attempt in attempts:
        match = _query_rival(session, attempt, player_name)
        if match:
            return match
    return None
| 29.313901 | 93 | 0.646474 |
ccb9241290c89ad59eaceed482187c9812cd6eb3 | 523 | py | Python | api/apps/nodes/migrations/0002_notes_parent.py | DjangoStudyTeam/django-China-api | 80e88e85ddfeee96a2562cbc42b7e37985c17c2d | [
"MIT"
] | 1 | 2021-10-15T03:58:34.000Z | 2021-10-15T03:58:34.000Z | api/apps/nodes/migrations/0002_notes_parent.py | DjangoStudyTeam/django-China-api | 80e88e85ddfeee96a2562cbc42b7e37985c17c2d | [
"MIT"
] | 19 | 2021-10-14T13:36:23.000Z | 2022-03-22T13:47:05.000Z | api/apps/nodes/migrations/0002_notes_parent.py | DjangoStudyTeam/django-China-api | 80e88e85ddfeee96a2562cbc42b7e37985c17c2d | [
"MIT"
] | 1 | 2021-10-19T05:34:05.000Z | 2021-10-19T05:34:05.000Z | # Generated by Django 3.2.8 on 2021-12-14 22:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations): adds a nullable,
    # self-referential "parent" foreign key to the notes model so each
    # note can point at a parent note (reverse accessor: "children").
    # Deleting a parent cascades to its children.

    dependencies = [
        ('nodes', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='notes',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='nodes.notes', verbose_name='parent'),
        ),
    ]
| 26.15 | 170 | 0.644359 |
20714bc7fcdfde0bdc6bbd2fe59b2008e064f064 | 3,424 | py | Python | uniauth/decorators.py | omizrahi99/uniAuth | be33d3be10f82e955deffe7ad4b34eeafca4195a | [
"Apache-2.0"
] | null | null | null | uniauth/decorators.py | omizrahi99/uniAuth | be33d3be10f82e955deffe7ad4b34eeafca4195a | [
"Apache-2.0"
] | null | null | null | uniauth/decorators.py | omizrahi99/uniAuth | be33d3be10f82e955deffe7ad4b34eeafca4195a | [
"Apache-2.0"
] | null | null | null | import logging
from django.conf import settings
from django.contrib.auth import logout
from django.http import HttpResponseBadRequest
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.translation import gettext as _
from django.shortcuts import render
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT
from .utils import repr_saml, get_idp_config
logger = logging.getLogger(__name__)
# User-facing explanation rendered on the 403 error page when a request
# arrives without a live SAML authentication request in the session.
_not_valid_saml_msg = _('Not a valid SAML Session, Probably your request is '
                        'expired or you refreshed your page getting in a stale '
                        'request. Please come back to your SP and renew '
                        'the authentication request')
def store_params_in_session(request):
    """Extract the SAML SSO parameters from the HTTP request and stash
    them in the session.

    POST requests are treated as HTTP-POST binding, everything else as
    HTTP-Redirect.  Note this is not strictly void (the original docstring
    claimed it returns nothing): when no SAMLRequest parameter is present
    it returns a rendered 403 error page and leaves the session untouched;
    on success it populates ``request.session['SAML']`` and returns None.
    """
    if request.method == 'POST':
        passed_data = request.POST
        binding = BINDING_HTTP_POST
    else:
        passed_data = request.GET
        binding = BINDING_HTTP_REDIRECT

    saml_request = passed_data.get('SAMLRequest')
    if saml_request:
        msg = "SAML request [\n{}]"
        logger.debug(msg.format(repr_saml(saml_request, b64=True)))
    else:
        msg = _('not a valid SAMLRequest: {}').format(_('AuthnRequest is missing. Please Retry'))
        logger.info('SAML Request absent from {}'.format(request))
        return render(request, 'error.html',
                      {'exception_type': msg,
                       'exception_msg': _('Please renew your SAML Request'),
                       'extra_message': _not_valid_saml_msg},
                      status=403)

    request.session['SAML'] = {}
    request.session['SAML']['SAMLRequest'] = saml_request
    request.session['SAML']['Binding'] = binding
    request.session['SAML']['RelayState'] = passed_data.get('RelayState', '')
def store_params_in_session_func(func_to_decorate):
    """``store_params_in_session`` as a function decorator.

    Stores the SAML request parameters in the session before running the
    wrapped view.  If extraction fails -- either by raising or by returning
    an error response -- the wrapped view is never invoked and a 403 error
    page is returned instead.
    """
    def new_func(*original_args, **original_kwargs):
        request = original_args[0]
        try:
            error_response = store_params_in_session(request)
            if error_response is not None:
                # Bug fix: store_params_in_session returns a rendered 403
                # page when the SAMLRequest parameter is missing.
                # Previously that return value was silently discarded and
                # the wrapped view ran without session['SAML'] populated.
                return error_response
            return func_to_decorate(*original_args, **original_kwargs)
        except Exception as e:
            msg = _('not a valid SAMLRequest: {}').format(e)
            return render(request, 'error.html',
                          {'exception_type': msg,
                           'exception_msg': _('Please renew your SAML Request'),
                           'extra_message': _not_valid_saml_msg},
                          status=403)
    return new_func
def require_saml_request(func_to_decorate):
    """Decorator that blocks views reached without a stored SAML request.

    If the session lacks a 'SAML' entry, or that entry has no
    'SAMLRequest', a 403 error page is rendered instead of calling the
    wrapped view.  (The original docstring was copy-pasted from
    store_params_in_session_func.)
    """
    def new_func(*original_args, **original_kwargs):
        request = original_args[0]
        if not request.session.get('SAML') or \
           not request.session.get('SAML', {}).get('SAMLRequest'):
            return render(request, 'error.html',
                          {'exception_type': _("You cannot access to this service directly"),
                           'exception_msg': _('Please renew your SAML Request'),
                           'extra_message': _not_valid_saml_msg},
                          status=403)
        return func_to_decorate(*original_args, **original_kwargs)
    return new_func
| 39.813953 | 97 | 0.631133 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.