hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c35d5860cc2768a17999b15772ec1eef57bef33 | 5,683 | py | Python | pymtl3/dsl/ComponentLevel5.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 152 | 2020-06-03T02:34:11.000Z | 2022-03-30T04:16:45.000Z | pymtl3/dsl/ComponentLevel5.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 139 | 2019-05-29T00:37:09.000Z | 2020-05-17T16:49:26.000Z | pymtl3/dsl/ComponentLevel5.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 22 | 2020-05-18T13:42:05.000Z | 2022-03-11T08:37:51.000Z | """
========================================================================
ComponentLevel5.py
========================================================================
We allow CallerPort to be connected to CalleePort
Author : Shunning Jiang
Date : Dec 29, 2018
"""
from .ComponentLevel1 import ComponentLevel1
from .ComponentLevel2 import ComponentLevel2
from .ComponentLevel4 import ComponentLevel4
from .Connectable import CalleePort, CallerPort, Const, Interface, MethodPort, Signal
from .errors import InvalidConnectionError, MultiWriterError
from .NamedObject import NamedObject
from .Placeholder import Placeholder
# This method_port is a syntactic sugar to create a CalleePort
# Note that for a simple method port we currently don't care about type
def method_port( method ):
  """Decorator that tags *method* so elaboration wraps it in a CalleePort.

  This is a syntactic sugar; for a simple method port we currently do not
  care about the method's type. The function object itself is returned
  unchanged apart from the marker attribute.
  """
  setattr( method, "_callee_port", True )
  return method
class ComponentLevel5( ComponentLevel4 ):
  """Component level that adds method-port support on top of ComponentLevel4.

  CallerPort objects may be connected to CalleePort objects; during
  elaboration these connections are resolved into method nets.
  """

  #-----------------------------------------------------------------------
  # Private methods
  #-----------------------------------------------------------------------

  def _handle_decorated_methods( s ):
    """Replace every @method_port-decorated method with a CalleePort."""
    # NOTE(review): only the class's own __dict__ is scanned here, so a
    # decorated method inherited from a base class appears not to be
    # wrapped -- confirm this is intended.
    for x in s.__class__.__dict__:
      method = getattr( s, x )
      # We identify decorated method port here
      if hasattr( method, "_callee_port" ):
        setattr( s, x, CalleePort( method = method ) )

  # Override
  def _construct( s ):
    """ We override _construct here to add method binding. Basically
    we do this after the class is constructed but before the construct()
    elaboration happens."""
    if not s._dsl.constructed:

      # Merge the actual keyword args and those args set by set_parameter
      if s._dsl.param_tree is None:
        kwargs = s._dsl.kwargs
      elif s._dsl.param_tree.leaf is None:
        kwargs = s._dsl.kwargs
      else:
        kwargs = s._dsl.kwargs.copy()
        if "construct" in s._dsl.param_tree.leaf:
          more_args = s._dsl.param_tree.leaf[ "construct" ]
          kwargs.update( more_args )

      # Bind method ports before calling the user's construct().
      s._handle_decorated_methods()

      # Same as parent class _construct
      s.construct( *s._dsl.args, **kwargs )
      s._dsl.constructed = True

  def _connect_method_ports( s, o1, o2 ):
    """Record an undirected connection between two method ports."""
    s._dsl.adjacency[o1].add( o2 )
    s._dsl.adjacency[o2].add( o1 )
    # Keep the order in which connections were made for later passes.
    s._dsl.connect_order.append( (o1, o2) )

  # Override
  def _connect_dispatch( s, o1, o2, o1_connectable, o2_connectable ):
    """Dispatch a connect() call based on operand types.

    Extends the parent's dispatch with a MethodPort-to-MethodPort case.
    """
    if o1_connectable and o2_connectable:
      # if both connectable, dispatch signal-signal and interface-interface
      if isinstance( o1, Signal ) and isinstance(o2, Signal ):
        s._connect_signal_signal( o1, o2 )
      elif isinstance( o1, Interface ) and isinstance( o2, Interface ):
        s._connect_interfaces( o1, o2 )
      # Methodport added here
      elif isinstance( o1, MethodPort ) and isinstance( o2, MethodPort ):
        s._connect_method_ports( o1, o2 )
      else:
        raise InvalidConnectionError("{} cannot be connected to {}: {} != {}" \
              .format(repr(o1), repr(o2), type(o1), type(o2)) )
    else:
      # One is connectable, we make sure it's o1
      if o2_connectable:
        o1, o2 = o2, o1
      assert isinstance( o1, Signal ), f"Cannot connect {o2} to {o1!r} of {type(o1)}."
      s._connect_signal_const( o1, o2 )

  def _resolve_method_connections( s ):
    """Group connected method ports into nets and pick each net's writer.

    Returns a list of (writer, net) pairs where the writer is the single
    CalleePort that carries an actual method (or lives on a Placeholder).
    Raises MultiWriterError when a net contains two candidate writers.
    """

    # First of all, bfs the "forest" to find out all nets
    nets = s._floodfill_nets( s._dsl.all_method_ports, s._dsl.all_adjacency )

    # All CalleePort are "writers" because they have actual methods
    ret = []
    for net in nets:
      writer = None
      for member in net:
        if isinstance( member, CalleePort ):
          # A CalleePort counts as the writer if it has a real method, or
          # if it belongs to a Placeholder (method supplied later).
          if member.method is not None or \
             isinstance( member.get_host_component(), Placeholder ):
            if writer is None:
              writer = member
            else:
              raise MultiWriterError( \
              "Two-method conflict \"{}\", \"{}\" in the following net:\n - {}".format(
                repr(member), repr(writer),
                "\n - ".join([repr(x) for x in net])) )
        else:
          assert isinstance( member, CallerPort ), "We don't allow connecting method " \
                 "port to other ports of {} type".format( member.__class__ )

      # Every net must end up with exactly one callable writer.
      assert writer is not None, "This method net has no actual method to call.\n- {}" \
             .format( '\n- '.join([ repr(x) for x in net]) )
      ret.append( (writer, net) )
    return ret

  # TODO Check if all method net port directions are correct

  #-----------------------------------------------------------------------
  # elaborate
  #-----------------------------------------------------------------------

  # We still reuse the elaborate template by adding functionalities to
  # sub-functions called by elaborate

  # Override
  def _elaborate_declare_vars( s ):
    """Extend parent bookkeeping with a set collecting all method ports."""
    super()._elaborate_declare_vars()
    s._dsl.all_method_ports = set()

  # However, we need to override the whole function here because we want
  # to add some fine-grained functionalities to avoid redundant isinstance
  # Override
  def _elaborate_collect_all_vars( s ):
    """Walk all named objects once, bucketing signals, components, and
    method ports, then resolve both value nets and method nets."""
    for c in s._dsl.all_named_objects:
      if isinstance( c, Signal ):
        s._dsl.all_signals.add( c )
      elif isinstance( c, ComponentLevel1 ):
        s._dsl.all_components.add( c )
        s._collect_vars( c )
      # Added here
      elif isinstance( c, MethodPort ):
        s._dsl.all_method_ports.add( c )

    s._dsl.all_value_nets = s._resolve_value_connections()
    # Added here
    s._dsl.all_method_nets = s._resolve_method_connections()
    s._dsl._has_pending_value_connections = False
    s._dsl._has_pending_method_connections = False
| 35.298137 | 110 | 0.602147 | from .ComponentLevel1 import ComponentLevel1
from .ComponentLevel2 import ComponentLevel2
from .ComponentLevel4 import ComponentLevel4
from .Connectable import CalleePort, CallerPort, Const, Interface, MethodPort, Signal
from .errors import InvalidConnectionError, MultiWriterError
from .NamedObject import NamedObject
from .Placeholder import Placeholder
def method_port( method ):
method._callee_port = True
return method
class ComponentLevel5( ComponentLevel4 ):
#-----------------------------------------------------------------------
# Private methods
#-----------------------------------------------------------------------
def _handle_decorated_methods( s ):
for x in s.__class__.__dict__:
method = getattr( s, x )
# We identify decorated method port here
if hasattr( method, "_callee_port" ):
setattr( s, x, CalleePort( method = method ) )
# Override
def _construct( s ):
if not s._dsl.constructed:
# Merge the actual keyword args and those args set by set_parameter
if s._dsl.param_tree is None:
kwargs = s._dsl.kwargs
elif s._dsl.param_tree.leaf is None:
kwargs = s._dsl.kwargs
else:
kwargs = s._dsl.kwargs.copy()
if "construct" in s._dsl.param_tree.leaf:
more_args = s._dsl.param_tree.leaf[ "construct" ]
kwargs.update( more_args )
s._handle_decorated_methods()
# Same as parent class _construct
s.construct( *s._dsl.args, **kwargs )
s._dsl.constructed = True
def _connect_method_ports( s, o1, o2 ):
s._dsl.adjacency[o1].add( o2 )
s._dsl.adjacency[o2].add( o1 )
s._dsl.connect_order.append( (o1, o2) )
# Override
def _connect_dispatch( s, o1, o2, o1_connectable, o2_connectable ):
if o1_connectable and o2_connectable:
# if both connectable, dispatch signal-signal and interface-interface
if isinstance( o1, Signal ) and isinstance(o2, Signal ):
s._connect_signal_signal( o1, o2 )
elif isinstance( o1, Interface ) and isinstance( o2, Interface ):
s._connect_interfaces( o1, o2 )
# Methodport added here
elif isinstance( o1, MethodPort ) and isinstance( o2, MethodPort ):
s._connect_method_ports( o1, o2 )
else:
raise InvalidConnectionError("{} cannot be connected to {}: {} != {}" \
.format(repr(o1), repr(o2), type(o1), type(o2)) )
else:
# One is connectable, we make sure it's o1
if o2_connectable:
o1, o2 = o2, o1
assert isinstance( o1, Signal ), f"Cannot connect {o2} to {o1!r} of {type(o1)}."
s._connect_signal_const( o1, o2 )
def _resolve_method_connections( s ):
nets = s._floodfill_nets( s._dsl.all_method_ports, s._dsl.all_adjacency )
ret = []
for net in nets:
writer = None
for member in net:
if isinstance( member, CalleePort ):
if member.method is not None or \
isinstance( member.get_host_component(), Placeholder ):
if writer is None:
writer = member
else:
raise MultiWriterError( \
"Two-method conflict \"{}\", \"{}\" in the following net:\n - {}".format(
repr(member), repr(writer),
"\n - ".join([repr(x) for x in net])) )
else:
assert isinstance( member, CallerPort ), "We don't allow connecting method " \
"port to other ports of {} type".format( member.__class__ )
assert writer is not None, "This method net has no actual method to call.\n- {}" \
.format( '\n- '.join([ repr(x) for x in net]) )
ret.append( (writer, net) )
return ret
# TODO Check if all method net port directions are correct
#-----------------------------------------------------------------------
# elaborate
#-----------------------------------------------------------------------
# We still reuse the elaborate template by adding functionalities to
# sub-functions called by elaborate
# Override
def _elaborate_declare_vars( s ):
super()._elaborate_declare_vars()
s._dsl.all_method_ports = set()
# However, we need to override the whole function here because we want
# to add some fine-grained functionalities to avoid reduntant isinstance
# Override
def _elaborate_collect_all_vars( s ):
for c in s._dsl.all_named_objects:
if isinstance( c, Signal ):
s._dsl.all_signals.add( c )
elif isinstance( c, ComponentLevel1 ):
s._dsl.all_components.add( c )
s._collect_vars( c )
# Added here
elif isinstance( c, MethodPort ):
s._dsl.all_method_ports.add( c )
s._dsl.all_value_nets = s._resolve_value_connections()
# Added here
s._dsl.all_method_nets = s._resolve_method_connections()
s._dsl._has_pending_value_connections = False
s._dsl._has_pending_method_connections = False
| true | true |
1c35d5ef64b773afab4d2baa905b6a3c1f17a8aa | 5,270 | py | Python | discord/calls.py | Werseter/discord.py | 00a659c6526b2445162b52eaf970adbd22c6d35d | [
"MIT"
] | null | null | null | discord/calls.py | Werseter/discord.py | 00a659c6526b2445162b52eaf970adbd22c6d35d | [
"MIT"
] | null | null | null | discord/calls.py | Werseter/discord.py | 00a659c6526b2445162b52eaf970adbd22c6d35d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime
from . import utils
from .enums import VoiceRegion, try_enum
from .member import VoiceState
class CallMessage:
    """A group call message from Discord.

    Only messages whose type is equivalent to :attr:`MessageType.call`
    carry one of these.

    Attributes
    -----------
    ended_timestamp: Optional[datetime.datetime]
        Naive UTC datetime at which the call finished, or ``None`` while
        the call is still in progress.
    participants: List[:class:`User`]
        The users taking part in this call.
    message: :class:`Message`
        The message this call information belongs to.
    """

    def __init__(self, message, **kwargs):
        self.message = message
        self.ended_timestamp = utils.parse_time(kwargs.get('ended_timestamp'))
        self.participants = kwargs.get('participants')

    @property
    def call_ended(self):
        """:obj:`bool`: Whether the call has finished."""
        return self.ended_timestamp is not None

    @property
    def channel(self):
        """:class:`GroupChannel`: The private channel this message lives in."""
        return self.message.channel

    @property
    def duration(self):
        """Compute how long the call has been (or was) running.

        For an ongoing call, the duration elapsed so far is returned.

        Returns
        ---------
        datetime.timedelta
            The timedelta object representing the duration.
        """
        end = self.ended_timestamp
        if end is None:
            end = datetime.datetime.utcnow()
        return end - self.message.created_at
class GroupCall:
    """The live state of a Discord group call.

    Always accompanied by a :class:`CallMessage` carrying the call metadata.

    Attributes
    -----------
    call: :class:`CallMessage`
        The call message tied to this group call.
    unavailable: :obj:`bool`
        Whether this group call is unavailable.
    ringing: List[:class:`User`]
        Users currently being rung to join the call.
    region: :class:`VoiceRegion`
        The guild region hosting the group call.
    """

    def __init__(self, **kwargs):
        self.call = kwargs.get('call')
        self.unavailable = kwargs.get('unavailable')
        self._voice_states = {}
        for raw_state in kwargs.get('voice_states', []):
            self._update_voice_state(raw_state)
        self._update(**kwargs)

    def _update(self, **kwargs):
        # Refresh region and the list of users currently being rung.
        self.region = try_enum(VoiceRegion, kwargs.get('region'))
        channel = self.call.channel
        by_id = {user.id: user for user in channel.recipients}
        by_id[channel.me.id] = channel.me
        ringing_ids = kwargs.get('ringing', [])
        # Unknown ids resolve to None and are dropped.
        self.ringing = [u for u in (by_id.get(i) for i in ringing_ids) if u]

    def _update_voice_state(self, data):
        user_id = int(data['user_id'])
        if data['channel_id'] is not None:
            self._voice_states[user_id] = VoiceState(data=data, channel=self.channel)
        else:
            # A null channel means the user left the voice channel.
            self._voice_states.pop(user_id, None)

    @property
    def connected(self):
        """A property that returns the :obj:`list` of :class:`User` that are currently in this call."""
        channel = self.channel
        present = [u for u in channel.recipients if self.voice_state_for(u) is not None]
        if self.voice_state_for(channel.me) is not None:
            present.append(channel.me)
        return present

    @property
    def channel(self):
        """:class:`GroupChannel`: The channel the group call is in."""
        return self.call.channel

    def voice_state_for(self, user):
        """Retrieve the :class:`VoiceState` for a given :class:`User`.

        Parameters
        ------------
        user: :class:`User`
            The user whose voice state is wanted.

        Returns
        --------
        Optional[:class:`VoiceState`]
            The user's voice state, or ``None`` if the user has none.
        """
        return self._voice_states.get(user.id)
| 33.782051 | 103 | 0.660911 |
import datetime
from . import utils
from .enums import VoiceRegion, try_enum
from .member import VoiceState
class CallMessage:
def __init__(self, message, **kwargs):
self.message = message
self.ended_timestamp = utils.parse_time(kwargs.get('ended_timestamp'))
self.participants = kwargs.get('participants')
@property
def call_ended(self):
return self.ended_timestamp is not None
@property
def channel(self):
return self.message.channel
@property
def duration(self):
if self.ended_timestamp is None:
return datetime.datetime.utcnow() - self.message.created_at
else:
return self.ended_timestamp - self.message.created_at
class GroupCall:
def __init__(self, **kwargs):
self.call = kwargs.get('call')
self.unavailable = kwargs.get('unavailable')
self._voice_states = {}
for state in kwargs.get('voice_states', []):
self._update_voice_state(state)
self._update(**kwargs)
def _update(self, **kwargs):
self.region = try_enum(VoiceRegion, kwargs.get('region'))
lookup = {u.id: u for u in self.call.channel.recipients}
me = self.call.channel.me
lookup[me.id] = me
self.ringing = list(filter(None, map(lookup.get, kwargs.get('ringing', []))))
def _update_voice_state(self, data):
user_id = int(data['user_id'])
if data['channel_id'] is None:
self._voice_states.pop(user_id, None)
else:
self._voice_states[user_id] = VoiceState(data=data, channel=self.channel)
@property
def connected(self):
ret = [u for u in self.channel.recipients if self.voice_state_for(u) is not None]
me = self.channel.me
if self.voice_state_for(me) is not None:
ret.append(me)
return ret
@property
def channel(self):
return self.call.channel
def voice_state_for(self, user):
return self._voice_states.get(user.id)
| true | true |
1c35d6843446e8e93931c4d347edc5d320dcc788 | 520 | py | Python | backend_sqlalchemy/backend_app/models/findingsRel.py | jiz148/medical_app | 2f8b3f299ff6a87e62ac7483b6d2bac156a08874 | [
"MIT"
] | null | null | null | backend_sqlalchemy/backend_app/models/findingsRel.py | jiz148/medical_app | 2f8b3f299ff6a87e62ac7483b6d2bac156a08874 | [
"MIT"
] | null | null | null | backend_sqlalchemy/backend_app/models/findingsRel.py | jiz148/medical_app | 2f8b3f299ff6a87e62ac7483b6d2bac156a08874 | [
"MIT"
] | null | null | null | from backend_sqlalchemy.backend_app.db import db
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import declarative_base, relationship, backref
from backend_sqlalchemy.backend_app.models.visit import VisitModel
from backend_sqlalchemy.backend_app.models.findings import FindingsModel
from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey, Integer, String
base = automap_base()
class FindingsRel(base):
    # Automapped ORM class reflected from the existing 'FindingsRel' table;
    # columns and relationships are filled in when base.prepare() reflects
    # the database schema.
    __tablename__ = 'FindingsRel'
base.prepare(db.engine, reflect=True)
| 30.588235 | 90 | 0.834615 | from backend_sqlalchemy.backend_app.db import db
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import declarative_base, relationship, backref
from backend_sqlalchemy.backend_app.models.visit import VisitModel
from backend_sqlalchemy.backend_app.models.findings import FindingsModel
from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey, Integer, String
base = automap_base()
class FindingsRel(base):
__tablename__ = 'FindingsRel'
base.prepare(db.engine, reflect=True)
| true | true |
1c35d71d1c705db39e3a1901e9c33bae29f5887b | 13,792 | py | Python | calico/felix/test/test_frules.py | fasaxc/felix | b5d58c0e9bad5fd3bfa81fc6ff5633dd40265622 | [
"Apache-2.0"
] | null | null | null | calico/felix/test/test_frules.py | fasaxc/felix | b5d58c0e9bad5fd3bfa81fc6ff5633dd40265622 | [
"Apache-2.0"
] | null | null | null | calico/felix/test/test_frules.py | fasaxc/felix | b5d58c0e9bad5fd3bfa81fc6ff5633dd40265622 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~
Tests for fiptables. Much of this module is tested in test_felix, but this covers
some parts that are not.
"""
from copy import copy
import logging
import mock
import unittest
import calico.felix.frules as frules
from calico.felix.futils import IPV4, IPV6, FailedSystemCall
import calico.felix.ipsets
import calico.felix.test.stub_ipsets as stub_ipsets
# Expected state
expected_ipsets = stub_ipsets.IpsetState()
# Logger
log = logging.getLogger(__name__)
class TestUpdateIpsets(unittest.TestCase):
    """Tests for frules.update_ipsets using the stub_ipsets stand-in.

    Each test builds a rule list, records the entries those rules should
    produce in ``expected_ipsets``, runs update_ipsets against the stub
    module, and finally compares stub state against the expectation.
    """

    @classmethod
    def setUpClass(cls):
        # Completely replace the ipsets modules.
        cls.real_ipsets = calico.felix.ipsets
        frules.ipsets = stub_ipsets

    @classmethod
    def tearDownClass(cls):
        # Reinstate the modules we overwrote
        frules.ipsets = cls.real_ipsets

    def setUp(self):
        stub_ipsets.reset()

        # Set the expected IP tables state to be clean.
        expected_ipsets.reset()

    def create_ipsets(self, family):
        """Create the six ipsets (real + tmp) in both stub and expected state.

        ``family`` is "inet" (IPv4) or "inet6" (IPv6)."""
        stub_ipsets.create("ipset_port", "hash:net,port", family)
        stub_ipsets.create("ipset_addr", "hash:net", family)
        stub_ipsets.create("ipset_icmp", "hash:net", family)
        expected_ipsets.create("ipset_port", "hash:net,port", family)
        expected_ipsets.create("ipset_addr", "hash:net", family)
        expected_ipsets.create("ipset_icmp", "hash:net", family)
        stub_ipsets.create("tmp_ipset_port", "hash:net,port", family)
        stub_ipsets.create("tmp_ipset_addr", "hash:net", family)
        stub_ipsets.create("tmp_ipset_icmp", "hash:net", family)
        expected_ipsets.create("tmp_ipset_port", "hash:net,port", family)
        expected_ipsets.create("tmp_ipset_addr", "hash:net", family)
        expected_ipsets.create("tmp_ipset_icmp", "hash:net", family)

        if family == "inet":
            addr = "9.8.7.6/24"
        else:
            addr = "9:8:7::6/64"

        # Shove some junk into ipsets that will be tidied away.
        # Note: the junk is added only to the stub state, not to
        # expected_ipsets, so update_ipsets must remove it.
        stub_ipsets.add("ipset_addr", addr)
        stub_ipsets.add("ipset_port", addr + ",tcp:123")
        stub_ipsets.add("ipset_icmp", addr)

    def tearDown(self):
        pass

    def test_empty_ipsets(self):
        """
        Empty ipsets.
        """
        description = "Description : blah"
        suffix = "whatever"
        rule_list = []

        self.create_ipsets("inet")

        frules.update_ipsets(IPV4,
                             description,
                             suffix,
                             rule_list,
                             "ipset_addr",
                             "ipset_port",
                             "ipset_icmp",
                             "tmp_ipset_addr",
                             "tmp_ipset_port",
                             "tmp_ipset_icmp")

        stub_ipsets.check_state(expected_ipsets)

    def test_ipv4_ipsets(self):
        """
        IPv4 ipsets: invalid rules are silently ignored, valid rules
        populate the addr / port / icmp ipsets.
        """
        description = "description"
        suffix = "suffix"
        rule_list = []
        default_cidr = "1.2.3.4/24"

        self.create_ipsets("inet")

        # Ignored rules
        rule_list.append({ 'blah': "junk" }) # no CIDR
        rule_list.append({ 'cidr': "junk" }) # junk CIDR
        rule_list.append({ 'cidr': "::/64" }) # IPv6, not v4
        rule_list.append({ 'cidr': default_cidr,
                           'port': 123 }) # port, no protocol
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': "blah" }) # bad port
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': ["blah", "bloop"] }) # bad port range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [0, 123] }) # bad port in range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1, 2, 3] }) # not two in range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1] }) # not two in range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'port': "1" }) # port not allowed
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "ipv6-icmp",
                           'port': "1" }) # port not allowed
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'icmp_code': "1" }) # code without type
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "blah",
                           'port': "1" }) # port not allowed for protocol

        # Better rules
        rule_list.append({ 'cidr': "1.2.3.4/24" })
        expected_ipsets.add("ipset_addr", "1.2.3.4/24")

        # A /0 CIDR is split into two /1 halves below (ipset cannot
        # represent a zero-prefix member directly).
        rule_list.append({ 'cidr': "10.0.10.0/0",
                           'protocol': "tcp"})
        expected_ipsets.add("ipset_port", "0.0.0.0/1,tcp:1-65535")
        expected_ipsets.add("ipset_port", "128.0.0.0/1,tcp:1-65535")

        rule_list.append({ 'cidr': "1.0.0.1/8",
                           'protocol': "udp",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1.0.0.1/8,udp:2-10")

        rule_list.append({ 'cidr': "1.0.0.2/8",
                           'protocol': "sctp",
                           'port': "2"})
        expected_ipsets.add("ipset_port", "1.0.0.2/8,sctp:2")

        rule_list.append({ 'cidr': "1.0.0.3/8",
                           'protocol': "udplite",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1.0.0.3/8,udplite:2-10")

        rule_list.append({ 'cidr': "1.0.0.4/8",
                           'protocol': "icmp" })
        expected_ipsets.add("ipset_icmp", "1.0.0.4/8")

        rule_list.append({ 'cidr': "1.0.0.5/8",
                           'protocol': "icmp",
                           'icmp_type': 123})
        expected_ipsets.add("ipset_port", "1.0.0.5/8,icmp:123/0")

        rule_list.append({ 'cidr': "1.0.0.6/8",
                           'protocol': "icmp",
                           'icmp_type': "type"})
        expected_ipsets.add("ipset_port", "1.0.0.6/8,icmp:type")

        rule_list.append({ 'cidr': "1.0.0.7/8",
                           'protocol': "icmp",
                           'icmp_type': 123,
                           'icmp_code': "code"})
        expected_ipsets.add("ipset_port", "1.0.0.7/8,icmp:123/code")

        rule_list.append({ 'cidr': "1.0.0.8/8",
                           'protocol': "icmp",
                           'icmp_type': "type",
                           'icmp_code': "code"}) # code ignored
        expected_ipsets.add("ipset_port", "1.0.0.8/8,icmp:type")

        rule_list.append({ 'cidr': "1.0.0.9/8",
                           'protocol': "blah" })
        expected_ipsets.add("ipset_port", "1.0.0.9/8,blah:0")

        frules.update_ipsets(IPV4,
                             description,
                             suffix,
                             rule_list,
                             "ipset_addr",
                             "ipset_port",
                             "ipset_icmp",
                             "tmp_ipset_addr",
                             "tmp_ipset_port",
                             "tmp_ipset_icmp")

        stub_ipsets.check_state(expected_ipsets)

    def test_ipv6_ipsets(self):
        """
        IPv6 ipsets: mirrors test_ipv4_ipsets for the inet6 family.
        """
        description = "description"
        suffix = "suffix"
        rule_list = []
        default_cidr = "2001::1:2:3:4/24"

        self.create_ipsets("inet6")

        # Ignored rules
        rule_list.append({ 'blah': "junk" }) # no CIDR
        rule_list.append({ 'cidr': "junk" }) # junk CIDR
        rule_list.append({ 'cidr': "1.2.3.4/32" }) # IPv4, not v6
        rule_list.append({ 'cidr': default_cidr,
                           'port': 123 }) # port, no protocol
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': "blah" }) # bad port
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': ["blah", "bloop"] }) # bad port range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [0, 123] }) # bad port in range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1, 2, 3] }) # not two in range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1] }) # not two in range
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'port': "1" }) # port not allowed
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "ipv6-icmp",
                           'port': "1" }) # port not allowed
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'icmp_code': "1" }) # code without type
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "blah",
                           'port': "1" }) # port not allowed for protocol

        # Better rules
        rule_list.append({ 'cidr': "1:2:3::4/24" })
        expected_ipsets.add("ipset_addr", "1:2:3::4/24")

        # A /0 CIDR is split into two /1 halves (see IPv4 test).
        rule_list.append({ 'cidr': "1:2:3::/0",
                           'protocol': "tcp"})
        expected_ipsets.add("ipset_port", "::/1,tcp:1-65535")
        expected_ipsets.add("ipset_port", "8000::/1,tcp:1-65535")

        rule_list.append({ 'cidr': "1::1/8",
                           'protocol': "udp",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1::1/8,udp:2-10")

        rule_list.append({ 'cidr': "1::2/8",
                           'protocol': "sctp",
                           'port': "2"})
        expected_ipsets.add("ipset_port", "1::2/8,sctp:2")

        rule_list.append({ 'cidr': "1::3/8",
                           'protocol': "udplite",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1::3/8,udplite:2-10")

        rule_list.append({ 'cidr': "1::4/8",
                           'protocol': "ipv6-icmp" })
        expected_ipsets.add("ipset_icmp", "1::4/8")

        rule_list.append({ 'cidr': "1::5/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': 123})
        expected_ipsets.add("ipset_port", "1::5/8,ipv6-icmp:123/0")

        rule_list.append({ 'cidr': "1::6/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': "type"})
        expected_ipsets.add("ipset_port", "1::6/8,ipv6-icmp:type")

        rule_list.append({ 'cidr': "1::7/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': 123,
                           'icmp_code': "code"})
        expected_ipsets.add("ipset_port", "1::7/8,ipv6-icmp:123/code")

        rule_list.append({ 'cidr': "1::8/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': "type",
                           'icmp_code': "code"}) # code ignored
        expected_ipsets.add("ipset_port", "1::8/8,ipv6-icmp:type")

        rule_list.append({ 'cidr': "1::9/8",
                           'protocol': "blah" })
        expected_ipsets.add("ipset_port", "1::9/8,blah:0")

        frules.update_ipsets(IPV6,
                             description,
                             suffix,
                             rule_list,
                             "ipset_addr",
                             "ipset_port",
                             "ipset_icmp",
                             "tmp_ipset_addr",
                             "tmp_ipset_port",
                             "tmp_ipset_icmp")

        stub_ipsets.check_state(expected_ipsets)

    def test_exception(self):
        """
        Test exception when adding ipset value.

        update_ipsets is expected to swallow the FailedSystemCall (no
        assertRaises here) and leave the ipset state untouched, i.e. the
        pristine state created by create_ipsets.
        """
        description = "description"
        suffix = "suffix"
        rule_list = [{'cidr': "1.2.3.4/24"}]
        self.create_ipsets("inet")
        with mock.patch('calico.felix.test.stub_ipsets.add',
                        side_effect=FailedSystemCall("oops", [], 1, "", "")):
            frules.update_ipsets(IPV4,
                                 description,
                                 suffix,
                                 rule_list,
                                 "ipset_addr",
                                 "ipset_port",
                                 "ipset_icmp",
                                 "tmp_ipset_addr",
                                 "tmp_ipset_port",
                                 "tmp_ipset_icmp")
        stub_ipsets.check_state(expected_ipsets)
| 38.52514 | 81 | 0.47339 |
from copy import copy
import logging
import mock
import unittest
import calico.felix.frules as frules
from calico.felix.futils import IPV4, IPV6, FailedSystemCall
import calico.felix.ipsets
import calico.felix.test.stub_ipsets as stub_ipsets
expected_ipsets = stub_ipsets.IpsetState()
log = logging.getLogger(__name__)
class TestUpdateIpsets(unittest.TestCase):
    """Exercise frules.update_ipsets() against the stub_ipsets test double.

    The real calico.felix.ipsets module is swapped for stub_ipsets for the
    duration of the class, and every test compares the stub's final state
    against the module-level `expected_ipsets` fixture.
    """

    @classmethod
    def setUpClass(cls):
        # Swap in the ipsets stub so no real system state is touched.
        cls.real_ipsets = calico.felix.ipsets
        frules.ipsets = stub_ipsets

    @classmethod
    def tearDownClass(cls):
        # Restore the real module for any other test classes.
        frules.ipsets = cls.real_ipsets

    def setUp(self):
        stub_ipsets.reset()
        expected_ipsets.reset()

    def create_ipsets(self, family):
        """Create the six ipsets update_ipsets() operates on.

        The live (non-tmp) stub sets are seeded with one stale entry each
        that is NOT mirrored in expected_ipsets, so every test implicitly
        verifies that update_ipsets() flushes stale state.
        """
        stub_ipsets.create("ipset_port", "hash:net,port", family)
        stub_ipsets.create("ipset_addr", "hash:net", family)
        stub_ipsets.create("ipset_icmp", "hash:net", family)
        expected_ipsets.create("ipset_port", "hash:net,port", family)
        expected_ipsets.create("ipset_addr", "hash:net", family)
        expected_ipsets.create("ipset_icmp", "hash:net", family)
        stub_ipsets.create("tmp_ipset_port", "hash:net,port", family)
        stub_ipsets.create("tmp_ipset_addr", "hash:net", family)
        stub_ipsets.create("tmp_ipset_icmp", "hash:net", family)
        expected_ipsets.create("tmp_ipset_port", "hash:net,port", family)
        expected_ipsets.create("tmp_ipset_addr", "hash:net", family)
        expected_ipsets.create("tmp_ipset_icmp", "hash:net", family)
        # Stale entries only in the stub; they must be gone afterwards.
        if family == "inet":
            addr = "9.8.7.6/24"
        else:
            addr = "9:8:7::6/64"
        stub_ipsets.add("ipset_addr", addr)
        stub_ipsets.add("ipset_port", addr + ",tcp:123")
        stub_ipsets.add("ipset_icmp", addr)

    def tearDown(self):
        pass

    def _update_ipsets(self, ip_version, description, suffix, rule_list):
        """Run update_ipsets() with the fixed ipset names shared by every
        test in this class (extracted to avoid repeating the 10-arg call)."""
        frules.update_ipsets(ip_version,
                             description,
                             suffix,
                             rule_list,
                             "ipset_addr",
                             "ipset_port",
                             "ipset_icmp",
                             "tmp_ipset_addr",
                             "tmp_ipset_port",
                             "tmp_ipset_icmp")

    def test_empty_ipsets(self):
        """An empty rule list leaves all ipsets empty (stale entries flushed)."""
        self.create_ipsets("inet")
        self._update_ipsets(IPV4, "Description : blah", "whatever", [])
        stub_ipsets.check_state(expected_ipsets)

    def test_ipv4_ipsets(self):
        """IPv4 rules: invalid rules are skipped, valid ones are mapped to
        the addr / port / icmp ipsets as recorded in expected_ipsets."""
        description = "description"
        suffix = "suffix"
        rule_list = []
        default_cidr = "1.2.3.4/24"
        self.create_ipsets("inet")
        # Invalid / ignored rules: no matching entry is added to
        # expected_ipsets, so update_ipsets() must drop them.
        rule_list.append({ 'blah': "junk" })
        rule_list.append({ 'cidr': "junk" })
        rule_list.append({ 'cidr': "::/64" })
        rule_list.append({ 'cidr': default_cidr,
                           'port': 123 })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': "blah" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': ["blah", "bloop"] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [0, 123] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1, 2, 3] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'port': "1" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "ipv6-icmp",
                           'port': "1" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'icmp_code': "1" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "blah",
                           'port': "1" })
        # Valid rules, each paired with the expected resulting entry.
        rule_list.append({ 'cidr': "1.2.3.4/24" })
        expected_ipsets.add("ipset_addr", "1.2.3.4/24")
        rule_list.append({ 'cidr': "10.0.10.0/0",
                           'protocol': "tcp"})
        expected_ipsets.add("ipset_port", "0.0.0.0/1,tcp:1-65535")
        expected_ipsets.add("ipset_port", "128.0.0.0/1,tcp:1-65535")
        rule_list.append({ 'cidr': "1.0.0.1/8",
                           'protocol': "udp",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1.0.0.1/8,udp:2-10")
        rule_list.append({ 'cidr': "1.0.0.2/8",
                           'protocol': "sctp",
                           'port': "2"})
        expected_ipsets.add("ipset_port", "1.0.0.2/8,sctp:2")
        rule_list.append({ 'cidr': "1.0.0.3/8",
                           'protocol': "udplite",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1.0.0.3/8,udplite:2-10")
        rule_list.append({ 'cidr': "1.0.0.4/8",
                           'protocol': "icmp" })
        expected_ipsets.add("ipset_icmp", "1.0.0.4/8")
        rule_list.append({ 'cidr': "1.0.0.5/8",
                           'protocol': "icmp",
                           'icmp_type': 123})
        expected_ipsets.add("ipset_port", "1.0.0.5/8,icmp:123/0")
        rule_list.append({ 'cidr': "1.0.0.6/8",
                           'protocol': "icmp",
                           'icmp_type': "type"})
        expected_ipsets.add("ipset_port", "1.0.0.6/8,icmp:type")
        rule_list.append({ 'cidr': "1.0.0.7/8",
                           'protocol': "icmp",
                           'icmp_type': 123,
                           'icmp_code': "code"})
        expected_ipsets.add("ipset_port", "1.0.0.7/8,icmp:123/code")
        rule_list.append({ 'cidr': "1.0.0.8/8",
                           'protocol': "icmp",
                           'icmp_type': "type",
                           'icmp_code': "code"})
        expected_ipsets.add("ipset_port", "1.0.0.8/8,icmp:type")
        rule_list.append({ 'cidr': "1.0.0.9/8",
                           'protocol': "blah" })
        expected_ipsets.add("ipset_port", "1.0.0.9/8,blah:0")
        self._update_ipsets(IPV4, description, suffix, rule_list)
        stub_ipsets.check_state(expected_ipsets)

    def test_ipv6_ipsets(self):
        """IPv6 analogue of test_ipv4_ipsets (family inet6, ipv6-icmp)."""
        description = "description"
        suffix = "suffix"
        rule_list = []
        default_cidr = "2001::1:2:3:4/24"
        self.create_ipsets("inet6")
        # Invalid / ignored rules.
        rule_list.append({ 'blah': "junk" })
        rule_list.append({ 'cidr': "junk" })
        rule_list.append({ 'cidr': "1.2.3.4/32" })
        rule_list.append({ 'cidr': default_cidr,
                           'port': 123 })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': "blah" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': ["blah", "bloop"] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [0, 123] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1, 2, 3] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "tcp",
                           'port': [1] })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'port': "1" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "ipv6-icmp",
                           'port': "1" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "icmp",
                           'icmp_code': "1" })
        rule_list.append({ 'cidr': default_cidr,
                           'protocol': "blah",
                           'port': "1" })
        # Valid rules, each paired with the expected resulting entry.
        rule_list.append({ 'cidr': "1:2:3::4/24" })
        expected_ipsets.add("ipset_addr", "1:2:3::4/24")
        rule_list.append({ 'cidr': "1:2:3::/0",
                           'protocol': "tcp"})
        expected_ipsets.add("ipset_port", "::/1,tcp:1-65535")
        expected_ipsets.add("ipset_port", "8000::/1,tcp:1-65535")
        rule_list.append({ 'cidr': "1::1/8",
                           'protocol': "udp",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1::1/8,udp:2-10")
        rule_list.append({ 'cidr': "1::2/8",
                           'protocol': "sctp",
                           'port': "2"})
        expected_ipsets.add("ipset_port", "1::2/8,sctp:2")
        rule_list.append({ 'cidr': "1::3/8",
                           'protocol': "udplite",
                           'port': [2,10]})
        expected_ipsets.add("ipset_port", "1::3/8,udplite:2-10")
        rule_list.append({ 'cidr': "1::4/8",
                           'protocol': "ipv6-icmp" })
        expected_ipsets.add("ipset_icmp", "1::4/8")
        rule_list.append({ 'cidr': "1::5/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': 123})
        expected_ipsets.add("ipset_port", "1::5/8,ipv6-icmp:123/0")
        rule_list.append({ 'cidr': "1::6/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': "type"})
        expected_ipsets.add("ipset_port", "1::6/8,ipv6-icmp:type")
        rule_list.append({ 'cidr': "1::7/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': 123,
                           'icmp_code': "code"})
        expected_ipsets.add("ipset_port", "1::7/8,ipv6-icmp:123/code")
        rule_list.append({ 'cidr': "1::8/8",
                           'protocol': "ipv6-icmp",
                           'icmp_type': "type",
                           'icmp_code': "code"})
        expected_ipsets.add("ipset_port", "1::8/8,ipv6-icmp:type")
        rule_list.append({ 'cidr': "1::9/8",
                           'protocol': "blah" })
        expected_ipsets.add("ipset_port", "1::9/8,blah:0")
        self._update_ipsets(IPV6, description, suffix, rule_list)
        stub_ipsets.check_state(expected_ipsets)

    def test_exception(self):
        """If the underlying ipset add fails, the live sets are untouched."""
        rule_list = [{'cidr': "1.2.3.4/24"}]
        self.create_ipsets("inet")
        with mock.patch('calico.felix.test.stub_ipsets.add',
                        side_effect=FailedSystemCall("oops", [], 1, "", "")):
            self._update_ipsets(IPV4, "description", "suffix", rule_list)
        stub_ipsets.check_state(expected_ipsets)
from fabric.api import cd, lcd, env, local, parallel, serial
from fabric.api import put, run, settings, sudo, prefix
from fabric.operations import prompt
from fabric.contrib import django
from fabric.contrib import files
from fabric.state import connections
# from fabric.colors import red, green, blue, cyan, magenta, white, yellow
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.ec2.connection import EC2Connection
import yaml
from pprint import pprint
from collections import defaultdict
from contextlib import contextmanager as _contextmanager
import os
import time
import sys
import re
# django.setup()
try:
import digitalocean
except ImportError:
print("Digital Ocean's API not loaded. Install python-digitalocean.")
django.settings_module('newsblur_web.settings')
try:
from django.conf import settings as django_settings
except ImportError:
print(" ---> Django not installed yet.")
django_settings = None
# ============
# = DEFAULTS =
# ============

# Default deployment paths and SSH connection settings for all hosts.
env.NEWSBLUR_PATH = "/srv/newsblur"      # checkout of this repository
env.SECRETS_PATH = "/srv/secrets-newsblur"  # private keys/configs repo
env.VENDOR_PATH = "/srv/code"            # third-party source builds
env.user = 'sclay'
env.key_filename = os.path.join(env.SECRETS_PATH, 'keys/newsblur.key')
env.connection_attempts = 10
env.do_ip_to_hostname = {}  # filled in by do_roledefs(): droplet IP -> name
env.colorize_errors = True
# =========
# = Roles =
# =========

# Load role -> host definitions from the secrets repo; fall back to a
# minimal default mapping when the config is missing or unparsable.
try:
    hosts_path = os.path.expanduser(os.path.join(env.SECRETS_PATH, 'configs/hosts.yml'))
    # safe_load avoids arbitrary object construction from the YAML file,
    # and the context manager guarantees the handle is closed.
    with open(hosts_path) as hosts_file:
        roles = yaml.safe_load(hosts_file)
    for role_name, hosts in list(roles.items()):
        if isinstance(hosts, dict):
            # Hosts may be a mapping of hostname -> metadata; flatten to a
            # plain list of hostnames.
            roles[role_name] = [host for host in list(hosts.keys())]
    env.roledefs = roles
except Exception:
    print(" ***> No role definitions found in %s. Using default roles." % hosts_path)
    env.roledefs = {
        'app'   : ['app01.newsblur.com'],
        'db'    : ['db01.newsblur.com'],
        'task'  : ['task01.newsblur.com'],
    }
def do_roledefs(split=False, debug=False):
    """Query DigitalOcean for all droplets and merge them into env.roledefs.

    The role of a droplet is the alphabetic prefix of its name (e.g.
    "app23" -> "app"). Also rebuilds env.do_ip_to_hostname.

    Returns the hostname mapping (role -> [{'name', 'address'}]) when
    `split` is true, otherwise the raw droplet list.
    """
    doapi = digitalocean.Manager(token=django_settings.DO_TOKEN_FABRIC)
    droplets = doapi.get_all_droplets()
    env.do_ip_to_hostname = {}
    hostnames = {}
    for droplet in droplets:
        role = re.split(r"([0-9]+)", droplet.name)[0]
        role_ips = env.roledefs.setdefault(role, [])
        named_hosts = hostnames.setdefault(role, [])
        if droplet.ip_address not in named_hosts:
            named_hosts.append({'name': droplet.name, 'address': droplet.ip_address})
        env.do_ip_to_hostname[droplet.ip_address] = droplet.name
        if droplet.ip_address not in role_ips:
            role_ips.append(droplet.ip_address)
    return hostnames if split else droplets
def list_do():
    """Print every DigitalOcean droplet grouped by role, then a per-role
    and total monthly cost summary based on droplet sizes."""
    droplets = assign_digitalocean_roledefs(split=True)
    pprint(droplets)
    # Uncomment below to print all IP addresses
    # for group in droplets.values():
    #     for server in group:
    #         if 'address' in server:
    #             print(server['address'])
    doapi = digitalocean.Manager(token=django_settings.DO_TOKEN_FABRIC)
    droplets = doapi.get_all_droplets()
    sizes = doapi.get_all_sizes()
    sizes = dict((size.slug, size.price_monthly) for size in sizes)
    role_costs = defaultdict(int)
    total_cost = 0
    for droplet in droplets:
        # Role is the alphabetic prefix of the droplet name (e.g. "app23").
        roledef = re.split(r"([0-9]+)", droplet.name)[0]
        cost = droplet.size['price_monthly']
        role_costs[roledef] += cost
        total_cost += cost
    print("\n\n Costs:")
    pprint(dict(role_costs))
    print(" ---> Total cost: $%s/month" % total_cost)
def host(*names):
    """Target specific droplets by name: resolves each name to its IP via
    the DigitalOcean roledefs and sets env.hosts accordingly."""
    env.hosts = []
    env.doname = ','.join(names)
    hostnames = assign_digitalocean_roledefs(split=True)
    for role, hosts in list(hostnames.items()):
        for host in hosts:
            # Static roledef entries are plain strings; only droplet
            # entries (dicts) can be matched by name.
            if isinstance(host, dict) and host['name'] in names:
                env.hosts.append(host['address'])
    print(" ---> Using %s as hosts" % env.hosts)
# ================
# = Environments =
# ================
def server():
    """Point the path globals at the standard server layout under /srv."""
    env.VENDOR_PATH = "/srv/code"
    env.NEWSBLUR_PATH = "/srv/newsblur"
def assign_digitalocean_roledefs(split=False):
    """Refresh env.roledefs from DigitalOcean; with split=True return a
    role -> hosts mapping that also includes static roledef entries."""
    server()
    droplets = do_roledefs(split=split)
    if split:
        # Keep statically-configured roles that have no droplets.
        for roledef, hosts in list(env.roledefs.items()):
            if roledef not in droplets:
                droplets[roledef] = hosts
    return droplets
def app():
    """Target the app servers."""
    assign_digitalocean_roledefs()
    env.roles = ['app']
def web():
    """Target all web-facing roles (app, push, work, search)."""
    assign_digitalocean_roledefs()
    env.roles = ['app', 'push', 'work', 'search']
def work():
    """Target the work servers."""
    assign_digitalocean_roledefs()
    env.roles = ['work']
def www():
    """Target the www servers."""
    assign_digitalocean_roledefs()
    env.roles = ['www']
def dev():
    """Target the dev servers."""
    assign_digitalocean_roledefs()
    env.roles = ['dev']
def debug():
    """Target the debug servers."""
    assign_digitalocean_roledefs()
    env.roles = ['debug']
def node():
    """Target the node servers."""
    assign_digitalocean_roledefs()
    env.roles = ['node']
def push():
    """Target the push servers."""
    assign_digitalocean_roledefs()
    env.roles = ['push']
def db():
    """Target the database and search servers."""
    assign_digitalocean_roledefs()
    env.roles = ['db', 'search']
def task():
    """Target the celery task servers."""
    assign_digitalocean_roledefs()
    env.roles = ['task']
def ec2task():
    """Target the EC2-hosted task servers (uses EC2 credentials)."""
    ec2()
    env.roles = ['ec2task']
def ec2():
    """Switch SSH credentials to the EC2 key/user before assigning roles."""
    env.user = 'ubuntu'
    env.key_filename = ['/Users/sclay/.ec2/sclay.pem']
    assign_digitalocean_roledefs()
def all():
    """Target every server role at once."""
    assign_digitalocean_roledefs()
    env.roles = ['app', 'db', 'debug', 'node', 'push', 'work', 'www', 'search']
# =============
# = Bootstrap =
# =============
def setup_common():
    """Bootstrap steps shared by every server role: OS packages, user,
    python env, supervisor, pgbouncer, nginx, logging and monitoring.
    Ordered — later steps depend on earlier ones."""
    setup_installs()
    change_shell()
    setup_user()
    setup_sudoers()
    setup_ulimit()
    setup_do_monitoring()
    setup_libxml()
    setup_psql_client()
    setup_repo()
    setup_local_files()
    setup_time_calibration()
    setup_pip()
    setup_virtualenv()
    setup_repo_local_settings()
    pip()
    setup_supervisor()
    setup_hosts()
    setup_pgbouncer()
    config_pgbouncer()
    setup_mongoengine_repo()
    # setup_forked_mongoengine()
    # setup_pymongo_repo()
    setup_logrotate()
    copy_certificates()
    setup_nginx()
    setup_munin()
def setup_all():
    """Provision a single box with app, db and task roles combined."""
    setup_common()
    setup_app(skip_common=True)
    setup_db(skip_common=True)
    setup_task(skip_common=True)
def setup_app_docker(skip_common=False):
    """Provision a docker-based app server, then reboot it."""
    if not skip_common:
        setup_common()
    setup_app_firewall()
    setup_motd('app')
    change_shell()
    setup_user()
    setup_sudoers()
    setup_ulimit()
    setup_do_monitoring()
    setup_repo()
    setup_local_files()
    # setup_time_calibration()
    setup_docker()
    done()
    sudo('reboot')
def setup_app(skip_common=False, node=False):
    """Provision an app server (gunicorn + nginx, optionally node), then
    reboot it."""
    if not skip_common:
        setup_common()
    setup_app_firewall()
    setup_motd('app')
    copy_app_settings()
    config_nginx()
    setup_gunicorn(supervisor=True)
    if node:
        setup_node()
    deploy_web()
    config_monit_app()
    setup_usage_monitor()
    done()
    sudo('reboot')
def setup_app_image():
    """Refresh an app server built from a pre-baked image: settings,
    hosts, code and dependencies, then reboot."""
    copy_app_settings()
    setup_hosts()
    config_pgbouncer()
    pull()
    pip()
    deploy_web()
    done()
    sudo('reboot')
def setup_node():
    """Install node.js and fully configure the node services."""
    setup_node_app()
    config_node(full=True)
def setup_db(engine=None, skip_common=False, skip_benchmark=False):
    """Provision a database server.

    `engine` selects which store to install: "postgres",
    "postgres_slave", "mongo*", "redis", "redis_slave" or
    "elasticsearch". Monitoring and (optionally) a benchmark run follow.
    """
    if not skip_common:
        setup_common()
    setup_db_firewall()
    setup_motd('db')
    copy_db_settings()
    if engine == "postgres":
        setup_postgres(standby=False)
        setup_postgres_backups()
    elif engine == "postgres_slave":
        setup_postgres(standby=True)
    elif engine and engine.startswith("mongo"):
        setup_mongo()
        # setup_mongo_mms()
        setup_mongo_backups()
    elif engine == "redis":
        setup_redis()
        setup_redis_backups()
        setup_redis_monitor()
    elif engine == "redis_slave":
        setup_redis(slave=True)
        setup_redis_monitor()
    elif engine == "elasticsearch":
        setup_elasticsearch()
        setup_db_search()
    setup_gunicorn(supervisor=False)
    setup_db_munin()
    setup_db_monitor()
    setup_usage_monitor()
    if not skip_benchmark:
        benchmark()
    done()
    # if env.user == 'ubuntu':
    #     setup_db_mdadm()
def setup_task(queue=None, skip_common=False):
    """Provision a celery task server (optionally bound to one queue),
    then reboot it."""
    if not skip_common:
        setup_common()
    setup_task_firewall()
    setup_motd('task')
    copy_task_settings()
    enable_celery_supervisor(queue)
    setup_gunicorn(supervisor=False)
    config_monit_task()
    setup_usage_monitor()
    done()
    sudo('reboot')
def setup_task_image():
    """Refresh a task server built from a pre-baked image, then reboot."""
    setup_installs()
    copy_task_settings()
    setup_hosts()
    config_pgbouncer()
    pull()
    pip()
    deploy(reload=True)
    done()
    sudo('reboot')
# ==================
# = Setup - Docker =
# ==================
def setup_docker():
    """Install docker + docker-compose and let the deploy user run it."""
    packages = [
        'build-essential',
    ]
    sudo('DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install %s' % ' '.join(packages))
    sudo('apt install -fy docker docker-compose')
    # Allow running docker without sudo.
    sudo('usermod -aG docker ${USER}')
    sudo('su - ${USER}')
    copy_certificates()
# ==================
# = Setup - Common =
# ==================
def done():
    """Print a banner announcing that bootstrap finished for this host."""
    name = env.get('doname') or env.host_string
    print("\n\n\n\n-----------------------------------------------------")
    print("\n\n %s / %s IS SUCCESSFULLY BOOTSTRAPPED" % (name, env.host_string))
    print("\n\n-----------------------------------------------------\n\n\n\n")
def setup_installs():
    """Install all base OS packages, upgrade the system, and prepare the
    vendor directory. The `sleep 10` calls work around apt lock races."""
    packages = [
        'build-essential',
        'gcc',
        'scons',
        'libreadline-dev',
        'sysstat',
        'iotop',
        'git',
        'python2',
        'python2.7-dev',
        'locate',
        'software-properties-common',
        'libpcre3-dev',
        'libncurses5-dev',
        'libdbd-pg-perl',
        'libssl-dev',
        'libffi-dev',
        'libevent-dev',
        'make',
        'postgresql-common',
        'ssl-cert',
        'python-setuptools',
        'libyaml-0-2',
        'pgbouncer',
        'python-yaml',
        'python-numpy',
        'curl',
        'monit',
        'ufw',
        'libjpeg8',
        'libjpeg62-dev',
        'libfreetype6',
        'libfreetype6-dev',
        'libmysqlclient-dev',
        'libblas-dev',
        'liblapack-dev',
        'libatlas-base-dev',
        'gfortran',
        'libpq-dev',
    ]
    # sudo("sed -i -e 's/archive.ubuntu.com\|security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list")
    put("config/apt_sources.conf", "/etc/apt/sources.list", use_sudo=True)
    run('sleep 10') # Dies on a lock, so just delay
    sudo('apt-get -y update')
    run('sleep 10') # Dies on a lock, so just delay
    sudo('DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade')
    run('sleep 10') # Dies on a lock, so just delay
    sudo('DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install %s' % ' '.join(packages))
    # PIL/Pillow expect these libs under /usr/lib.
    with settings(warn_only=True):
        sudo("ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so /usr/lib")
        sudo("ln -s /usr/lib/x86_64-linux-gnu/libfreetype.so /usr/lib")
        sudo("ln -s /usr/lib/x86_64-linux-gnu/libz.so /usr/lib")
    with settings(warn_only=True):
        sudo('mkdir -p %s' % env.VENDOR_PATH)
        sudo('chown %s.%s %s' % (env.user, env.user, env.VENDOR_PATH))
def change_shell():
    """Install zsh with oh-my-zsh and make it the deploy user's shell."""
    sudo('apt-get -fy install zsh')
    with settings(warn_only=True):
        run('git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh')
        run('git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting')
    sudo('chsh %s -s /bin/zsh' % env.user)
def setup_user():
    """Generate a fresh DSA keypair on the host and append the local
    machine's public key to the deploy user's authorized_keys."""
    # run('useradd -c "NewsBlur" -m newsblur -s /bin/zsh')
    # run('openssl rand -base64 8 | tee -a ~conesus/.password | passwd -stdin conesus')
    run('mkdir -p ~/.ssh && chmod 700 ~/.ssh')
    run('rm -fr ~/.ssh/id_dsa*')
    run('ssh-keygen -t dsa -f ~/.ssh/id_dsa -N ""')
    run('touch ~/.ssh/authorized_keys')
    put("~/.ssh/id_dsa.pub", "authorized_keys")
    run("echo \"\n\" >> ~sclay/.ssh/authorized_keys")
    run('echo `cat authorized_keys` >> ~sclay/.ssh/authorized_keys')
    run('rm authorized_keys')
def copy_ssh_keys(username='sclay', private=False):
    """Install the shared NewsBlur SSH keypair for `username`; only copies
    the private key when explicitly requested."""
    sudo('mkdir -p ~%s/.ssh' % username)
    put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key.pub'), 'local.key.pub')
    sudo('mv local.key.pub ~%s/.ssh/id_rsa.pub' % username)
    if private:
        put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key'), 'local.key')
        sudo('mv local.key ~%s/.ssh/id_rsa' % username)
    sudo("echo \"\n\" >> ~%s/.ssh/authorized_keys" % username)
    sudo("echo `cat ~%s/.ssh/id_rsa.pub` >> ~%s/.ssh/authorized_keys" % (username, username))
    sudo('chown -R %s.%s ~%s/.ssh' % (username, username, username))
    sudo('chmod 700 ~%s/.ssh' % username)
    sudo('chmod 600 ~%s/.ssh/id_rsa*' % username)
def setup_repo():
    """Clone the NewsBlur repository into /srv and symlink it into the
    deploy user's home directory."""
    sudo('mkdir -p /srv')
    sudo('chown -R %s.%s /srv' % (env.user, env.user))
    with settings(warn_only=True):
        run('git clone https://github.com/samuelclay/NewsBlur.git %s' % env.NEWSBLUR_PATH)
    with settings(warn_only=True):
        sudo('ln -sfn /srv/code /home/%s/code' % env.user)
        sudo('ln -sfn /srv/newsblur /home/%s/newsblur' % env.user)
def setup_repo_local_settings():
    """Seed local_settings.py from the template and create the log file."""
    with virtualenv():
        run('cp newsblur/local_settings.py.template newsblur/local_settings.py')
        run('mkdir -p logs')
        run('touch logs/newsblur.log')
def setup_local_files():
    """Copy personal dotfiles (top, zsh, git, ssh) to the host."""
    run('mkdir -p ~/.config/procps')
    put("config/toprc", "~/.config/procps/toprc")
    run('rm -f ~/.toprc')
    put("config/zshrc", "~/.zshrc")
    put('config/gitconfig.txt', '~/.gitconfig')
    put('config/ssh.conf', '~/.ssh/config')
def setup_psql_client():
    """Install the postgres client and its expected runtime directory."""
    sudo('apt-get -y install postgresql-client')
    sudo('mkdir -p /var/run/postgresql')
    with settings(warn_only=True):
        sudo('chown postgres.postgres /var/run/postgresql')
def setup_libxml():
    """Install libxml2/libxslt dev headers needed to build lxml."""
    sudo('apt-get -y install libxml2-dev libxslt1-dev python-lxml')
def setup_libxml_code():
    """Alternative to setup_libxml(): build libxml2/libxslt from source."""
    with cd(env.VENDOR_PATH):
        run('git clone git://git.gnome.org/libxml2')
        run('git clone git://git.gnome.org/libxslt')
    with cd(os.path.join(env.VENDOR_PATH, 'libxml2')):
        run('./configure && make && sudo make install')
    with cd(os.path.join(env.VENDOR_PATH, 'libxslt')):
        run('./configure && make && sudo make install')
def setup_psycopg():
    """Install/upgrade the psycopg2 postgres driver system-wide."""
    sudo('easy_install -U psycopg2')
def setup_virtualenv():
    """Install virtualenvwrapper and create the `newsblur` virtualenv
    under $NEWSBLUR_PATH/venv."""
    sudo('rm -fr ~/.cache') # Clean `sudo pip`
    sudo('pip install --upgrade virtualenv')
    sudo('pip install --upgrade virtualenvwrapper')
    setup_local_files()
    with prefix('WORKON_HOME=%s' % os.path.join(env.NEWSBLUR_PATH, 'venv')):
        with prefix('source /usr/local/bin/virtualenvwrapper.sh'):
            with cd(env.NEWSBLUR_PATH):
                # sudo('rmvirtualenv newsblur')
                # sudo('rm -fr venv')
                with settings(warn_only=True):
                    run('mkvirtualenv newsblur')
                # run('echo "import sys; sys.setdefaultencoding(\'utf-8\')" | sudo tee venv/newsblur/lib/python2.7/sitecustomize.py')
                # run('echo "/srv/newsblur" | sudo tee venv/newsblur/lib/python2.7/site-packages/newsblur.pth')
@_contextmanager
def virtualenv():
    """Context manager: run enclosed commands inside the `newsblur`
    virtualenv, cd'ed into the repo checkout."""
    with prefix('WORKON_HOME=%s' % os.path.join(env.NEWSBLUR_PATH, 'venv')):
        with prefix('source /usr/local/bin/virtualenvwrapper.sh'):
            with cd(env.NEWSBLUR_PATH):
                with prefix('workon newsblur'):
                    yield
def setup_pip():
    """Bootstrap pip for the system python2 interpreter."""
    with cd(env.VENDOR_PATH), settings(warn_only=True):
        run('curl https://bootstrap.pypa.io/2.6/get-pip.py | sudo python2')
        # sudo('python2 get-pip.py')
@parallel
def pip():
    """Install python requirements into the virtualenv on every host.

    Task servers get a temporary 4GB swapfile during the install because
    some packages need more memory to build than those boxes have.
    """
    role = role_for_host()
    pull()
    with virtualenv():
        if role == "task":
            with settings(warn_only=True):
                sudo('fallocate -l 4G /swapfile')
                sudo('chmod 600 /swapfile')
                sudo('mkswap /swapfile')
                sudo('swapon /swapfile')
        sudo('chown %s.%s -R %s' % (env.user, env.user, os.path.join(env.NEWSBLUR_PATH, 'venv')))
        # run('easy_install -U pip')
        # run('pip install --upgrade pip')
        # run('pip install --upgrade setuptools')
        run('pip install -r requirements.txt')
    if role == "task":
        with settings(warn_only=True):
            sudo('swapoff /swapfile')
def solo_pip(role):
    """Reinstall requirements on a single host, stopping and restarting
    the role's services around the install."""
    if role == "app":
        gunicorn_stop()
        pip()
        deploy_code(reload=True)
    elif role == "task":
        celery_stop()
        copy_task_settings()
        pip()
        celery()
def setup_supervisor():
    """Install supervisor, push our config, and restart it with a raised
    file-descriptor limit."""
    sudo('apt-get update')
    sudo('apt-get -y install supervisor')
    put('config/supervisord.conf', '/etc/supervisor/supervisord.conf', use_sudo=True)
    sudo('/etc/init.d/supervisor stop')
    sudo('sleep 2')
    sudo('ulimit -n 100000 && /etc/init.d/supervisor start')
    sudo("/usr/sbin/update-rc.d -f supervisor defaults")
    sudo('systemctl enable supervisor')
    sudo('systemctl start supervisor')
@parallel
def setup_hosts():
    """Push the shared /etc/hosts file and append the host's own name."""
    put(os.path.join(env.SECRETS_PATH, 'configs/hosts'), '/etc/hosts', use_sudo=True)
    sudo('echo "\n\n127.0.0.1 `hostname`" | sudo tee -a /etc/hosts')
def setup_pgbouncer():
    """Replace the distro pgbouncer with a source build, then configure it."""
    sudo('apt-get remove -y pgbouncer')
    sudo('apt-get install -y libevent-dev pkg-config libc-ares2 libc-ares-dev')
    PGBOUNCER_VERSION = '1.15.0'
    with cd(env.VENDOR_PATH), settings(warn_only=True):
        run('wget https://pgbouncer.github.io/downloads/files/%s/pgbouncer-%s.tar.gz' % (PGBOUNCER_VERSION, PGBOUNCER_VERSION))
        run('tar -xzf pgbouncer-%s.tar.gz' % PGBOUNCER_VERSION)
        run('rm pgbouncer-%s.tar.gz' % PGBOUNCER_VERSION)
        with cd('pgbouncer-%s' % PGBOUNCER_VERSION):
            run('./configure --prefix=/usr/local')
            run('make')
            sudo('make install')
            sudo('ln -s /usr/local/bin/pgbouncer /usr/sbin/pgbouncer')
    config_pgbouncer()
def config_pgbouncer():
    """Push pgbouncer config + auth file and hard-restart the service."""
    sudo('mkdir -p /etc/pgbouncer')
    put('config/pgbouncer.conf', 'pgbouncer.conf')
    sudo('mv pgbouncer.conf /etc/pgbouncer/pgbouncer.ini')
    put(os.path.join(env.SECRETS_PATH, 'configs/pgbouncer_auth.conf'), 'userlist.txt')
    sudo('mv userlist.txt /etc/pgbouncer/userlist.txt')
    sudo('echo "START=1" | sudo tee /etc/default/pgbouncer')
    # sudo('su postgres -c "/etc/init.d/pgbouncer stop"', pty=False)
    with settings(warn_only=True):
        sudo('/etc/init.d/pgbouncer stop')
        sudo('pkill -9 pgbouncer -e')
        run('sleep 2')
    sudo('/etc/init.d/pgbouncer start', pty=False)
@parallel
def kill_pgbouncer(stop=False):
    """Force-kill pgbouncer (clearing its pid file) and, unless `stop`,
    start it again."""
    # sudo('su postgres -c "/etc/init.d/pgbouncer stop"', pty=False)
    with settings(warn_only=True):
        sudo('/etc/init.d/pgbouncer stop')
        run('sleep 2')
        sudo('rm /var/log/postgresql/pgbouncer.pid')
    with settings(warn_only=True):
        sudo('pkill -9 pgbouncer')
        run('sleep 2')
    if not stop:
        run('sudo /etc/init.d/pgbouncer start', pty=False)
def config_monit_task():
    """Install the monit watchdog config for celery and restart monit."""
    put('config/monit_task.conf', '/etc/monit/conf.d/celery.conf', use_sudo=True)
    sudo('echo "START=yes" | sudo tee /etc/default/monit')
    sudo('/etc/init.d/monit restart')
def config_monit_node():
    """Install the monit watchdog config for node and restart monit."""
    put('config/monit_node.conf', '/etc/monit/conf.d/node.conf', use_sudo=True)
    sudo('echo "START=yes" | sudo tee /etc/default/monit')
    sudo('/etc/init.d/monit restart')
def config_monit_original():
    """Install the monit config for the original-page node service."""
    put('config/monit_original.conf', '/etc/monit/conf.d/node_original.conf', use_sudo=True)
    sudo('echo "START=yes" | sudo tee /etc/default/monit')
    sudo('/etc/init.d/monit restart')
def config_monit_app():
    """Install the monit watchdog config for gunicorn and restart monit."""
    put('config/monit_app.conf', '/etc/monit/conf.d/gunicorn.conf', use_sudo=True)
    sudo('echo "START=yes" | sudo tee /etc/default/monit')
    sudo('/etc/init.d/monit restart')
def config_monit_work():
    """Install the monit watchdog config for work servers."""
    put('config/monit_work.conf', '/etc/monit/conf.d/work.conf', use_sudo=True)
    sudo('echo "START=yes" | sudo tee /etc/default/monit')
    sudo('/etc/init.d/monit restart')
def config_monit_redis():
    """Install the monit watchdog config (plus debug helper) for redis."""
    sudo('chown root.root /etc/init.d/redis')
    sudo('chmod a+x /etc/init.d/redis')
    put('config/monit_debug.sh', '/etc/monit/monit_debug.sh', use_sudo=True)
    sudo('chmod a+x /etc/monit/monit_debug.sh')
    put('config/monit_redis.conf', '/etc/monit/conf.d/redis.conf', use_sudo=True)
    sudo('echo "START=yes" | sudo tee /etc/default/monit')
    sudo('/etc/init.d/monit restart')
def setup_mongoengine_repo():
    """Install mongoengine v0.8.2 from a source checkout, replacing any
    pip-installed copy in dist-packages with a symlink."""
    with cd(env.VENDOR_PATH), settings(warn_only=True):
        run('rm -fr mongoengine')
        run('git clone https://github.com/MongoEngine/mongoengine.git')
        sudo('rm -fr /usr/local/lib/python2.7/dist-packages/mongoengine')
        sudo('rm -fr /usr/local/lib/python2.7/dist-packages/mongoengine-*')
        sudo('ln -sfn %s /usr/local/lib/python2.7/dist-packages/mongoengine' %
             os.path.join(env.VENDOR_PATH, 'mongoengine/mongoengine'))
    with cd(os.path.join(env.VENDOR_PATH, 'mongoengine')), settings(warn_only=True):
        run('git co v0.8.2')
def clear_pymongo_repo():
    """Remove any installed pymongo/bson/gridfs from dist-packages."""
    sudo('rm -fr /usr/local/lib/python2.7/dist-packages/pymongo*')
    sudo('rm -fr /usr/local/lib/python2.7/dist-packages/bson*')
    sudo('rm -fr /usr/local/lib/python2.7/dist-packages/gridfs*')
def setup_pymongo_repo():
    """Install pymongo from a source checkout via symlinks into
    dist-packages (after clearing any installed copy)."""
    with cd(env.VENDOR_PATH), settings(warn_only=True):
        run('git clone git://github.com/mongodb/mongo-python-driver.git pymongo')
    # with cd(os.path.join(env.VENDOR_PATH, 'pymongo')):
    #     sudo('python setup.py install')
    clear_pymongo_repo()
    sudo('ln -sfn %s /usr/local/lib/python2.7/dist-packages/' %
         os.path.join(env.VENDOR_PATH, 'pymongo/{pymongo,bson,gridfs}'))
def setup_forked_mongoengine():
    """Switch the mongoengine checkout to the samuelclay fork's master."""
    with cd(os.path.join(env.VENDOR_PATH, 'mongoengine')), settings(warn_only=True):
        run('git remote add clay https://github.com/samuelclay/mongoengine.git')
        run('git pull')
        run('git fetch clay')
        run('git checkout -b clay_master clay/master')
def switch_forked_mongoengine():
    """Check out the fork's dev branch in the mongoengine checkout."""
    with cd(os.path.join(env.VENDOR_PATH, 'mongoengine')):
        run('git co dev')
        run('git pull %s dev --force' % env.user)
        # run('git checkout .')
        # run('git checkout master')
        # run('get branch -D dev')
        # run('git checkout -b dev origin/dev')
def setup_logrotate(clear=True):
    """Install logrotate configs for newsblur/mongodb/nginx and force an
    initial rotation. With clear=True the existing logs are truncated
    first (`| xargs tee` with no input empties each file)."""
    if clear:
        run('find /srv/newsblur/logs/*.log | xargs tee')
        with settings(warn_only=True):
            sudo('find /var/log/mongodb/*.log | xargs tee')
    put('config/logrotate.conf', '/etc/logrotate.d/newsblur', use_sudo=True)
    put('config/logrotate.mongo.conf', '/etc/logrotate.d/mongodb', use_sudo=True)
    put('config/logrotate.nginx.conf', '/etc/logrotate.d/nginx', use_sudo=True)
    sudo('chown root.root /etc/logrotate.d/{newsblur,mongodb,nginx}')
    sudo('chmod 644 /etc/logrotate.d/{newsblur,mongodb,nginx}')
    with settings(warn_only=True):
        sudo('chown sclay.sclay /srv/newsblur/logs/*.log')
    sudo('logrotate -f /etc/logrotate.d/newsblur')
    sudo('logrotate -f /etc/logrotate.d/nginx')
    sudo('logrotate -f /etc/logrotate.d/mongodb')
def setup_ulimit():
    """Raise the open-file-descriptor limits system-wide and reconnect so
    the new limits apply to this fabric session."""
    # Increase File Descriptor limits.
    run('export FILEMAX=`sysctl -n fs.file-max`', pty=False)
    sudo('mv /etc/security/limits.conf /etc/security/limits.conf.bak', pty=False)
    sudo('touch /etc/security/limits.conf', pty=False)
    run('echo "root soft nofile 100000\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    run('echo "root hard nofile 100000\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    run('echo "* soft nofile 100000\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    # NOTE(review): 100090 is inconsistent with the other 100000 limits —
    # looks like a typo; confirm before changing.
    run('echo "* hard nofile 100090\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    run('echo "fs.file-max = 100000\n" | sudo tee -a /etc/sysctl.conf', pty=False)
    sudo('sysctl -p')
    sudo('ulimit -n 100000')
    connections.connect(env.host_string)
    # run('touch /home/ubuntu/.bash_profile')
    # run('echo "ulimit -n $FILEMAX" >> /home/ubuntu/.bash_profile')

    # Increase Ephemeral Ports.
    # sudo chmod 666 /etc/sysctl.conf
    # echo "net.ipv4.ip_local_port_range = 1024 65535" >> /etc/sysctl.conf
    # sudo chmod 644 /etc/sysctl.conf
def setup_do_monitoring():
    """Install the DigitalOcean metrics agent."""
    run('curl -sSL https://agent.digitalocean.com/install.sh | sh')
def setup_syncookies():
    """Enable TCP SYN cookies (SYN-flood protection)."""
    sudo('echo 1 | sudo tee /proc/sys/net/ipv4/tcp_syncookies')
    sudo('sudo /sbin/sysctl -w net.ipv4.tcp_syncookies=1')
def setup_sudoers(user=None):
    """Grant passwordless sudo to `user` (defaults to the deploy user)."""
    sudo('echo "%s ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/sclay' % (user or env.user))
    sudo('chmod 0440 /etc/sudoers.d/sclay')
def setup_nginx():
    """Build and install nginx from source with SSL/gzip/realip modules,
    then push our config."""
    NGINX_VERSION = '1.19.5'
    with cd(env.VENDOR_PATH), settings(warn_only=True):
        sudo("groupadd nginx")
        sudo("useradd -g nginx -d /var/www/htdocs -s /bin/false nginx")
        run('wget http://nginx.org/download/nginx-%s.tar.gz' % NGINX_VERSION)
        run('tar -xzf nginx-%s.tar.gz' % NGINX_VERSION)
        run('rm nginx-%s.tar.gz' % NGINX_VERSION)
        with cd('nginx-%s' % NGINX_VERSION):
            run('./configure --with-http_ssl_module --with-http_stub_status_module --with-http_gzip_static_module --with-http_realip_module ')
            run('make')
            sudo('make install')
    config_nginx()
def config_nginx():
    """Push nginx configs (substituting the hostname into the site file),
    install the init script and restart nginx."""
    put("config/nginx.conf", "/usr/local/nginx/conf/nginx.conf", use_sudo=True)
    sudo("mkdir -p /usr/local/nginx/conf/sites-enabled")
    sudo("mkdir -p /var/log/nginx")
    put("config/nginx.newsblur.conf", "/usr/local/nginx/conf/sites-enabled/newsblur.conf", use_sudo=True)
    put("config/nginx-init", "/etc/init.d/nginx", use_sudo=True)
    # Replace the nginx_none placeholder with this host's name.
    sudo('sed -i -e s/nginx_none/`cat /etc/hostname`/g /usr/local/nginx/conf/sites-enabled/newsblur.conf')
    sudo("chmod 0755 /etc/init.d/nginx")
    sudo("/usr/sbin/update-rc.d -f nginx defaults")
    sudo("/etc/init.d/nginx restart")
    copy_certificates()
# ===============
# = Setup - App =
# ===============
def setup_app_firewall():
    """Configure ufw for an app server: deny all, allow ssh/http(s) and
    the gunicorn + socket.io ports."""
    sudo('ufw default deny')
    sudo('ufw allow ssh')   # ssh
    sudo('ufw allow 80')    # http
    sudo('ufw allow 8000')  # gunicorn
    sudo('ufw allow 8888')  # socket.io
    sudo('ufw allow 8889')  # socket.io ssl
    sudo('ufw allow 443')   # https
    sudo('ufw --force enable')
def remove_gunicorn():
    """Delete the source checkout of gunicorn from the vendor dir."""
    with cd(env.VENDOR_PATH):
        sudo('rm -fr gunicorn')
def setup_gunicorn(supervisor=True, restart=True):
    """Register gunicorn with supervisor (when `supervisor`); `restart`
    controls whether supervisor applies the change immediately."""
    if supervisor:
        put('config/supervisor_gunicorn.conf', '/etc/supervisor/conf.d/gunicorn.conf', use_sudo=True)
        sudo('supervisorctl reread')
        if restart:
            sudo('supervisorctl update')
    # with cd(env.VENDOR_PATH):
    #     sudo('rm -fr gunicorn')
    #     run('git clone git://github.com/benoitc/gunicorn.git')
    # with cd(os.path.join(env.VENDOR_PATH, 'gunicorn')):
    #     run('git pull')
    #     sudo('python setup.py develop')
def update_gunicorn():
    """Update the source checkout of gunicorn and reinstall it."""
    with cd(os.path.join(env.VENDOR_PATH, 'gunicorn')):
        run('git pull')
        sudo('python setup.py develop')
def setup_staging():
    """Clone a second checkout into ~/staging, reusing production settings."""
    run('git clone https://github.com/samuelclay/NewsBlur.git staging')
    with cd('~/staging'):
        run('cp ../newsblur/local_settings.py local_settings.py')
        run('mkdir -p logs')
        run('touch logs/newsblur.log')
def setup_node_app():
    """Install node.js 14 and the supervisor npm package, and open the
    node service ports in ufw."""
    sudo('curl -sL https://deb.nodesource.com/setup_14.x | sudo bash -')
    sudo('apt-get install -y nodejs')
    # run('curl -L https://npmjs.org/install.sh | sudo sh')
    # sudo('apt-get install npm')
    sudo('sudo npm install -g npm')
    sudo('npm install -g supervisor')
    sudo('ufw allow 8888')
    sudo('ufw allow 4040')
def config_node(full=False):
    """Install the supervisor configs for the node services and reload.
    With full=True, node_modules is wiped and reinstalled first."""
    sudo('rm -f /etc/supervisor/conf.d/gunicorn.conf')
    sudo('rm -f /etc/supervisor/conf.d/node.conf')
    put('config/supervisor_node_unread.conf', '/etc/supervisor/conf.d/node_unread.conf', use_sudo=True)
    put('config/supervisor_node_unread_ssl.conf', '/etc/supervisor/conf.d/node_unread_ssl.conf', use_sudo=True)
    put('config/supervisor_node_favicons.conf', '/etc/supervisor/conf.d/node_favicons.conf', use_sudo=True)
    put('config/supervisor_node_text.conf', '/etc/supervisor/conf.d/node_text.conf', use_sudo=True)
    if full:
        run("rm -fr /srv/newsblur/node/node_modules")
        with cd(os.path.join(env.NEWSBLUR_PATH, "node")):
            run("npm install")
    sudo('supervisorctl reload')
@parallel
def copy_app_settings():
    """Push the secret app local_settings.py and append SERVER_NAME."""
    run('rm -f %s/local_settings.py' % env.NEWSBLUR_PATH)
    put(os.path.join(env.SECRETS_PATH, 'settings/app_settings.py'),
        '%s/newsblur/local_settings.py' % env.NEWSBLUR_PATH)
    run('echo "\nSERVER_NAME = \\\\"`hostname`\\\\"" >> %s/newsblur/local_settings.py' % env.NEWSBLUR_PATH)
def assemble_certificates():
    """Concatenate the Comodo certificate chain into a single newsblur.com.crt bundle (runs locally)."""
    with lcd(os.path.join(env.SECRETS_PATH, 'certificates/comodo')):
        local('pwd')
        local('cat STAR_newsblur_com.crt EssentialSSLCA_2.crt ComodoUTNSGCCA.crt UTNAddTrustSGCCA.crt AddTrustExternalCARoot.crt > newsblur.com.crt')
def copy_certificates(copy=False):
    """Link the Let's Encrypt certificates into config/certificates under every
    alias the nginx/haproxy configs expect, and upload the auxiliary certs.

    When ``copy`` is True the fullchain/privkey from secrets are first uploaded
    into the letsencrypt live directory.
    """
    certs_dir = os.path.join(env.NEWSBLUR_PATH, 'config/certificates')
    run('mkdir -p %s' % certs_dir)
    fullchain = "/etc/letsencrypt/live/newsblur.com/fullchain.pem"
    privkey = "/etc/letsencrypt/live/newsblur.com/privkey.pem"
    if copy:
        sudo('mkdir -p %s' % os.path.dirname(fullchain))
        put(os.path.join(env.SECRETS_PATH, 'certificates/newsblur.com.pem'), fullchain, use_sudo=True)
        put(os.path.join(env.SECRETS_PATH, 'certificates/newsblur.com.key'), privkey, use_sudo=True)
    aliases = [
        (fullchain, 'newsblur.com.crt'),
        (fullchain, 'newsblur.com.pem'),    # backwards compatibility with hard-coded nginx configs
        (privkey, 'newsblur.com.key'),
        (privkey, 'newsblur.com.crt.key'),  # HAProxy
    ]
    for source, alias in aliases:
        run('ln -fs %s %s' % (source, os.path.join(certs_dir, alias)))
    put(os.path.join(env.SECRETS_PATH, 'certificates/comodo/dhparams.pem'), certs_dir)
    put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps_development.pem'), certs_dir)
    # Export aps.cer from Apple issued certificate using Keychain Assistant
    # openssl x509 -in aps.cer -inform DER -outform PEM -out aps.pem
    put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps.pem'), certs_dir)
    # Export aps.p12 from aps.cer using Keychain Assistant
    # openssl pkcs12 -in aps.p12 -out aps.p12.pem -nodes
    put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps.p12.pem'), certs_dir)
def setup_certbot():
    """Install certbot (snap) with the DNSimple DNS plugin and issue the newsblur.com wildcard cert."""
    sudo('snap install --classic certbot')
    sudo('snap set certbot trust-plugin-with-root=ok')
    sudo('snap install certbot-dns-dnsimple')
    sudo('ln -fs /snap/bin/certbot /usr/bin/certbot')
    put(os.path.join(env.SECRETS_PATH, 'configs/certbot.conf'),
        os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
    sudo('chmod 0600 %s' % os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
    sudo('certbot certonly -n --agree-tos '
         ' --dns-dnsimple --dns-dnsimple-credentials %s'
         ' --email samuel@newsblur.com --domains newsblur.com '
         ' -d "*.newsblur.com" -d "popular.global.newsblur.com"' %
         (os.path.join(env.NEWSBLUR_PATH, 'certbot.conf')))
    # Loosen letsencrypt dir/key perms so non-root services can read the issued key.
    sudo('chmod 0755 /etc/letsencrypt/{live,archive}')
    sudo('chmod 0755 /etc/letsencrypt/archive/newsblur.com/privkey1.pem')
# def setup_certbot_old():
# sudo('add-apt-repository -y universe')
# sudo('add-apt-repository -y ppa:certbot/certbot')
# sudo('apt-get update')
# sudo('apt-get install -y certbot')
# sudo('apt-get install -y python3-certbot-dns-dnsimple')
# put(os.path.join(env.SECRETS_PATH, 'configs/certbot.conf'),
# os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
# sudo('chmod 0600 %s' % os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
# sudo('certbot certonly -n --agree-tos '
# ' --dns-dnsimple --dns-dnsimple-credentials %s'
# ' --email samuel@newsblur.com --domains newsblur.com '
# ' -d "*.newsblur.com" -d "global.popular.newsblur.com"' %
# (os.path.join(env.NEWSBLUR_PATH, 'certbot.conf')))
# sudo('chmod 0755 /etc/letsencrypt/{live,archive}')
# sudo('chmod 0755 /etc/letsencrypt/archive/newsblur.com/privkey1.pem')
@parallel
def maintenance_on():
    """Enter maintenance mode: stop supervisor on work/search, flip the maintenance template elsewhere."""
    role = role_for_host()
    if role in ['work', 'search']:
        sudo('supervisorctl stop all')
    else:
        # Renaming the template to maintenance_on.html is what activates the maintenance page.
        put('templates/maintenance_off.html', '%s/templates/maintenance_off.html' % env.NEWSBLUR_PATH)
        with virtualenv():
            run('mv templates/maintenance_off.html templates/maintenance_on.html')
@parallel
def maintenance_off():
    """Leave maintenance mode: restart supervisor on work/search, restore the template elsewhere."""
    role = role_for_host()
    if role in ['work', 'search']:
        sudo('supervisorctl start all')
    else:
        with virtualenv():
            run('mv templates/maintenance_on.html templates/maintenance_off.html')
            # Restore the pristine template in case the mv modified tracked state.
            run('git checkout templates/maintenance_off.html')
def setup_haproxy(debug=False):
    """Build HAProxy 2.3 from source, install its init script and config, and restart it.

    With ``debug`` True the local debug config is installed instead of the
    generated production config.
    """
    version = "2.3.3"
    sudo('ufw allow 81') # nginx moved
    sudo('ufw allow 1936') # haproxy stats
    # sudo('apt-get install -y haproxy')
    # sudo('apt-get remove -y haproxy')
    with cd(env.VENDOR_PATH):
        run('wget http://www.haproxy.org/download/2.3/src/haproxy-%s.tar.gz' % version)
        run('tar -xf haproxy-%s.tar.gz' % version)
        with cd('haproxy-%s' % version):
            run('make TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1')
            sudo('make install')
    put('config/haproxy-init', '/etc/init.d/haproxy', use_sudo=True)
    sudo('chmod u+x /etc/init.d/haproxy')
    sudo('mkdir -p /etc/haproxy')
    if debug:
        put('config/debug_haproxy.conf', '/etc/haproxy/haproxy.cfg', use_sudo=True)
    else:
        build_haproxy()
        put(os.path.join(env.SECRETS_PATH, 'configs/haproxy.conf'),
            '/etc/haproxy/haproxy.cfg', use_sudo=True)
    sudo('echo "ENABLED=1" | sudo tee /etc/default/haproxy')
    cert_path = "%s/config/certificates" % env.NEWSBLUR_PATH
    # HAProxy wants cert + key concatenated into a single .pem file.
    run('cat %s/newsblur.com.crt > %s/newsblur.pem' % (cert_path, cert_path))
    run('cat %s/newsblur.com.key >> %s/newsblur.pem' % (cert_path, cert_path))
    run('ln -s %s/newsblur.com.key %s/newsblur.pem.key' % (cert_path, cert_path))
    put('config/haproxy_rsyslog.conf', '/etc/rsyslog.d/49-haproxy.conf', use_sudo=True)
    # sudo('restart rsyslog')
    sudo('update-rc.d -f haproxy defaults')
    sudo('/etc/init.d/haproxy stop')
    run('sleep 5')
    sudo('/etc/init.d/haproxy start')
def config_haproxy(debug=False):
    """Push a new haproxy.cfg (debug or freshly generated) and reload it only if it validates."""
    if debug:
        put('config/debug_haproxy.conf', '/etc/haproxy/haproxy.cfg', use_sudo=True)
    else:
        build_haproxy()
        put(os.path.join(env.SECRETS_PATH, 'configs/haproxy.conf'),
            '/etc/haproxy/haproxy.cfg', use_sudo=True)
    # Validate the config first so a broken file never takes haproxy down.
    haproxy_check = run('haproxy -c -f /etc/haproxy/haproxy.cfg')
    if haproxy_check.return_code == 0:
        sudo('/etc/init.d/haproxy reload')
    else:
        print(" !!!> Uh-oh, HAProxy config doesn't check out: %s" % haproxy_check.return_code)
def build_haproxy():
    """Render config/haproxy.conf.template into the secrets haproxy.conf.

    Collects the current DigitalOcean droplets by role, formats one HAProxy
    `server` line per backend (port and check interval vary by role), then
    substitutes the sorted lists into the template's `{{ group }}` placeholders.

    Fix: both file handles are now managed with `with` — the template handle
    was previously opened and never closed.
    """
    droplets = assign_digitalocean_roledefs(split=True)
    servers = defaultdict(list)
    gunicorn_counts_servers = ['app22', 'app26']
    gunicorn_refresh_servers = ['app20', 'app21']
    maintenance_servers = ['app20']
    node_socket3_servers = ['node02', 'node03']
    ignore_servers = []
    for group_type in ['app', 'push', 'work', 'node_socket', 'node_socket3', 'node_favicon', 'node_text', 'www']:
        group_type_name = group_type
        if 'node' in group_type:
            group_type_name = 'node'  # all node_* groups draw from the 'node' role
        for server in droplets[group_type_name]:
            droplet_nums = re.findall(r'\d+', server['name'])
            droplet_num = droplet_nums[0] if droplet_nums else ''
            server_type = group_type
            port = 80
            check_inter = 3000
            if server['name'] in ignore_servers:
                print(" ---> Ignoring %s" % server['name'])
                continue
            # node_socket3 hosts are carved out of the regular node pools.
            if server['name'] in node_socket3_servers and group_type != 'node_socket3':
                continue
            if server['name'] not in node_socket3_servers and group_type == 'node_socket3':
                continue
            if server_type == 'www':
                port = 81
            if group_type == 'node_socket':
                port = 8888
            if group_type == 'node_socket3':
                port = 8888
            if group_type == 'node_text':
                port = 4040
            if group_type in ['app', 'push']:
                port = 8000
            address = "%s:%s" % (server['address'], port)
            if server_type == 'app':
                # App servers also get an nginx backend entry on port 80.
                nginx_address = "%s:80" % (server['address'])
                servers['nginx'].append("  server nginx%-15s %-22s check inter 3000ms" % (droplet_num, nginx_address))
            if server['name'] in maintenance_servers:
                nginx_address = "%s:80" % (server['address'])
                servers['maintenance'].append("  server nginx%-15s %-22s check inter 3000ms" % (droplet_num, nginx_address))
            if server['name'] in gunicorn_counts_servers:
                server_type = 'gunicorn_counts'
                check_inter = 15000
            elif server['name'] in gunicorn_refresh_servers:
                server_type = 'gunicorn_refresh'
                check_inter = 30000
            server_name = "%s%s" % (server_type, droplet_num)
            servers[server_type].append("  server %-20s %-22s check inter %sms" % (server_name, address, check_inter))
    with open(os.path.join(env.NEWSBLUR_PATH, 'config/haproxy.conf.template'), 'r') as template_file:
        haproxy_template = template_file.read()
    for sub, server_list in list(servers.items()):
        sorted_servers = '\n'.join(sorted(server_list))
        haproxy_template = haproxy_template.replace("{{ %s }}" % sub, sorted_servers)
    with open(os.path.join(env.SECRETS_PATH, 'configs/haproxy.conf'), 'w') as conf_file:
        conf_file.write(haproxy_template)
def upgrade_django(role=None):
    """Role-aware upgrade to the django1.11 branch: stop services, swap settings, reinstall deps."""
    if not role:
        role = role_for_host()
    with virtualenv(), settings(warn_only=True):
        sudo('sudo dpkg --configure -a')
        setup_supervisor()
        pull()
        run('git co django1.11')
        if role == "task":
            sudo('supervisorctl stop celery')
            run('./utils/kill_celery.sh')
            copy_task_settings()
            enable_celery_supervisor(update=False)
        elif role == "work":
            copy_app_settings()
            enable_celerybeat()
        elif role == "web" or role == "app":
            sudo('supervisorctl stop gunicorn')
            run('./utils/kill_gunicorn.sh')
            copy_app_settings()
            setup_gunicorn(restart=False)
        elif role == "node":
            copy_app_settings()
            config_node(full=True)
        else:
            copy_task_settings()
        pip()
        clean()
        # sudo('reboot')
def clean():
    """Delete stale *.pyc bytecode files across the checkout."""
    with virtualenv(), settings(warn_only=True):
        # Raw string: `\;` is find's -exec terminator, not a Python escape
        # (the non-raw form triggers an invalid-escape DeprecationWarning).
        run(r'find . -name "*.pyc" -exec rm -f {} \;')
def downgrade_django(role=None):
    """Check out master, reinstall requirements, drop django-paypal, then redeploy by role."""
    with virtualenv(), settings(warn_only=True):
        pull()
        run('git co master')
        pip()
        run('pip uninstall -y django-paypal')
        if role == "task":
            copy_task_settings()
            enable_celery_supervisor()
        else:
            copy_app_settings()
            deploy()
def vendorize_paypal():
    """Remove the pip-installed django-paypal so the vendored copy takes over."""
    with virtualenv(), settings(warn_only=True):
        run('pip uninstall -y django-paypal')
def upgrade_pil():
    """Replace the system python-imaging package with pip-installed Pillow, then reload supervisor."""
    with virtualenv():
        pull()
        run('pip install --upgrade pillow')
        # celery_stop()
        sudo('apt-get remove -y python-imaging')
        sudo('supervisorctl reload')
        # kill()
def downgrade_pil():
    """Revert to the system python-imaging package, removing the pip-installed Pillow."""
    with virtualenv():
        sudo('apt-get install -y python-imaging')
        sudo('rm -fr /usr/local/lib/python2.7/dist-packages/Pillow*')
        pull()
        sudo('supervisorctl reload')
        # kill()
def setup_db_monitor():
    """Install the Flask DB monitor app and register it with supervisor."""
    pull()
    with virtualenv():
        sudo('apt-get install -y libpq-dev python2.7-dev')
        run('pip install -r flask/requirements.txt')
        put('flask/supervisor_db_monitor.conf', '/etc/supervisor/conf.d/db_monitor.conf', use_sudo=True)
        sudo('supervisorctl reread')
        sudo('supervisorctl update')
# ==============
# = Setup - DB =
# ==============
@parallel
def setup_db_firewall():
    """Reset ufw on a DB host: deny by default, allow ssh/http(s), and open the
    database ports only to the known internal server IPs."""
    ports = [
        5432,   # PostgreSQL
        27017,  # MongoDB
        28017,  # MongoDB web
        27019,  # MongoDB config
        6379,   # Redis
        # 11211,  # Memcached
        3060,   # Node original page server
        9200,   # Elasticsearch
        5000,   # DB Monitor
    ]
    sudo('ufw --force reset')
    sudo('ufw default deny')
    sudo('ufw allow ssh')
    sudo('ufw allow 80')
    sudo('ufw allow 443')
    # DigitalOcean
    for ip in set(env.roledefs['app'] +
                  env.roledefs['db'] +
                  env.roledefs['debug'] +
                  env.roledefs['task'] +
                  env.roledefs['work'] +
                  env.roledefs['push'] +
                  env.roledefs['www'] +
                  env.roledefs['search'] +
                  env.roledefs['node']):
        sudo('ufw allow proto tcp from %s to any port %s' % (
            ip,
            ','.join(map(str, ports))
        ))
    # EC2
    # for host in set(env.roledefs['ec2task']):
    #     ip = re.search('ec2-(\d+-\d+-\d+-\d+)', host).group(1).replace('-', '.')
    #     sudo('ufw allow proto tcp from %s to any port %s' % (
    #         ip,
    #         ','.join(map(str, ports))
    #     ))
    sudo('ufw --force enable')
def setup_rabbitmq():
    """Install rabbitmq-server from the upstream apt repo and create the newsblur user/vhost."""
    sudo('echo "deb http://www.rabbitmq.com/debian/ testing main" | sudo tee -a /etc/apt/sources.list')
    run('wget http://www.rabbitmq.com/rabbitmq-signing-key-public.asc')
    sudo('apt-key add rabbitmq-signing-key-public.asc')
    run('rm rabbitmq-signing-key-public.asc')
    sudo('apt-get update')
    sudo('apt-get install -y rabbitmq-server')
    sudo('rabbitmqctl add_user newsblur newsblur')
    sudo('rabbitmqctl add_vhost newsblurvhost')
    sudo('rabbitmqctl set_permissions -p newsblurvhost newsblur ".*" ".*" ".*"')
# def setup_memcached():
# sudo('apt-get -y install memcached')
def setup_postgres(standby=False):
    """Install PostgreSQL 13 from the PGDG repo, push configs, and tune kernel shared memory.

    With ``standby`` True a recovery.conf is installed so the server starts as a replica.
    """
    shmmax = 17818362112
    hugepages = 9000
    sudo('echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |sudo tee /etc/apt/sources.list.d/pgdg.list')
    sudo('wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -')
    sudo('apt update')
    sudo('apt install -y postgresql-13')
    put('config/postgresql-13.conf', '/etc/postgresql/13/main/postgresql.conf', use_sudo=True)
    put('config/postgres_hba-13.conf', '/etc/postgresql/13/main/pg_hba.conf', use_sudo=True)
    sudo('mkdir -p /var/lib/postgresql/13/archive')
    sudo('chown -R postgres.postgres /etc/postgresql/13/main')
    sudo('chown -R postgres.postgres /var/lib/postgresql/13/main')
    sudo('chown -R postgres.postgres /var/lib/postgresql/13/archive')
    # Raise shared memory limits and huge pages for the DB, persisting via sysctl.conf.
    sudo('echo "%s" | sudo tee /proc/sys/kernel/shmmax' % shmmax)
    sudo('echo "\nkernel.shmmax = %s" | sudo tee -a /etc/sysctl.conf' % shmmax)
    sudo('echo "\nvm.nr_hugepages = %s\n" | sudo tee -a /etc/sysctl.conf' % hugepages)
    run('echo "ulimit -n 100000" > postgresql.defaults')
    sudo('mv postgresql.defaults /etc/default/postgresql')
    sudo('sysctl -p')
    sudo('rm -f /lib/systemd/system/postgresql.service') # Ubuntu 16 has wrong default
    sudo('systemctl daemon-reload')
    sudo('systemctl enable postgresql')
    if standby:
        put('config/postgresql_recovery.conf', '/var/lib/postgresql/13/recovery.conf', use_sudo=True)
        sudo('chown -R postgres.postgres /var/lib/postgresql/13/recovery.conf')
    sudo('/etc/init.d/postgresql stop')
    sudo('/etc/init.d/postgresql start')
def config_postgres(standby=False):
    """Push the PostgreSQL 13 config files and reload the running server.

    ``standby`` is accepted for symmetry with setup_postgres but unused here.
    """
    put('config/postgresql-13.conf', '/etc/postgresql/13/main/postgresql.conf', use_sudo=True)
    # NOTE(review): setup_postgres installs postgres_hba-13.conf but this pushes
    # postgres_hba.conf -- confirm which file is canonical.
    put('config/postgres_hba.conf', '/etc/postgresql/13/main/pg_hba.conf', use_sudo=True)
    sudo('chown postgres.postgres /etc/postgresql/13/main/postgresql.conf')
    run('echo "ulimit -n 100000" > postgresql.defaults')
    sudo('mv postgresql.defaults /etc/default/postgresql')
    sudo('/etc/init.d/postgresql reload 13')
def upgrade_postgres():
    """Run pg_upgrade to migrate the data directory from PostgreSQL 9.4 to 10."""
    sudo('su postgres -c "/usr/lib/postgresql/10/bin/pg_upgrade -b /usr/lib/postgresql/9.4/bin -B /usr/lib/postgresql/10/bin -d /var/lib/postgresql/9.4/main -D /var/lib/postgresql/10/main"')
def copy_postgres_to_standby(master='db01'):
    """Seed a new PostgreSQL standby by rsyncing the master's 9.4 data directory.

    Manual steps before/after (see inline notes): start/stop a base backup on
    the master, and set up SSH trust for the postgres user in both directions.
    """
    # http://www.rassoc.com/gregr/weblog/2013/02/16/zero-to-postgresql-streaming-replication-in-10-mins/
    # Make sure you can ssh from master to slave and back with the postgres user account.
    # Need to give postgres accounts keys in authorized_keys.
    # local: fab host:new copy_ssh_keys:postgres,private=True
    # new: sudo su postgres; ssh old
    # new: sudo su postgres; ssh db_pgsql
    # old: sudo su postgres; ssh new
    # old: sudo su postgres -c "psql -c \"SELECT pg_start_backup('label', true)\""
    sudo('systemctl stop postgresql')
    sudo('mkdir -p /var/lib/postgresql/9.4/archive')
    sudo('chown postgres.postgres /var/lib/postgresql/9.4/archive')
    with settings(warn_only=True):
        # warn_only: rsync exits non-zero for vanished files during a live copy.
        sudo('su postgres -c "rsync -Pav -e \'ssh -i ~postgres/.ssh/newsblur.key\' --stats --progress postgres@%s:/var/lib/postgresql/9.4/main /var/lib/postgresql/9.4/ --exclude postmaster.pid"' % master)
    put('config/postgresql_recovery.conf', '/var/lib/postgresql/9.4/main/recovery.conf', use_sudo=True)
    sudo('systemctl start postgresql')
    # old: sudo su postgres -c "psql -c \"SELECT pg_stop_backup()\""
    # Don't forget to add 'setup_postgres_backups' to new
def disable_thp():
    """Install an init script that disables transparent hugepages (used by the mongo and redis setup tasks)."""
    put('config/disable_transparent_hugepages.sh', '/etc/init.d/disable-transparent-hugepages', use_sudo=True)
    sudo('chmod 755 /etc/init.d/disable-transparent-hugepages')
    sudo('update-rc.d disable-transparent-hugepages defaults')
def setup_mongo():
    """Install MongoDB 3.4 from the mongodb-org apt repo, disable THP, and enable the service."""
    MONGODB_VERSION = "3.4.24"
    pull()
    disable_thp()
    sudo('systemctl enable rc-local.service') # Enable rc.local
    # rc.local re-disables transparent hugepages on every boot.
    sudo('echo "#!/bin/sh -e\n\nif test -f /sys/kernel/mm/transparent_hugepage/enabled; then\n\
    echo never > /sys/kernel/mm/transparent_hugepage/enabled\n\
    fi\n\
    if test -f /sys/kernel/mm/transparent_hugepage/defrag; then\n\
    echo never > /sys/kernel/mm/transparent_hugepage/defrag\n\
    fi\n\n\
    exit 0" | sudo tee /etc/rc.local')
    sudo('curl -fsSL https://www.mongodb.org/static/pgp/server-3.4.asc | sudo apt-key add -')
    # sudo('echo "deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen" | sudo tee /etc/apt/sources.list.d/mongodb.list')
    # sudo('echo "\ndeb http://downloads-distro.mongodb.org/repo/debian-sysvinit dist 10gen" | sudo tee -a /etc/apt/sources.list')
    # sudo('echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list')
    sudo('echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.4 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.4.list')
    sudo('apt-get update')
    # Pin every mongodb-org subpackage to the same version.
    sudo('apt-get install -y mongodb-org=%s mongodb-org-server=%s mongodb-org-shell=%s mongodb-org-mongos=%s mongodb-org-tools=%s' %
         (MONGODB_VERSION, MONGODB_VERSION, MONGODB_VERSION, MONGODB_VERSION, MONGODB_VERSION))
    put('config/mongodb.%s.conf' % ('prod' if env.user != 'ubuntu' else 'ec2'),
        '/etc/mongodb.conf', use_sudo=True)
    put('config/mongodb.service', '/etc/systemd/system/mongodb.service', use_sudo=True)
    run('echo "ulimit -n 100000" > mongodb.defaults')
    sudo('mv mongodb.defaults /etc/default/mongod')
    sudo('mkdir -p /var/log/mongodb')
    sudo('chown mongodb /var/log/mongodb')
    put('config/logrotate.mongo.conf', '/etc/logrotate.d/mongod', use_sudo=True)
    sudo('systemctl enable mongodb')
    # Reclaim 5% disk space used for root logs. Set to 1%.
    with settings(warn_only=True):
        sudo('tune2fs -m 1 /dev/vda1')
def setup_mongo_configsvr():
    """Install and start a MongoDB config server instance with its own data dir and init script."""
    sudo('mkdir -p /var/lib/mongodb_configsvr')
    sudo('chown mongodb.mongodb /var/lib/mongodb_configsvr')
    put('config/mongodb.configsvr.conf', '/etc/mongodb.configsvr.conf', use_sudo=True)
    put('config/mongodb.configsvr-init', '/etc/init.d/mongodb-configsvr', use_sudo=True)
    sudo('chmod u+x /etc/init.d/mongodb-configsvr')
    run('echo "ulimit -n 100000" > mongodb_configsvr.defaults')
    sudo('mv mongodb_configsvr.defaults /etc/default/mongodb_configsvr')
    sudo('update-rc.d -f mongodb-configsvr defaults')
    sudo('/etc/init.d/mongodb-configsvr start')
def setup_mongo_mongos():
    """Install and restart a mongos (MongoDB shard router) instance."""
    put('config/mongodb.mongos.conf', '/etc/mongodb.mongos.conf', use_sudo=True)
    put('config/mongodb.mongos-init', '/etc/init.d/mongodb-mongos', use_sudo=True)
    sudo('chmod u+x /etc/init.d/mongodb-mongos')
    run('echo "ulimit -n 100000" > mongodb_mongos.defaults')
    sudo('mv mongodb_mongos.defaults /etc/default/mongodb_mongos')
    sudo('update-rc.d -f mongodb-mongos defaults')
    sudo('/etc/init.d/mongodb-mongos restart')
def setup_mongo_mms():
    """Install the MongoDB MMS monitoring agent and append its API config from secrets."""
    pull()
    # Remove the old supervisor-managed agent before switching to the deb package.
    sudo('rm -f /etc/supervisor/conf.d/mongomms.conf')
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
    with cd(env.VENDOR_PATH):
        sudo('apt-get remove -y mongodb-mms-monitoring-agent')
        run('curl -OL https://mms.mongodb.com/download/agent/monitoring/mongodb-mms-monitoring-agent_2.2.0.70-1_amd64.deb')
        sudo('dpkg -i mongodb-mms-monitoring-agent_2.2.0.70-1_amd64.deb')
        run('rm mongodb-mms-monitoring-agent_2.2.0.70-1_amd64.deb')
        put(os.path.join(env.SECRETS_PATH, 'settings/mongo_mms_config.txt'),
            'mongo_mms_config.txt')
        sudo("echo \"\n\" | sudo tee -a /etc/mongodb-mms/monitoring-agent.config")
        sudo('cat mongo_mms_config.txt | sudo tee -a /etc/mongodb-mms/monitoring-agent.config')
        sudo('start mongodb-mms-monitoring-agent')
def setup_redis(slave=False):
    """Build Redis from source, install init/config files, tune kernel memory settings, and start it.

    With ``slave`` True the replica config is installed instead of the master config.
    """
    redis_version = '3.2.6'
    with cd(env.VENDOR_PATH):
        run('wget http://download.redis.io/releases/redis-%s.tar.gz' % redis_version)
        run('tar -xzf redis-%s.tar.gz' % redis_version)
        run('rm redis-%s.tar.gz' % redis_version)
    with cd(os.path.join(env.VENDOR_PATH, 'redis-%s' % redis_version)):
        sudo('make install')
    put('config/redis-init', '/etc/init.d/redis', use_sudo=True)
    sudo('chmod u+x /etc/init.d/redis')
    put('config/redis.conf', '/etc/redis.conf', use_sudo=True)
    if slave:
        put('config/redis_slave.conf', '/etc/redis_server.conf', use_sudo=True)
    else:
        put('config/redis_master.conf', '/etc/redis_server.conf', use_sudo=True)
    # sudo('chmod 666 /proc/sys/vm/overcommit_memory', pty=False)
    # run('echo "1" > /proc/sys/vm/overcommit_memory', pty=False)
    # sudo('chmod 644 /proc/sys/vm/overcommit_memory', pty=False)
    disable_thp()
    sudo('systemctl enable rc-local.service') # Enable rc.local
    # rc.local re-disables transparent hugepages on every boot.
    sudo('echo "#!/bin/sh -e\n\nif test -f /sys/kernel/mm/transparent_hugepage/enabled; then\n\
    echo never > /sys/kernel/mm/transparent_hugepage/enabled\n\
    fi\n\
    if test -f /sys/kernel/mm/transparent_hugepage/defrag; then\n\
    echo never > /sys/kernel/mm/transparent_hugepage/defrag\n\
    fi\n\n\
    exit 0" | sudo tee /etc/rc.local')
    # Allow background saves to fork even under memory pressure.
    sudo("echo 1 | sudo tee /proc/sys/vm/overcommit_memory")
    sudo('echo "vm.overcommit_memory = 1" | sudo tee -a /etc/sysctl.conf')
    sudo("sysctl vm.overcommit_memory=1")
    put('config/redis_rclocal.txt', '/etc/rc.local', use_sudo=True)
    sudo("chown root.root /etc/rc.local")
    sudo("chmod a+x /etc/rc.local")
    sudo('echo "never" | sudo tee /sys/kernel/mm/transparent_hugepage/enabled')
    run('echo "\nnet.core.somaxconn=65535\n" | sudo tee -a /etc/sysctl.conf', pty=False)
    sudo('mkdir -p /var/lib/redis')
    sudo('update-rc.d redis defaults')
    sudo('/etc/init.d/redis stop')
    sudo('/etc/init.d/redis start')
    setup_syncookies()
    config_monit_redis()
def setup_munin():
    """Install munin + munin-node with fcgi graph/html renderers and restart everything."""
    sudo('apt-get update')
    sudo('apt-get install -y munin munin-node munin-plugins-extra spawn-fcgi')
    put('config/munin.conf', '/etc/munin/munin.conf', use_sudo=True) # Only use on main munin
    put('config/spawn_fcgi_munin_graph.conf', '/etc/init.d/spawn_fcgi_munin_graph', use_sudo=True)
    put('config/spawn_fcgi_munin_html.conf', '/etc/init.d/spawn_fcgi_munin_html', use_sudo=True)
    sudo('chmod u+x /etc/init.d/spawn_fcgi_munin_graph')
    sudo('chmod u+x /etc/init.d/spawn_fcgi_munin_html')
    # warn_only: the cgi files may not exist yet on a fresh install.
    with settings(warn_only=True):
        sudo('chown nginx.www-data /var/log/munin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/cgi-bin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/munin/cgi/munin-cgi*')
    with settings(warn_only=True):
        sudo('/etc/init.d/spawn_fcgi_munin_graph stop')
        sudo('/etc/init.d/spawn_fcgi_munin_graph start')
        sudo('update-rc.d spawn_fcgi_munin_graph defaults')
        sudo('/etc/init.d/spawn_fcgi_munin_html stop')
        sudo('/etc/init.d/spawn_fcgi_munin_html start')
        sudo('update-rc.d spawn_fcgi_munin_html defaults')
    sudo('/etc/init.d/munin-node stop')
    time.sleep(2)
    sudo('/etc/init.d/munin-node start')
    with settings(warn_only=True):
        sudo('chown nginx.www-data /var/log/munin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/cgi-bin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/munin/cgi/munin-cgi*')
        sudo('chmod a+rw /var/log/munin/*')
    with settings(warn_only=True):
        sudo('/etc/init.d/spawn_fcgi_munin_graph start')
        sudo('/etc/init.d/spawn_fcgi_munin_html start')
def copy_munin_data(from_server):
    """Migrate munin state (data, config, rendered www) from another server via rsync."""
    put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key'), '~/.ssh/newsblur.key')
    put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key.pub'), '~/.ssh/newsblur.key.pub')
    run('chmod 600 ~/.ssh/newsblur*')
    # put("config/munin.nginx.conf", "/usr/local/nginx/conf/sites-enabled/munin.conf", use_sudo=True)
    sudo('/etc/init.d/nginx reload')
    # Stage each tree in /srv first, then swap it into place and fix ownership.
    run("rsync -az -e \"ssh -i /home/sclay/.ssh/newsblur.key\" --stats --progress %s:/var/lib/munin/ /srv/munin" % from_server)
    sudo('rm -fr /var/lib/bak-munin')
    sudo("mv /var/lib/munin /var/lib/bak-munin")
    sudo("mv /srv/munin /var/lib/")
    sudo("chown munin.munin -R /var/lib/munin")
    run("sudo rsync -az -e \"ssh -i /home/sclay/.ssh/newsblur.key\" --stats --progress %s:/etc/munin/ /srv/munin-etc" % from_server)
    sudo('rm -fr /etc/munin')
    sudo("mv /srv/munin-etc /etc/munin")
    sudo("chown munin.munin -R /etc/munin")
    run("sudo rsync -az -e \"ssh -i /home/sclay/.ssh/newsblur.key\" --stats --progress %s:/var/cache/munin/www/ /srv/munin-www" % from_server)
    sudo('rm -fr /var/cache/munin/www')
    sudo("mv /srv/munin-www /var/cache/munin/www")
    sudo("chown munin.munin -R /var/cache/munin/www")
    sudo("/etc/init.d/munin restart")
    sudo("/etc/init.d/munin-node restart")
def setup_db_munin():
    """Symlink the repo's mongo/postgres/redis munin plugins into /etc/munin/plugins and restart the node."""
    sudo('rm -f /etc/munin/plugins/mongo*')
    sudo('rm -f /etc/munin/plugins/pg_*')
    sudo('rm -f /etc/munin/plugins/redis_*')
    sudo('cp -frs %s/config/munin/mongo* /etc/munin/plugins/' % env.NEWSBLUR_PATH)
    sudo('cp -frs %s/config/munin/pg_* /etc/munin/plugins/' % env.NEWSBLUR_PATH)
    sudo('cp -frs %s/config/munin/redis_* /etc/munin/plugins/' % env.NEWSBLUR_PATH)
    sudo('/etc/init.d/munin-node stop')
    time.sleep(2)
    sudo('/etc/init.d/munin-node start')
def enable_celerybeat():
    """Install supervisor configs for celerybeat and the beat/work queues, then apply them."""
    with virtualenv():
        run('mkdir -p data')
    put('config/supervisor_celerybeat.conf', '/etc/supervisor/conf.d/celerybeat.conf', use_sudo=True)
    put('config/supervisor_celeryd_work_queue.conf', '/etc/supervisor/conf.d/celeryd_work_queue.conf', use_sudo=True)
    put('config/supervisor_celeryd_beat.conf', '/etc/supervisor/conf.d/celeryd_beat.conf', use_sudo=True)
    put('config/supervisor_celeryd_beat_feeds.conf', '/etc/supervisor/conf.d/celeryd_beat_feeds.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
def setup_db_mdadm():
    """Assemble a RAID-0 of four EBS volumes, format it XFS, and mount it at /srv/db for MongoDB."""
    sudo('apt-get -y install xfsprogs mdadm')
    sudo('yes | mdadm --create /dev/md0 --level=0 -c256 --raid-devices=4 /dev/xvdf /dev/xvdg /dev/xvdh /dev/xvdi')
    sudo('mkfs.xfs /dev/md0')
    sudo('mkdir -p /srv/db')
    sudo('mount -t xfs -o rw,nobarrier,noatime,nodiratime /dev/md0 /srv/db')
    sudo('mkdir -p /srv/db/mongodb')
    sudo('chown mongodb.mongodb /srv/db/mongodb')
    # Persist the array and mount across reboots.
    sudo("echo 'DEVICE /dev/xvdf /dev/xvdg /dev/xvdh /dev/xvdi' | sudo tee -a /etc/mdadm/mdadm.conf")
    sudo("mdadm --examine --scan | sudo tee -a /etc/mdadm/mdadm.conf")
    sudo("echo '/dev/md0 /srv/db xfs rw,nobarrier,noatime,nodiratime,noauto 0 0' | sudo tee -a /etc/fstab")
    sudo("sudo update-initramfs -u -v -k `uname -r`")
def setup_original_page_server():
    """Set up the node-based original page server: node runtime, /srv/originals storage, supervisor config."""
    setup_node_app()
    sudo('mkdir -p /srv/originals')
    sudo('chown %s.%s -R /srv/originals' % (env.user, env.user)) # We assume that the group is the same name as the user. It's common on linux
    config_monit_original()
    put('config/supervisor_node_original.conf',
        '/etc/supervisor/conf.d/node_original.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl reload')
def setup_elasticsearch():
    """Install Elasticsearch 2.4 (deb package) with OpenJDK 7 and the head plugin."""
    ES_VERSION = "2.4.4"
    sudo('add-apt-repository -y ppa:openjdk-r/ppa')
    sudo('apt-get update')
    sudo('apt-get install openjdk-7-jre -y')
    with cd(env.VENDOR_PATH):
        run('mkdir -p elasticsearch-%s' % ES_VERSION)
    with cd(os.path.join(env.VENDOR_PATH, 'elasticsearch-%s' % ES_VERSION)):
        # run('wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-%s.deb' % ES_VERSION) # For v5+
        run('wget http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-%s.deb' % ES_VERSION) # For v1-v2
        sudo('dpkg -i elasticsearch-%s.deb' % ES_VERSION)
        if not files.exists('/usr/share/elasticsearch/plugins/head'):
            sudo('/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head')
def setup_db_search():
    """Install supervisor configs for the search indexer celery workers."""
    put('config/supervisor_celeryd_search_indexer.conf', '/etc/supervisor/conf.d/celeryd_search_indexer.conf', use_sudo=True)
    put('config/supervisor_celeryd_search_indexer_tasker.conf', '/etc/supervisor/conf.d/celeryd_search_indexer_tasker.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
def setup_imageproxy(install_go=False):
    """Install willnorris/imageproxy under supervisor behind nginx.

    With ``install_go`` True the Go toolchain is downloaded and the binary
    built from source first.
    """
    # sudo('apt-get update')
    # sudo('apt-get install -y golang')
    if install_go:
        with cd(env.VENDOR_PATH):
            with settings(warn_only=True):
                # warn_only: the clone fails harmlessly if the checkout already exists.
                run('git clone https://github.com/willnorris/imageproxy.git')
            run('wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz')
            run('tar -xzf go1.13.3.linux-amd64.tar.gz')
            run('rm go1.13.3.linux-amd64.tar.gz')
            sudo('rm /usr/bin/go')
            sudo('ln -s /srv/code/go/bin/go /usr/bin/go')
    with cd(os.path.join(env.VENDOR_PATH, 'imageproxy')):
        run('go get willnorris.com/go/imageproxy/cmd/imageproxy')
    put(os.path.join(env.SECRETS_PATH, 'settings/imageproxy.key'),
        '/etc/imageproxy.key', use_sudo=True)
    put(os.path.join(env.NEWSBLUR_PATH, 'config/supervisor_imageproxy.conf'), '/etc/supervisor/conf.d/supervisor_imageproxy.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
    sudo('ufw allow 443')
    sudo('ufw allow 80')
    put(os.path.join(env.NEWSBLUR_PATH, 'config/nginx.imageproxy.conf'), "/usr/local/nginx/conf/sites-enabled/imageproxy.conf", use_sudo=True)
    sudo("/etc/init.d/nginx restart")
@parallel
def setup_usage_monitor():
    """Link the disk usage monitor into cron.daily and run it once immediately."""
    sudo('ln -fs %s/utils/monitor_disk_usage.py /etc/cron.daily/monitor_disk_usage' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.daily/monitor_disk_usage')
@parallel
def setup_feeds_fetched_monitor():
    """Link the feed-fetch monitor into cron.hourly and run it once immediately."""
    sudo('ln -fs %s/utils/monitor_task_fetches.py /etc/cron.hourly/monitor_task_fetches' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.hourly/monitor_task_fetches')
@parallel
def setup_newsletter_monitor():
    """Link the newsletter delivery monitor into cron.hourly and run it once immediately."""
    sudo('ln -fs %s/utils/monitor_newsletter_delivery.py /etc/cron.hourly/monitor_newsletter_delivery' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.hourly/monitor_newsletter_delivery')
@parallel
def setup_queue_monitor():
    """Link the work queue monitor into cron.hourly and run it once immediately."""
    sudo('ln -fs %s/utils/monitor_work_queue.py /etc/cron.hourly/monitor_work_queue' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.hourly/monitor_work_queue')
@parallel
def setup_redis_monitor():
    """Link the redis bgsave monitor into cron.daily and run it once immediately."""
    run('sleep 5') # Wait for redis to startup so the log file is there
    sudo('ln -fs %s/utils/monitor_redis_bgsave.py /etc/cron.daily/monitor_redis_bgsave' % env.NEWSBLUR_PATH)
    with settings(warn_only=True):
        sudo('/etc/cron.daily/monitor_redis_bgsave')
# ================
# = Setup - Task =
# ================
def setup_task_firewall():
    """Lock down a task server with ufw: deny everything except ssh and http."""
    sudo('ufw default deny')
    sudo('ufw allow ssh')
    sudo('ufw allow 80')
    sudo('ufw --force enable')
def setup_motd(role='app'):
    """Install the role-specific message-of-the-day script and make it executable."""
    motd_path = '/etc/update-motd.d/22-newsblur-motd'
    put('config/motd_%s.txt' % role, motd_path, use_sudo=True)
    sudo('chown root.root %s' % motd_path)
    sudo('chmod a+x %s' % motd_path)
def enable_celery_supervisor(queue=None, update=True):
    """Install the celeryd supervisor config (queue-specific when ``queue`` is
    given) and re-read supervisor; apply it when ``update`` is True."""
    if queue:
        conf = 'config/supervisor_celeryd_%s.conf' % queue
    else:
        conf = 'config/supervisor_celeryd.conf'
    put(conf, '/etc/supervisor/conf.d/celeryd.conf', use_sudo=True)
    sudo('supervisorctl reread')
    if update:
        sudo('supervisorctl update')
@parallel
def copy_db_settings():
    """Alias for copy_task_settings: DB hosts use the task settings file."""
    return copy_task_settings()
@parallel
def copy_task_settings():
    """Replace local_settings.py with the task settings from secrets, appending the host's SERVER_NAME."""
    server_hostname = run('hostname')
    # if any([(n in server_hostname) for n in ['task', 'db', 'search', 'node', 'push']]):
    host = server_hostname
    # elif env.host:
    #     host = env.host.split('.', 2)[0]
    # else:
    #     host = env.host_string.split('.', 2)[0]
    with settings(warn_only=True):
        run('rm -f %s/local_settings.py' % env.NEWSBLUR_PATH)
        put(os.path.join(env.SECRETS_PATH, 'settings/task_settings.py'),
            '%s/newsblur/local_settings.py' % env.NEWSBLUR_PATH)
        run('echo "\nSERVER_NAME = \\\\"%s\\\\"" >> %s/newsblur/local_settings.py' % (host, env.NEWSBLUR_PATH))
@parallel
def copy_spam():
    """Upload the private spam module from secrets into apps/social."""
    put(os.path.join(env.SECRETS_PATH, 'spam/spam.py'), '%s/apps/social/spam.py' % env.NEWSBLUR_PATH)
# =========================
# = Setup - Digital Ocean =
# =========================
# Shorthand size key (RAM in GB, as passed to setup_do) -> DigitalOcean droplet size slug.
# '32c' is the CPU-optimized 16-core variant.
DO_SIZES = {
    '1': 's-1vcpu-1gb',
    '2': 's-1vcpu-2gb',
    '4': 's-2vcpu-4gb',
    '8': 's-4vcpu-8gb',
    '16': 's-6vcpu-16gb',
    '32': 's-8vcpu-32gb',
    '48': 's-12vcpu-48gb',
    '64': 's-16vcpu-64gb',
    '32c': 'c-16',
}
def setup_do(name, size=1, image=None):
    """Create a DigitalOcean droplet, wait for it to boot, then bootstrap the user account.

    ``size`` indexes DO_SIZES; ``image`` may be None (stock Ubuntu 20.04),
    "task"/"app" (prebuilt snapshots), or anything else to just list images.
    """
    instance_size = DO_SIZES[str(size)]
    doapi = digitalocean.Manager(token=django_settings.DO_TOKEN_FABRIC)
    # droplets = doapi.get_all_droplets()
    # sizes = dict((s.slug, s.slug) for s in doapi.get_all_sizes())
    ssh_key_ids = [k.id for k in doapi.get_all_sshkeys()]
    if not image:
        image = "ubuntu-20-04-x64"
    else:
        images = dict((s.name, s.id) for s in doapi.get_all_images())
        if image == "task":
            image = images["task-2018-02"]
        elif image == "app":
            image = images["app-2018-02"]
        else:
            # Unknown image name: print the available images for the operator.
            images = dict((s.name, s.id) for s in doapi.get_all_images())
            print(images)
    name = do_name(name)
    env.doname = name
    print("Creating droplet: %s" % name)
    instance = digitalocean.Droplet(token=django_settings.DO_TOKEN_FABRIC,
                                    name=name,
                                    size_slug=instance_size,
                                    image=image,
                                    region='nyc1',
                                    monitoring=True,
                                    private_networking=True,
                                    ssh_keys=ssh_key_ids)
    instance.create()
    time.sleep(2)
    instance = digitalocean.Droplet.get_object(django_settings.DO_TOKEN_FABRIC, instance.id)
    print("Booting droplet: %s / %s (size: %s)" % (instance.name, instance.ip_address, instance_size))
    i = 0
    # Poll with a growing backoff until the droplet reports 'active'.
    while True:
        if instance.status == 'active':
            print("...booted: %s" % instance.ip_address)
            time.sleep(5)
            break
        elif instance.status == 'new':
            print(".", end=' ')
            sys.stdout.flush()
            instance = digitalocean.Droplet.get_object(django_settings.DO_TOKEN_FABRIC, instance.id)
            i += 1
            time.sleep(i)
        else:
            print("!!! Error: %s" % instance.status)
            return
    host = instance.ip_address
    env.host_string = host
    time.sleep(20)
    add_user_to_do()
    assign_digitalocean_roledefs()
def do_name(name):
    """Resolve *name* to a droplet hostname.

    A name that already contains a digit is used verbatim. Otherwise the
    lowest unused two-digit suffix (01-99) among existing droplets whose
    names contain *name* is appended and the result returned.
    """
    if re.search(r"[0-9]", name):
        print(" ---> Using %s as hostname" % name)
        return name

    droplets = do_roledefs(split=False)
    all_names = [droplet.name for droplet in droplets]
    taken = [candidate for candidate in all_names if name in candidate]
    for suffix in range(1, 100):
        numbered = "%s%02d" % (name, suffix)
        if numbered in taken:
            continue
        print(" ---> %s hosts in %s (%s). %s is unused." % (len(taken), name,
                                                            ', '.join(taken), numbered))
        return numbered
def add_user_to_do():
    """Create the 'sclay' deploy user on a fresh droplet (run as root) and switch env.user to it."""
    env.user = "root"
    repo_user = "sclay"
    with settings(warn_only=True):
        # warn_only: user/dirs may already exist on a re-run.
        run('useradd -m %s' % (repo_user))
        setup_sudoers("%s" % (repo_user))
        run('mkdir -p ~%s/.ssh && chmod 700 ~%s/.ssh' % (repo_user, repo_user))
        run('rm -fr ~%s/.ssh/id_dsa*' % (repo_user))
        run('ssh-keygen -t dsa -f ~%s/.ssh/id_dsa -N ""' % (repo_user))
        run('touch ~%s/.ssh/authorized_keys' % (repo_user))
        copy_ssh_keys()
        run('chown %s.%s -R ~%s/.ssh' % (repo_user, repo_user, repo_user))
    # Subsequent fabric commands connect as the deploy user, not root.
    env.user = repo_user
# ===============
# = Setup - EC2 =
# ===============
def setup_ec2():
    """Launch a single EC2 instance and point env.host_string at it once it's running."""
    AMI_NAME = 'ami-834cf1ea' # Ubuntu 64-bit 12.04 LTS
    # INSTANCE_TYPE = 'c1.medium'
    INSTANCE_TYPE = 'c1.medium'
    conn = EC2Connection(django_settings.AWS_ACCESS_KEY_ID, django_settings.AWS_SECRET_ACCESS_KEY)
    reservation = conn.run_instances(AMI_NAME, instance_type=INSTANCE_TYPE,
                                     key_name=env.user,
                                     security_groups=['db-mongo'])
    instance = reservation.instances[0]
    print("Booting reservation: %s/%s (size: %s)" % (reservation, instance, INSTANCE_TYPE))
    i = 0
    while True:
        if instance.state == 'pending':
            # Poll with a linearly growing back-off until the instance boots.
            print(".", end=' ')
            sys.stdout.flush()
            instance.update()
            i += 1
            time.sleep(i)
        elif instance.state == 'running':
            print("...booted: %s" % instance.public_dns_name)
            time.sleep(5)
            break
        else:
            # Unexpected state (e.g. terminated): give up.
            print("!!! Error: %s" % instance.state)
            return
    host = instance.public_dns_name
    env.host_string = host
# ==========
# = Deploy =
# ==========
@parallel
def pull(master=False):
    """git pull the NewsBlur checkout on every host; optionally switch to master first."""
    with virtualenv():
        run('git pull')
        if master:
            run('git checkout master')
            run('git pull')
def pre_deploy():
    """Build the bundled static assets locally before a deploy."""
    compress_assets(bundle=True)
@serial
def post_deploy():
    """Remove the local asset tarball after a deploy (serial: runs once per host in order)."""
    cleanup_assets()
def role_for_host():
    """Return the first role in env.roledefs whose host list contains the
    current env.host, or None if the host appears in no role."""
    return next(
        (role for role, hosts in env.roledefs.items() if env.host in hosts),
        None,
    )
@parallel
def deploy(fast=False, reload=False):
    """Deploy code (without static assets) to every host.

    Hosts in the work/search/debug roles always get a supervisor reload;
    other roles reload only when explicitly requested.
    """
    role = role_for_host()
    force_reload = reload or role in ('work', 'search', 'debug')
    deploy_code(copy_assets=False, fast=fast, reload=force_reload)
@parallel
def deploy_web(fast=False):
    """Deploy code with static assets; work/search hosts also reload supervisor."""
    needs_reload = role_for_host() in ('work', 'search')
    deploy_code(copy_assets=True, fast=fast, reload=needs_reload)
@parallel
def deploy_rebuild(fast=False):
    """Deploy with assets, wiping and rebuilding the static/ directory from scratch."""
    deploy_code(copy_assets=True, fast=fast, rebuild=True)
@parallel
def kill_gunicorn():
    """Forcibly kill all gunicorn_django processes owned by the deploy user."""
    with virtualenv():
        sudo('pkill -9 -u %s -f gunicorn_django' % env.user)
@parallel
def deploy_code(copy_assets=False, rebuild=False, fast=False, reload=False):
    """Pull the latest code and restart the web workers on each host.

    copy_assets -- download the pre-built static bundle from S3 into static/.
    rebuild     -- wipe static/ before copying assets.
    fast        -- hard-kill gunicorn instead of a graceful HUP.
    reload      -- do a full supervisor reload instead of touching gunicorn.
    """
    with virtualenv():
        run('git pull')
        run('mkdir -p static')
        if rebuild:
            run('rm -fr static/*')
        if copy_assets:
            transfer_assets()

    with virtualenv():
        with settings(warn_only=True):
            if reload:
                sudo('supervisorctl reload')
            elif fast:
                kill_gunicorn()
            else:
                # Graceful restart: HUP tells gunicorn to re-exec its workers.
                sudo('kill -HUP `cat /srv/newsblur/logs/gunicorn.pid`')
@parallel
def kill():
    """Reload supervisor and hard-kill any straggling gunicorn processes."""
    sudo('supervisorctl reload')
    with settings(warn_only=True):
        # EC2 ubuntu user needs sudo for the kill script; deploy user does not.
        if env.user == 'ubuntu':
            sudo('./utils/kill_gunicorn.sh')
        else:
            run('./utils/kill_gunicorn.sh')
@parallel
def deploy_node():
    """Pull latest code and restart all node.js services via supervisor."""
    pull()
    with virtualenv():
        run('sudo supervisorctl restart node_unread')
        run('sudo supervisorctl restart node_unread_ssl')
        run('sudo supervisorctl restart node_favicons')
        run('sudo supervisorctl restart node_text')
def gunicorn_restart():
    """Alias for restart_gunicorn()."""
    restart_gunicorn()
def restart_gunicorn():
    """Restart the gunicorn service via supervisor."""
    with virtualenv(), settings(warn_only=True):
        run('sudo supervisorctl restart gunicorn')
def gunicorn_stop():
    """Stop the gunicorn service via supervisor."""
    with virtualenv(), settings(warn_only=True):
        run('sudo supervisorctl stop gunicorn')
def staging():
    """Deploy the staging checkout: pull, HUP gunicorn, and warm the dev site."""
    with cd('~/staging'):
        run('git pull')
        run('kill -HUP `cat logs/gunicorn.pid`')
        # Warm-up requests so the first real visitor doesn't pay startup cost.
        run('curl -s http://dev.newsblur.com > /dev/null')
        run('curl -s http://dev.newsblur.com/m/ > /dev/null')
def staging_build():
    """Deploy staging including database migrations, then restart and warm the dev site."""
    with cd('~/staging'):
        run('git pull')
        run('./manage.py migrate')
        run('kill -HUP `cat logs/gunicorn.pid`')
        run('curl -s http://dev.newsblur.com > /dev/null')
        run('curl -s http://dev.newsblur.com/m/ > /dev/null')
@parallel
def celery():
    """Alias for celery_slow(): pull code then fully stop/start celery."""
    celery_slow()
def celery_slow():
    """Pull latest code, then do a full celery stop/start cycle."""
    with virtualenv():
        run('git pull')
    celery_stop()
    celery_start()
@parallel
def celery_fast():
    """Pull latest code and reload celery via supervisor (no hard kill)."""
    with virtualenv():
        run('git pull')
    celery_reload()
@parallel
def celery_stop():
    """Stop celery via supervisor, then kill any remaining worker processes."""
    with virtualenv():
        sudo('supervisorctl stop celery')
        with settings(warn_only=True):
            # EC2 ubuntu user needs sudo for the kill script; deploy user does not.
            if env.user == 'ubuntu':
                sudo('./utils/kill_celery.sh')
            else:
                run('./utils/kill_celery.sh')
@parallel
def celery_start():
    """Start celery via supervisor and show the tail of the app log."""
    with virtualenv():
        run('sudo supervisorctl start celery')
        run('tail logs/newsblur.log')
@parallel
def celery_reload():
    """Reload celery via supervisor and show the tail of the app log."""
    with virtualenv():
        run('sudo supervisorctl reload celery')
        run('tail logs/newsblur.log')
def kill_celery():
    """Hard-kill celery worker processes without touching supervisor state."""
    with virtualenv():
        with settings(warn_only=True):
            if env.user == 'ubuntu':
                sudo('./utils/kill_celery.sh')
            else:
                run('./utils/kill_celery.sh')
def compress_assets(bundle=False):
    """Build static assets locally with jammit, tar them, and upload to S3.

    The S3 upload is retried up to 5 times. NOTE(review): `bundle` is
    accepted for API compatibility but unused here — confirm with callers.
    """
    local('jammit -c newsblur/assets.yml --base-url https://www.newsblur.com --output static')
    local('tar -czf static.tgz static/*')
    tries_left = 5
    while True:
        try:
            success = False
            with settings(warn_only=True):
                local('PYTHONPATH=/srv/newsblur python utils/backups/s3.py set static.tgz')
                # Only reached if the upload command didn't raise.
                success = True
            if not success:
                raise Exception("Ack!")
            break
        except Exception as e:
            print(" ***> %s. Trying %s more time%s..." % (e, tries_left, '' if tries_left == 1 else 's'))
            tries_left -= 1
            if tries_left <= 0: break
def transfer_assets():
    """Download the pre-built static asset bundle from S3 and unpack it into static/."""
    # filename = "deploy_%s.tgz" % env.commit # Easy rollback? Eh, can just upload it again.
    # run('PYTHONPATH=/srv/newsblur python s3.py get deploy_%s.tgz' % filename)
    run('PYTHONPATH=/srv/newsblur python utils/backups/s3.py get static.tgz')
    # run('mv %s static/static.tgz' % filename)
    run('mv static.tgz static/static.tgz')
    run('tar -xzf static/static.tgz')
    run('rm -f static/static.tgz')
def cleanup_assets():
    """Delete the local static asset tarball left over from compress_assets()."""
    local('rm -f static.tgz')
# ===========
# = Backups =
# ===========
def setup_redis_backups(name=None):
    """Install a nightly (4am) crontab entry for the redis backup script and print the crontab."""
    # crontab for redis backups, name is either none, story, sessions, pubsub
    crontab = ("0 4 * * * /srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_redis%s.py" %
              (("_%s"%name) if name else ""))
    # sort|uniq keeps the entry idempotent across repeated runs.
    run('(crontab -l ; echo "%s") | sort - | uniq - | crontab -' % crontab)
    run('crontab -l')
def setup_mongo_backups():
    """Install a nightly (4am) crontab entry for the mongo backup script and print the crontab."""
    # crontab for mongo backups
    crontab = "0 4 * * * /srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_mongo.py"
    # sort|uniq keeps the entry idempotent across repeated runs.
    run('(crontab -l ; echo "%s") | sort - | uniq - | crontab -' % crontab)
    run('crontab -l')
def setup_postgres_backups():
    """Install crontab entries for nightly pg backups and hourly WAL-archive pruning."""
    # crontab for postgres backups
    crontab = """
    0 4 * * * /srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_psql.py
    0 * * * * sudo find /var/lib/postgresql/13/archive -mtime +1 -exec rm {} \;
    0 * * * * sudo find /var/lib/postgresql/13/archive -type f -mmin +180 -delete"""
    # sort|uniq keeps the entries idempotent across repeated runs.
    run('(crontab -l ; echo "%s") | sort - | uniq - | crontab -' % crontab)
    run('crontab -l')
def backup_redis(name=None):
    """Run the redis backup script once, optionally for a named instance (story/sessions/pubsub)."""
    run('/srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_redis%s.py' % (("_%s"%name) if name else ""))
def backup_mongo():
    """Run the mongo backup script once on the remote host."""
    run('/srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_mongo.py')
def backup_postgresql():
    """Run the postgres backup script once on the remote host."""
    run('/srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_psql.py')
# ===============
# = Calibration =
# ===============
def sync_time():
    """Force an immediate clock sync: stop ntpd, step the clock with ntpdate, restart ntpd."""
    with settings(warn_only=True):
        sudo("/etc/init.d/ntp stop")
        sudo("ntpdate pool.ntp.org")
        sudo("/etc/init.d/ntp start")
def setup_time_calibration():
    """Install ntp and an hourly ntpdate cron job, then run it once immediately."""
    sudo('apt-get -y install ntp')
    put('config/ntpdate.cron', '%s/' % env.NEWSBLUR_PATH)
    sudo('chown root.root %s/ntpdate.cron' % env.NEWSBLUR_PATH)
    sudo('chmod 755 %s/ntpdate.cron' % env.NEWSBLUR_PATH)
    sudo('mv %s/ntpdate.cron /etc/cron.hourly/ntpdate' % env.NEWSBLUR_PATH)
    with settings(warn_only=True):
        sudo('/etc/cron.hourly/ntpdate')
# ==============
# = Tasks - DB =
# ==============
def restore_postgres(port=5432, download=False):
    """Drop and recreate the newsblur postgres DB, then restore a pinned S3 backup.

    Prompts for confirmation first (anything other than 'y' aborts).
    download -- fetch the backup tarball from S3 before restoring.
    """
    with virtualenv():
        # Pinned backup snapshot; update by hand when restoring a newer dump.
        backup_date = '2020-12-03-02-51'
        yes = prompt("Dropping and creating NewsBlur PGSQL db. Sure?")
        if yes != 'y':
            return
        if download:
            run('mkdir -p postgres')
            run('PYTHONPATH=%s python utils/backups/s3.py get postgres/backup_postgresql_%s.sql.gz' % (env.NEWSBLUR_PATH, backup_date))
        # sudo('su postgres -c "createuser -p %s -U newsblur"' % (port,))
        with settings(warn_only=True):
            # May not exist
            run('dropdb newsblur -p %s -U newsblur' % (port,), pty=False)
            run('sudo -u postgres createuser newsblur -s')
            # May already exist
            run('createdb newsblur -p %s -O newsblur -U newsblur' % (port,), pty=False)
        run('pg_restore -U newsblur -p %s --role=newsblur --dbname=newsblur /srv/newsblur/postgres/backup_postgresql_%s.sql.gz' % (port, backup_date), pty=False)
def restore_mongo(download=False):
    """Restore a pinned mongo backup snapshot, optionally fetching it from S3 first."""
    # Pinned backup snapshot; update by hand when restoring a newer dump.
    backup_date = '2020-11-11-04-00'
    if download:
        run('PYTHONPATH=/srv/newsblur python utils/backups/s3.py get backup_mongo_%s.tgz' % (backup_date))
    run('tar -xf backup_mongo_%s.tgz' % backup_date)
    run('mongorestore backup_mongo_%s' % backup_date)
# ======
# = S3 =
# ======
# Load S3 credentials from Django settings at import time; the S3 helper
# functions below depend on these module-level names being defined.
if django_settings:
    try:
        ACCESS_KEY = django_settings.S3_ACCESS_KEY
        SECRET = django_settings.S3_SECRET
        BUCKET_NAME = django_settings.S3_BACKUP_BUCKET # Note that you need to create this bucket first
    # NOTE(review): bare except deliberately swallows any settings error so the
    # rest of the fabfile still imports; consider narrowing to AttributeError.
    except:
        print(" ---> You need to fix django's settings. Enter python and type `import settings`.")
def save_file_in_s3(filename):
    """Upload a local file to the backup bucket under its own name as the key."""
    conn = S3Connection(ACCESS_KEY, SECRET)
    bucket = conn.get_bucket(BUCKET_NAME)
    k = Key(bucket)
    k.key = filename
    k.set_contents_from_filename(filename)
def get_file_from_s3(filename):
    """Download a key from the backup bucket into a local file of the same name."""
    conn = S3Connection(ACCESS_KEY, SECRET)
    bucket = conn.get_bucket(BUCKET_NAME)
    k = Key(bucket)
    k.key = filename
    k.get_contents_to_filename(filename)
def list_backup_in_s3():
    """Print an indexed listing of every key in the backup bucket."""
    conn = S3Connection(ACCESS_KEY, SECRET)
    bucket = conn.get_bucket(BUCKET_NAME)
    for i, key in enumerate(bucket.get_all_keys()):
        print("[%s] %s" % (i, key.name))
def delete_all_backups():
    """Delete every key in the backup bucket. Irreversible; prints each key as it goes."""
    #FIXME: validate filename exists
    conn = S3Connection(ACCESS_KEY, SECRET)
    bucket = conn.get_bucket(BUCKET_NAME)
    # Idiom fix: the old enumerate() index was never used.
    for key in bucket.get_all_keys():
        print("deleting %s" % (key.name))
        key.delete()
def add_revsys_keys():
    """Append the revsys contractors' public SSH keys to the remote authorized_keys."""
    put("~/Downloads/revsys-keys.pub", "revsys_keys")
    run('cat revsys_keys >> ~/.ssh/authorized_keys')
    run('rm revsys_keys')
def upgrade_to_virtualenv(role=None):
    """Migrate a host of the given role to the virtualenv-based setup.

    Stops the role's services, reinstalls system packages and pip deps,
    then re-enables the role's services (most roles finish with a reboot).
    role -- one of 'task', 'search', 'app', 'node', 'work'; required.
    """
    if not role:
        print(" ---> You must specify a role!")
        return
    setup_virtualenv()
    # Phase 1: stop whatever this role runs before upgrading.
    if role == "task" or role == "search":
        celery_stop()
    elif role == "app":
        gunicorn_stop()
    elif role == "node":
        run('sudo supervisorctl stop node_unread')
        run('sudo supervisorctl stop node_favicons')
    elif role == "work":
        sudo('/etc/init.d/supervisor stop')
    kill_pgbouncer(bounce=False)
    setup_installs()
    pip()
    # Phase 2: bring the role's services back up on the new environment.
    if role == "task":
        enable_celery_supervisor(update=False)
        sudo('reboot')
    elif role == "app":
        setup_gunicorn(supervisor=True, restart=False)
        sudo('reboot')
    elif role == "node":
        deploy_node()
    elif role == "search":
        setup_db_search()
    elif role == "work":
        enable_celerybeat()
        sudo('reboot')
def benchmark():
    """Install sysbench and run CPU plus random-read/write file I/O benchmarks on the host."""
    run('curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.deb.sh | sudo bash')
    sudo('apt-get install -y sysbench')
    run('sysbench cpu --cpu-max-prime=20000 run')
    run('sysbench fileio --file-total-size=150G prepare')
    run('sysbench fileio --file-total-size=150G --file-test-mode=rndrw --time=300 --max-requests=0 run')
    run('sysbench fileio --file-total-size=150G cleanup')
| 38.006202 | 204 | 0.631363 | from fabric.api import cd, lcd, env, local, parallel, serial
from fabric.api import put, run, settings, sudo, prefix
from fabric.operations import prompt
from fabric.contrib import django
from fabric.contrib import files
from fabric.state import connections
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.ec2.connection import EC2Connection
import yaml
from pprint import pprint
from collections import defaultdict
from contextlib import contextmanager as _contextmanager
import os
import time
import sys
import re
try:
import digitalocean
except ImportError:
print("Digital Ocean's API not loaded. Install python-digitalocean.")
django.settings_module('newsblur_web.settings')
try:
from django.conf import settings as django_settings
except ImportError:
print(" ---> Django not installed yet.")
django_settings = None
# ============
# = DEFAULTS =
# ============
env.NEWSBLUR_PATH = "/srv/newsblur"
env.SECRETS_PATH = "/srv/secrets-newsblur"
env.VENDOR_PATH = "/srv/code"
env.user = 'sclay'
env.key_filename = os.path.join(env.SECRETS_PATH, 'keys/newsblur.key')
env.connection_attempts = 10
env.do_ip_to_hostname = {}
env.colorize_errors = True
# =========
# = Roles =
# =========
try:
hosts_path = os.path.expanduser(os.path.join(env.SECRETS_PATH, 'configs/hosts.yml'))
roles = yaml.load(open(hosts_path))
for role_name, hosts in list(roles.items()):
if isinstance(hosts, dict):
roles[role_name] = [host for host in list(hosts.keys())]
env.roledefs = roles
except:
print(" ***> No role definitions found in %s. Using default roles." % hosts_path)
env.roledefs = {
'app' : ['app01.newsblur.com'],
'db' : ['db01.newsblur.com'],
'task' : ['task01.newsblur.com'],
}
def do_roledefs(split=False, debug=False):
doapi = digitalocean.Manager(token=django_settings.DO_TOKEN_FABRIC)
droplets = doapi.get_all_droplets()
env.do_ip_to_hostname = {}
hostnames = {}
for droplet in droplets:
roledef = re.split(r"([0-9]+)", droplet.name)[0]
if roledef not in env.roledefs:
env.roledefs[roledef] = []
if roledef not in hostnames:
hostnames[roledef] = []
if droplet.ip_address not in hostnames[roledef]:
hostnames[roledef].append({'name': droplet.name, 'address': droplet.ip_address})
env.do_ip_to_hostname[droplet.ip_address] = droplet.name
if droplet.ip_address not in env.roledefs[roledef]:
env.roledefs[roledef].append(droplet.ip_address)
if split:
return hostnames
return droplets
def list_do():
droplets = assign_digitalocean_roledefs(split=True)
pprint(droplets)
# Uncomment below to print all IP addresses
# for group in droplets.values():
# for server in group:
# if 'address' in server:
# print(server['address'])
doapi = digitalocean.Manager(token=django_settings.DO_TOKEN_FABRIC)
droplets = doapi.get_all_droplets()
sizes = doapi.get_all_sizes()
sizes = dict((size.slug, size.price_monthly) for size in sizes)
role_costs = defaultdict(int)
total_cost = 0
for droplet in droplets:
roledef = re.split(r"([0-9]+)", droplet.name)[0]
cost = droplet.size['price_monthly']
role_costs[roledef] += cost
total_cost += cost
print("\n\n Costs:")
pprint(dict(role_costs))
print(" ---> Total cost: $%s/month" % total_cost)
def host(*names):
env.hosts = []
env.doname = ','.join(names)
hostnames = assign_digitalocean_roledefs(split=True)
for role, hosts in list(hostnames.items()):
for host in hosts:
if isinstance(host, dict) and host['name'] in names:
env.hosts.append(host['address'])
print(" ---> Using %s as hosts" % env.hosts)
# ================
# = Environments =
# ================
def server():
env.NEWSBLUR_PATH = "/srv/newsblur"
env.VENDOR_PATH = "/srv/code"
def assign_digitalocean_roledefs(split=False):
server()
droplets = do_roledefs(split=split)
if split:
for roledef, hosts in list(env.roledefs.items()):
if roledef not in droplets:
droplets[roledef] = hosts
return droplets
def app():
assign_digitalocean_roledefs()
env.roles = ['app']
def web():
assign_digitalocean_roledefs()
env.roles = ['app', 'push', 'work', 'search']
def work():
assign_digitalocean_roledefs()
env.roles = ['work']
def www():
assign_digitalocean_roledefs()
env.roles = ['www']
def dev():
assign_digitalocean_roledefs()
env.roles = ['dev']
def debug():
assign_digitalocean_roledefs()
env.roles = ['debug']
def node():
assign_digitalocean_roledefs()
env.roles = ['node']
def push():
assign_digitalocean_roledefs()
env.roles = ['push']
def db():
assign_digitalocean_roledefs()
env.roles = ['db', 'search']
def task():
assign_digitalocean_roledefs()
env.roles = ['task']
def ec2task():
ec2()
env.roles = ['ec2task']
def ec2():
env.user = 'ubuntu'
env.key_filename = ['/Users/sclay/.ec2/sclay.pem']
assign_digitalocean_roledefs()
def all():
assign_digitalocean_roledefs()
env.roles = ['app', 'db', 'debug', 'node', 'push', 'work', 'www', 'search']
# =============
# = Bootstrap =
# =============
def setup_common():
setup_installs()
change_shell()
setup_user()
setup_sudoers()
setup_ulimit()
setup_do_monitoring()
setup_libxml()
setup_psql_client()
setup_repo()
setup_local_files()
setup_time_calibration()
setup_pip()
setup_virtualenv()
setup_repo_local_settings()
pip()
setup_supervisor()
setup_hosts()
setup_pgbouncer()
config_pgbouncer()
setup_mongoengine_repo()
# setup_forked_mongoengine()
# setup_pymongo_repo()
setup_logrotate()
copy_certificates()
setup_nginx()
setup_munin()
def setup_all():
setup_common()
setup_app(skip_common=True)
setup_db(skip_common=True)
setup_task(skip_common=True)
def setup_app_docker(skip_common=False):
if not skip_common:
setup_common()
setup_app_firewall()
setup_motd('app')
change_shell()
setup_user()
setup_sudoers()
setup_ulimit()
setup_do_monitoring()
setup_repo()
setup_local_files()
# setup_time_calibration()
setup_docker()
done()
sudo('reboot')
def setup_app(skip_common=False, node=False):
if not skip_common:
setup_common()
setup_app_firewall()
setup_motd('app')
copy_app_settings()
config_nginx()
setup_gunicorn(supervisor=True)
if node:
setup_node()
deploy_web()
config_monit_app()
setup_usage_monitor()
done()
sudo('reboot')
def setup_app_image():
copy_app_settings()
setup_hosts()
config_pgbouncer()
pull()
pip()
deploy_web()
done()
sudo('reboot')
def setup_node():
setup_node_app()
config_node(full=True)
def setup_db(engine=None, skip_common=False, skip_benchmark=False):
if not skip_common:
setup_common()
setup_db_firewall()
setup_motd('db')
copy_db_settings()
if engine == "postgres":
setup_postgres(standby=False)
setup_postgres_backups()
elif engine == "postgres_slave":
setup_postgres(standby=True)
elif engine and engine.startswith("mongo"):
setup_mongo()
# setup_mongo_mms()
setup_mongo_backups()
elif engine == "redis":
setup_redis()
setup_redis_backups()
setup_redis_monitor()
elif engine == "redis_slave":
setup_redis(slave=True)
setup_redis_monitor()
elif engine == "elasticsearch":
setup_elasticsearch()
setup_db_search()
setup_gunicorn(supervisor=False)
setup_db_munin()
setup_db_monitor()
setup_usage_monitor()
if not skip_benchmark:
benchmark()
done()
# if env.user == 'ubuntu':
# setup_db_mdadm()
def setup_task(queue=None, skip_common=False):
if not skip_common:
setup_common()
setup_task_firewall()
setup_motd('task')
copy_task_settings()
enable_celery_supervisor(queue)
setup_gunicorn(supervisor=False)
config_monit_task()
setup_usage_monitor()
done()
sudo('reboot')
def setup_task_image():
setup_installs()
copy_task_settings()
setup_hosts()
config_pgbouncer()
pull()
pip()
deploy(reload=True)
done()
sudo('reboot')
# ==================
# = Setup - Docker =
# ==================
def setup_docker():
packages = [
'build-essential',
]
sudo('DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install %s' % ' '.join(packages))
sudo('apt install -fy docker docker-compose')
sudo('usermod -aG docker ${USER}')
sudo('su - ${USER}')
copy_certificates()
# ==================
# = Setup - Common =
# ==================
def done():
print("\n\n\n\n-----------------------------------------------------")
print("\n\n %s / %s IS SUCCESSFULLY BOOTSTRAPPED" % (env.get('doname') or env.host_string, env.host_string))
print("\n\n-----------------------------------------------------\n\n\n\n")
def setup_installs():
packages = [
'build-essential',
'gcc',
'scons',
'libreadline-dev',
'sysstat',
'iotop',
'git',
'python2',
'python2.7-dev',
'locate',
'software-properties-common',
'libpcre3-dev',
'libncurses5-dev',
'libdbd-pg-perl',
'libssl-dev',
'libffi-dev',
'libevent-dev',
'make',
'postgresql-common',
'ssl-cert',
'python-setuptools',
'libyaml-0-2',
'pgbouncer',
'python-yaml',
'python-numpy',
'curl',
'monit',
'ufw',
'libjpeg8',
'libjpeg62-dev',
'libfreetype6',
'libfreetype6-dev',
'libmysqlclient-dev',
'libblas-dev',
'liblapack-dev',
'libatlas-base-dev',
'gfortran',
'libpq-dev',
]
# sudo("sed -i -e 's/archive.ubuntu.com\|security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list")
put("config/apt_sources.conf", "/etc/apt/sources.list", use_sudo=True)
run('sleep 10') # Dies on a lock, so just delay
sudo('apt-get -y update')
run('sleep 10') # Dies on a lock, so just delay
sudo('DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade')
run('sleep 10') # Dies on a lock, so just delay
sudo('DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install %s' % ' '.join(packages))
with settings(warn_only=True):
sudo("ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so /usr/lib")
sudo("ln -s /usr/lib/x86_64-linux-gnu/libfreetype.so /usr/lib")
sudo("ln -s /usr/lib/x86_64-linux-gnu/libz.so /usr/lib")
with settings(warn_only=True):
sudo('mkdir -p %s' % env.VENDOR_PATH)
sudo('chown %s.%s %s' % (env.user, env.user, env.VENDOR_PATH))
def change_shell():
sudo('apt-get -fy install zsh')
with settings(warn_only=True):
run('git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh')
run('git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting')
sudo('chsh %s -s /bin/zsh' % env.user)
def setup_user():
# run('useradd -c "NewsBlur" -m newsblur -s /bin/zsh')
# run('openssl rand -base64 8 | tee -a ~conesus/.password | passwd -stdin conesus')
run('mkdir -p ~/.ssh && chmod 700 ~/.ssh')
run('rm -fr ~/.ssh/id_dsa*')
run('ssh-keygen -t dsa -f ~/.ssh/id_dsa -N ""')
run('touch ~/.ssh/authorized_keys')
put("~/.ssh/id_dsa.pub", "authorized_keys")
run("echo \"\n\" >> ~sclay/.ssh/authorized_keys")
run('echo `cat authorized_keys` >> ~sclay/.ssh/authorized_keys')
run('rm authorized_keys')
def copy_ssh_keys(username='sclay', private=False):
sudo('mkdir -p ~%s/.ssh' % username)
put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key.pub'), 'local.key.pub')
sudo('mv local.key.pub ~%s/.ssh/id_rsa.pub' % username)
if private:
put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key'), 'local.key')
sudo('mv local.key ~%s/.ssh/id_rsa' % username)
sudo("echo \"\n\" >> ~%s/.ssh/authorized_keys" % username)
sudo("echo `cat ~%s/.ssh/id_rsa.pub` >> ~%s/.ssh/authorized_keys" % (username, username))
sudo('chown -R %s.%s ~%s/.ssh' % (username, username, username))
sudo('chmod 700 ~%s/.ssh' % username)
sudo('chmod 600 ~%s/.ssh/id_rsa*' % username)
def setup_repo():
sudo('mkdir -p /srv')
sudo('chown -R %s.%s /srv' % (env.user, env.user))
with settings(warn_only=True):
run('git clone https://github.com/samuelclay/NewsBlur.git %s' % env.NEWSBLUR_PATH)
with settings(warn_only=True):
sudo('ln -sfn /srv/code /home/%s/code' % env.user)
sudo('ln -sfn /srv/newsblur /home/%s/newsblur' % env.user)
def setup_repo_local_settings():
with virtualenv():
run('cp newsblur/local_settings.py.template newsblur/local_settings.py')
run('mkdir -p logs')
run('touch logs/newsblur.log')
def setup_local_files():
run('mkdir -p ~/.config/procps')
put("config/toprc", "~/.config/procps/toprc")
run('rm -f ~/.toprc')
put("config/zshrc", "~/.zshrc")
put('config/gitconfig.txt', '~/.gitconfig')
put('config/ssh.conf', '~/.ssh/config')
def setup_psql_client():
sudo('apt-get -y install postgresql-client')
sudo('mkdir -p /var/run/postgresql')
with settings(warn_only=True):
sudo('chown postgres.postgres /var/run/postgresql')
def setup_libxml():
sudo('apt-get -y install libxml2-dev libxslt1-dev python-lxml')
def setup_libxml_code():
with cd(env.VENDOR_PATH):
run('git clone git://git.gnome.org/libxml2')
run('git clone git://git.gnome.org/libxslt')
with cd(os.path.join(env.VENDOR_PATH, 'libxml2')):
run('./configure && make && sudo make install')
with cd(os.path.join(env.VENDOR_PATH, 'libxslt')):
run('./configure && make && sudo make install')
def setup_psycopg():
sudo('easy_install -U psycopg2')
def setup_virtualenv():
sudo('rm -fr ~/.cache') # Clean `sudo pip`
sudo('pip install --upgrade virtualenv')
sudo('pip install --upgrade virtualenvwrapper')
setup_local_files()
with prefix('WORKON_HOME=%s' % os.path.join(env.NEWSBLUR_PATH, 'venv')):
with prefix('source /usr/local/bin/virtualenvwrapper.sh'):
with cd(env.NEWSBLUR_PATH):
# sudo('rmvirtualenv newsblur')
# sudo('rm -fr venv')
with settings(warn_only=True):
run('mkvirtualenv newsblur')
# run('echo "import sys; sys.setdefaultencoding(\'utf-8\')" | sudo tee venv/newsblur/lib/python2.7/sitecustomize.py')
# run('echo "/srv/newsblur" | sudo tee venv/newsblur/lib/python2.7/site-packages/newsblur.pth')
@_contextmanager
def virtualenv():
with prefix('WORKON_HOME=%s' % os.path.join(env.NEWSBLUR_PATH, 'venv')):
with prefix('source /usr/local/bin/virtualenvwrapper.sh'):
with cd(env.NEWSBLUR_PATH):
with prefix('workon newsblur'):
yield
def setup_pip():
with cd(env.VENDOR_PATH), settings(warn_only=True):
run('curl https://bootstrap.pypa.io/2.6/get-pip.py | sudo python2')
# sudo('python2 get-pip.py')
@parallel
def pip():
role = role_for_host()
pull()
with virtualenv():
if role == "task":
with settings(warn_only=True):
sudo('fallocate -l 4G /swapfile')
sudo('chmod 600 /swapfile')
sudo('mkswap /swapfile')
sudo('swapon /swapfile')
sudo('chown %s.%s -R %s' % (env.user, env.user, os.path.join(env.NEWSBLUR_PATH, 'venv')))
# run('easy_install -U pip')
# run('pip install --upgrade pip')
# run('pip install --upgrade setuptools')
run('pip install -r requirements.txt')
if role == "task":
with settings(warn_only=True):
sudo('swapoff /swapfile')
def solo_pip(role):
if role == "app":
gunicorn_stop()
pip()
deploy_code(reload=True)
elif role == "task":
celery_stop()
copy_task_settings()
pip()
celery()
def setup_supervisor():
sudo('apt-get update')
sudo('apt-get -y install supervisor')
put('config/supervisord.conf', '/etc/supervisor/supervisord.conf', use_sudo=True)
sudo('/etc/init.d/supervisor stop')
sudo('sleep 2')
sudo('ulimit -n 100000 && /etc/init.d/supervisor start')
sudo("/usr/sbin/update-rc.d -f supervisor defaults")
sudo('systemctl enable supervisor')
sudo('systemctl start supervisor')
@parallel
def setup_hosts():
put(os.path.join(env.SECRETS_PATH, 'configs/hosts'), '/etc/hosts', use_sudo=True)
sudo('echo "\n\n127.0.0.1 `hostname`" | sudo tee -a /etc/hosts')
def setup_pgbouncer():
sudo('apt-get remove -y pgbouncer')
sudo('apt-get install -y libevent-dev pkg-config libc-ares2 libc-ares-dev')
PGBOUNCER_VERSION = '1.15.0'
with cd(env.VENDOR_PATH), settings(warn_only=True):
run('wget https://pgbouncer.github.io/downloads/files/%s/pgbouncer-%s.tar.gz' % (PGBOUNCER_VERSION, PGBOUNCER_VERSION))
run('tar -xzf pgbouncer-%s.tar.gz' % PGBOUNCER_VERSION)
run('rm pgbouncer-%s.tar.gz' % PGBOUNCER_VERSION)
with cd('pgbouncer-%s' % PGBOUNCER_VERSION):
run('./configure --prefix=/usr/local')
run('make')
sudo('make install')
sudo('ln -s /usr/local/bin/pgbouncer /usr/sbin/pgbouncer')
config_pgbouncer()
def config_pgbouncer():
sudo('mkdir -p /etc/pgbouncer')
put('config/pgbouncer.conf', 'pgbouncer.conf')
sudo('mv pgbouncer.conf /etc/pgbouncer/pgbouncer.ini')
put(os.path.join(env.SECRETS_PATH, 'configs/pgbouncer_auth.conf'), 'userlist.txt')
sudo('mv userlist.txt /etc/pgbouncer/userlist.txt')
sudo('echo "START=1" | sudo tee /etc/default/pgbouncer')
# sudo('su postgres -c "/etc/init.d/pgbouncer stop"', pty=False)
with settings(warn_only=True):
sudo('/etc/init.d/pgbouncer stop')
sudo('pkill -9 pgbouncer -e')
run('sleep 2')
sudo('/etc/init.d/pgbouncer start', pty=False)
@parallel
def kill_pgbouncer(stop=False):
# sudo('su postgres -c "/etc/init.d/pgbouncer stop"', pty=False)
with settings(warn_only=True):
sudo('/etc/init.d/pgbouncer stop')
run('sleep 2')
sudo('rm /var/log/postgresql/pgbouncer.pid')
with settings(warn_only=True):
sudo('pkill -9 pgbouncer')
run('sleep 2')
if not stop:
run('sudo /etc/init.d/pgbouncer start', pty=False)
def config_monit_task():
put('config/monit_task.conf', '/etc/monit/conf.d/celery.conf', use_sudo=True)
sudo('echo "START=yes" | sudo tee /etc/default/monit')
sudo('/etc/init.d/monit restart')
def config_monit_node():
put('config/monit_node.conf', '/etc/monit/conf.d/node.conf', use_sudo=True)
sudo('echo "START=yes" | sudo tee /etc/default/monit')
sudo('/etc/init.d/monit restart')
def config_monit_original():
put('config/monit_original.conf', '/etc/monit/conf.d/node_original.conf', use_sudo=True)
sudo('echo "START=yes" | sudo tee /etc/default/monit')
sudo('/etc/init.d/monit restart')
def config_monit_app():
put('config/monit_app.conf', '/etc/monit/conf.d/gunicorn.conf', use_sudo=True)
sudo('echo "START=yes" | sudo tee /etc/default/monit')
sudo('/etc/init.d/monit restart')
def config_monit_work():
put('config/monit_work.conf', '/etc/monit/conf.d/work.conf', use_sudo=True)
sudo('echo "START=yes" | sudo tee /etc/default/monit')
sudo('/etc/init.d/monit restart')
def config_monit_redis():
sudo('chown root.root /etc/init.d/redis')
sudo('chmod a+x /etc/init.d/redis')
put('config/monit_debug.sh', '/etc/monit/monit_debug.sh', use_sudo=True)
sudo('chmod a+x /etc/monit/monit_debug.sh')
put('config/monit_redis.conf', '/etc/monit/conf.d/redis.conf', use_sudo=True)
sudo('echo "START=yes" | sudo tee /etc/default/monit')
sudo('/etc/init.d/monit restart')
def setup_mongoengine_repo():
with cd(env.VENDOR_PATH), settings(warn_only=True):
run('rm -fr mongoengine')
run('git clone https://github.com/MongoEngine/mongoengine.git')
sudo('rm -fr /usr/local/lib/python2.7/dist-packages/mongoengine')
sudo('rm -fr /usr/local/lib/python2.7/dist-packages/mongoengine-*')
sudo('ln -sfn %s /usr/local/lib/python2.7/dist-packages/mongoengine' %
os.path.join(env.VENDOR_PATH, 'mongoengine/mongoengine'))
with cd(os.path.join(env.VENDOR_PATH, 'mongoengine')), settings(warn_only=True):
run('git co v0.8.2')
def clear_pymongo_repo():
sudo('rm -fr /usr/local/lib/python2.7/dist-packages/pymongo*')
sudo('rm -fr /usr/local/lib/python2.7/dist-packages/bson*')
sudo('rm -fr /usr/local/lib/python2.7/dist-packages/gridfs*')
def setup_pymongo_repo():
with cd(env.VENDOR_PATH), settings(warn_only=True):
run('git clone git://github.com/mongodb/mongo-python-driver.git pymongo')
# with cd(os.path.join(env.VENDOR_PATH, 'pymongo')):
# sudo('python setup.py install')
clear_pymongo_repo()
sudo('ln -sfn %s /usr/local/lib/python2.7/dist-packages/' %
os.path.join(env.VENDOR_PATH, 'pymongo/{pymongo,bson,gridfs}'))
def setup_forked_mongoengine():
with cd(os.path.join(env.VENDOR_PATH, 'mongoengine')), settings(warn_only=True):
run('git remote add clay https://github.com/samuelclay/mongoengine.git')
run('git pull')
run('git fetch clay')
run('git checkout -b clay_master clay/master')
def switch_forked_mongoengine():
with cd(os.path.join(env.VENDOR_PATH, 'mongoengine')):
run('git co dev')
run('git pull %s dev --force' % env.user)
# run('git checkout .')
# run('git checkout master')
# run('get branch -D dev')
# run('git checkout -b dev origin/dev')
def setup_logrotate(clear=True):
if clear:
run('find /srv/newsblur/logs/*.log | xargs tee')
with settings(warn_only=True):
sudo('find /var/log/mongodb/*.log | xargs tee')
put('config/logrotate.conf', '/etc/logrotate.d/newsblur', use_sudo=True)
put('config/logrotate.mongo.conf', '/etc/logrotate.d/mongodb', use_sudo=True)
put('config/logrotate.nginx.conf', '/etc/logrotate.d/nginx', use_sudo=True)
sudo('chown root.root /etc/logrotate.d/{newsblur,mongodb,nginx}')
sudo('chmod 644 /etc/logrotate.d/{newsblur,mongodb,nginx}')
with settings(warn_only=True):
sudo('chown sclay.sclay /srv/newsblur/logs/*.log')
sudo('logrotate -f /etc/logrotate.d/newsblur')
sudo('logrotate -f /etc/logrotate.d/nginx')
sudo('logrotate -f /etc/logrotate.d/mongodb')
def setup_ulimit():
    """Raise file-descriptor limits system-wide (limits.conf + sysctl)
    and reconnect so the new limits apply to this Fabric session."""
    # Increase File Descriptor limits.
    run('export FILEMAX=`sysctl -n fs.file-max`', pty=False)
    sudo('mv /etc/security/limits.conf /etc/security/limits.conf.bak', pty=False)
    sudo('touch /etc/security/limits.conf', pty=False)
    run('echo "root soft nofile 100000\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    run('echo "root hard nofile 100000\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    run('echo "* soft nofile 100000\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    # NOTE(review): 100090 differs from the 100000 used everywhere else --
    # possibly a typo; confirm before "fixing".
    run('echo "* hard nofile 100090\n" | sudo tee -a /etc/security/limits.conf', pty=False)
    run('echo "fs.file-max = 100000\n" | sudo tee -a /etc/sysctl.conf', pty=False)
    sudo('sysctl -p')
    sudo('ulimit -n 100000')
    # Re-open the SSH connection so the raised limits take effect.
    connections.connect(env.host_string)

    # run('touch /home/ubuntu/.bash_profile')
    # run('echo "ulimit -n $FILEMAX" >> /home/ubuntu/.bash_profile')

    # Increase Ephemeral Ports.
    # sudo chmod 666 /etc/sysctl.conf
    # echo "net.ipv4.ip_local_port_range = 1024 65535" >> /etc/sysctl.conf
    # sudo chmod 644 /etc/sysctl.conf
def setup_do_monitoring():
    """Install the DigitalOcean monitoring agent via its install script."""
    run('curl -sSL https://agent.digitalocean.com/install.sh | sh')
def setup_syncookies():
    """Enable TCP SYN cookies (SYN-flood protection)."""
    sudo('echo 1 | sudo tee /proc/sys/net/ipv4/tcp_syncookies')
    sudo('sudo /sbin/sysctl -w net.ipv4.tcp_syncookies=1')
def setup_sudoers(user=None):
    """Grant passwordless sudo to `user` (defaults to env.user) via a
    drop-in file in /etc/sudoers.d."""
    sudo('echo "%s ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/sclay' % (user or env.user))
    sudo('chmod 0440 /etc/sudoers.d/sclay')
def setup_nginx():
    """Build and install nginx from source with SSL/gzip/realip modules,
    then apply the NewsBlur config via config_nginx()."""
    NGINX_VERSION = '1.19.5'
    with cd(env.VENDOR_PATH), settings(warn_only=True):
        sudo("groupadd nginx")
        sudo("useradd -g nginx -d /var/www/htdocs -s /bin/false nginx")
        run('wget http://nginx.org/download/nginx-%s.tar.gz' % NGINX_VERSION)
        run('tar -xzf nginx-%s.tar.gz' % NGINX_VERSION)
        run('rm nginx-%s.tar.gz' % NGINX_VERSION)
        with cd('nginx-%s' % NGINX_VERSION):
            run('./configure --with-http_ssl_module --with-http_stub_status_module --with-http_gzip_static_module --with-http_realip_module ')
            run('make')
            sudo('make install')
    config_nginx()
def config_nginx():
    """Upload nginx configs and init script, substitute the hostname
    into the site config, restart nginx and install certificates."""
    put("config/nginx.conf", "/usr/local/nginx/conf/nginx.conf", use_sudo=True)
    sudo("mkdir -p /usr/local/nginx/conf/sites-enabled")
    sudo("mkdir -p /var/log/nginx")
    put("config/nginx.newsblur.conf", "/usr/local/nginx/conf/sites-enabled/newsblur.conf", use_sudo=True)
    put("config/nginx-init", "/etc/init.d/nginx", use_sudo=True)
    # Replace the nginx_none placeholder with this machine's hostname.
    sudo('sed -i -e s/nginx_none/`cat /etc/hostname`/g /usr/local/nginx/conf/sites-enabled/newsblur.conf')
    sudo("chmod 0755 /etc/init.d/nginx")
    sudo("/usr/sbin/update-rc.d -f nginx defaults")
    sudo("/etc/init.d/nginx restart")
    copy_certificates()
# ===============
# = Setup - App =
# ===============
def setup_app_firewall():
    """Configure ufw for an app server: deny by default, allow web,
    gunicorn and socket.io ports."""
    sudo('ufw default deny')
    sudo('ufw allow ssh')   # ssh
    sudo('ufw allow 80')    # http
    sudo('ufw allow 8000')  # gunicorn
    sudo('ufw allow 8888')  # socket.io
    sudo('ufw allow 8889')  # socket.io ssl
    sudo('ufw allow 443')   # https
    sudo('ufw --force enable')
def remove_gunicorn():
    """Delete the vendored gunicorn checkout."""
    with cd(env.VENDOR_PATH):
        sudo('rm -fr gunicorn')
def setup_gunicorn(supervisor=True, restart=True):
    """Install the gunicorn supervisor config.

    supervisor -- upload the supervisor conf and reread.
    restart    -- apply the change via `supervisorctl update`.
    """
    if supervisor:
        put('config/supervisor_gunicorn.conf', '/etc/supervisor/conf.d/gunicorn.conf', use_sudo=True)
        sudo('supervisorctl reread')
        if restart:
            sudo('supervisorctl update')
    # with cd(env.VENDOR_PATH):
    #     sudo('rm -fr gunicorn')
    #     run('git clone git://github.com/benoitc/gunicorn.git')
    # with cd(os.path.join(env.VENDOR_PATH, 'gunicorn')):
    #     run('git pull')
    #     sudo('python setup.py develop')
def update_gunicorn():
    """Pull the vendored gunicorn checkout and reinstall it in-place."""
    with cd(os.path.join(env.VENDOR_PATH, 'gunicorn')):
        run('git pull')
        sudo('python setup.py develop')
def setup_staging():
    """Clone a staging copy of NewsBlur into ~/staging, reusing the
    production local_settings.py."""
    run('git clone https://github.com/samuelclay/NewsBlur.git staging')
    with cd('~/staging'):
        run('cp ../newsblur/local_settings.py local_settings.py')
        run('mkdir -p logs')
        run('touch logs/newsblur.log')
def setup_node_app():
    """Install Node.js 14 from nodesource, the `supervisor` npm runner,
    and open the node service ports."""
    sudo('curl -sL https://deb.nodesource.com/setup_14.x | sudo bash -')
    sudo('apt-get install -y nodejs')
    # run('curl -L https://npmjs.org/install.sh | sudo sh')
    # sudo('apt-get install npm')
    sudo('sudo npm install -g npm')
    sudo('npm install -g supervisor')
    sudo('ufw allow 8888')
    sudo('ufw allow 4040')
def config_node(full=False):
    """Install supervisor configs for the node services (unread counts,
    favicons, text extraction) and reload supervisor.

    full -- also wipe and reinstall node_modules.
    """
    sudo('rm -f /etc/supervisor/conf.d/gunicorn.conf')
    sudo('rm -f /etc/supervisor/conf.d/node.conf')
    put('config/supervisor_node_unread.conf', '/etc/supervisor/conf.d/node_unread.conf', use_sudo=True)
    put('config/supervisor_node_unread_ssl.conf', '/etc/supervisor/conf.d/node_unread_ssl.conf', use_sudo=True)
    put('config/supervisor_node_favicons.conf', '/etc/supervisor/conf.d/node_favicons.conf', use_sudo=True)
    put('config/supervisor_node_text.conf', '/etc/supervisor/conf.d/node_text.conf', use_sudo=True)
    if full:
        run("rm -fr /srv/newsblur/node/node_modules")
        with cd(os.path.join(env.NEWSBLUR_PATH, "node")):
            run("npm install")
    sudo('supervisorctl reload')
@parallel
def copy_app_settings():
    """Upload the app-server local_settings.py from the secrets repo and
    append a SERVER_NAME line derived from the remote hostname."""
    run('rm -f %s/local_settings.py' % env.NEWSBLUR_PATH)
    put(os.path.join(env.SECRETS_PATH, 'settings/app_settings.py'),
        '%s/newsblur/local_settings.py' % env.NEWSBLUR_PATH)
    run('echo "\nSERVER_NAME = \\\\"`hostname`\\\\"" >> %s/newsblur/local_settings.py' % env.NEWSBLUR_PATH)
def assemble_certificates():
    """Concatenate the Comodo certificate chain into newsblur.com.crt
    (runs locally, in the secrets repo)."""
    with lcd(os.path.join(env.SECRETS_PATH, 'certificates/comodo')):
        local('pwd')
        local('cat STAR_newsblur_com.crt EssentialSSLCA_2.crt ComodoUTNSGCCA.crt UTNAddTrustSGCCA.crt AddTrustExternalCARoot.crt > newsblur.com.crt')
def copy_certificates(copy=False):
    """Symlink the Let's Encrypt cert/key into the repo's config dir
    under every name the various configs expect, and upload the iOS
    push-notification certs.

    copy -- also upload the pem/key from the secrets repo into the
    letsencrypt live directory (for hosts without certbot).
    """
    cert_path = os.path.join(env.NEWSBLUR_PATH, 'config/certificates')
    run('mkdir -p %s' % cert_path)
    fullchain_path = "/etc/letsencrypt/live/newsblur.com/fullchain.pem"
    privkey_path = "/etc/letsencrypt/live/newsblur.com/privkey.pem"
    if copy:
        sudo('mkdir -p %s' % os.path.dirname(fullchain_path))
        put(os.path.join(env.SECRETS_PATH, 'certificates/newsblur.com.pem'), fullchain_path, use_sudo=True)
        put(os.path.join(env.SECRETS_PATH, 'certificates/newsblur.com.key'), privkey_path, use_sudo=True)
    run('ln -fs %s %s' % (fullchain_path, os.path.join(cert_path, 'newsblur.com.crt')))
    run('ln -fs %s %s' % (fullchain_path, os.path.join(cert_path, 'newsblur.com.pem'))) # For backwards compatibility with hard-coded nginx configs
    run('ln -fs %s %s' % (privkey_path, os.path.join(cert_path, 'newsblur.com.key')))
    run('ln -fs %s %s' % (privkey_path, os.path.join(cert_path, 'newsblur.com.crt.key'))) # HAProxy
    put(os.path.join(env.SECRETS_PATH, 'certificates/comodo/dhparams.pem'), cert_path)
    put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps_development.pem'), cert_path)
    # Export aps.cer from Apple issued certificate using Keychain Assistant
    # openssl x509 -in aps.cer -inform DER -outform PEM -out aps.pem
    put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps.pem'), cert_path)
    # Export aps.p12 from aps.cer using Keychain Assistant
    # openssl pkcs12 -in aps.p12 -out aps.p12.pem -nodes
    put(os.path.join(env.SECRETS_PATH, 'certificates/ios/aps.p12.pem'), cert_path)
def setup_certbot():
    """Install certbot (via snap) with the dnsimple DNS plugin and issue
    the wildcard certificate for newsblur.com."""
    sudo('snap install --classic certbot')
    sudo('snap set certbot trust-plugin-with-root=ok')
    sudo('snap install certbot-dns-dnsimple')
    sudo('ln -fs /snap/bin/certbot /usr/bin/certbot')
    # DNSimple API credentials used for the dns-01 challenge.
    put(os.path.join(env.SECRETS_PATH, 'configs/certbot.conf'),
        os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
    sudo('chmod 0600 %s' % os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
    sudo('certbot certonly -n --agree-tos '
         ' --dns-dnsimple --dns-dnsimple-credentials %s'
         ' --email samuel@newsblur.com --domains newsblur.com '
         ' -d "*.newsblur.com" -d "popular.global.newsblur.com"' %
         (os.path.join(env.NEWSBLUR_PATH, 'certbot.conf')))
    # Let non-root services (haproxy/nginx) read the issued key.
    sudo('chmod 0755 /etc/letsencrypt/{live,archive}')
    sudo('chmod 0755 /etc/letsencrypt/archive/newsblur.com/privkey1.pem')
# def setup_certbot_old():
# sudo('add-apt-repository -y universe')
# sudo('add-apt-repository -y ppa:certbot/certbot')
# sudo('apt-get update')
# sudo('apt-get install -y certbot')
# sudo('apt-get install -y python3-certbot-dns-dnsimple')
# put(os.path.join(env.SECRETS_PATH, 'configs/certbot.conf'),
# os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
# sudo('chmod 0600 %s' % os.path.join(env.NEWSBLUR_PATH, 'certbot.conf'))
# sudo('certbot certonly -n --agree-tos '
# ' --dns-dnsimple --dns-dnsimple-credentials %s'
# ' --email samuel@newsblur.com --domains newsblur.com '
# ' -d "*.newsblur.com" -d "global.popular.newsblur.com"' %
# (os.path.join(env.NEWSBLUR_PATH, 'certbot.conf')))
# sudo('chmod 0755 /etc/letsencrypt/{live,archive}')
# sudo('chmod 0755 /etc/letsencrypt/archive/newsblur.com/privkey1.pem')
@parallel
def maintenance_on():
    """Put a host into maintenance mode: stop supervisor jobs on
    work/search hosts, otherwise activate the maintenance template."""
    role = role_for_host()
    if role in ['work', 'search']:
        sudo('supervisorctl stop all')
    else:
        put('templates/maintenance_off.html', '%s/templates/maintenance_off.html' % env.NEWSBLUR_PATH)
        with virtualenv():
            # Renaming to *_on.html is what nginx keys on to serve it.
            run('mv templates/maintenance_off.html templates/maintenance_on.html')
@parallel
def maintenance_off():
    """Reverse maintenance_on(): restart supervisor jobs on work/search
    hosts, otherwise restore the maintenance template's off state."""
    role = role_for_host()
    if role in ['work', 'search']:
        sudo('supervisorctl start all')
    else:
        with virtualenv():
            run('mv templates/maintenance_on.html templates/maintenance_off.html')
            run('git checkout templates/maintenance_off.html')
def setup_haproxy(debug=False):
    """Build and install HAProxy from source, install its config and
    init script, assemble the combined TLS pem, and (re)start it.

    debug -- install the static debug config instead of the generated one.
    """
    version = "2.3.3"
    sudo('ufw allow 81')    # nginx moved
    sudo('ufw allow 1936')  # haproxy stats
    # sudo('apt-get install -y haproxy')
    # sudo('apt-get remove -y haproxy')
    with cd(env.VENDOR_PATH):
        run('wget http://www.haproxy.org/download/2.3/src/haproxy-%s.tar.gz' % version)
        run('tar -xf haproxy-%s.tar.gz' % version)
        with cd('haproxy-%s' % version):
            run('make TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1')
            sudo('make install')
    put('config/haproxy-init', '/etc/init.d/haproxy', use_sudo=True)
    sudo('chmod u+x /etc/init.d/haproxy')
    sudo('mkdir -p /etc/haproxy')
    if debug:
        put('config/debug_haproxy.conf', '/etc/haproxy/haproxy.cfg', use_sudo=True)
    else:
        build_haproxy()
        put(os.path.join(env.SECRETS_PATH, 'configs/haproxy.conf'),
            '/etc/haproxy/haproxy.cfg', use_sudo=True)
    sudo('echo "ENABLED=1" | sudo tee /etc/default/haproxy')
    # HAProxy wants cert + key concatenated into one pem.
    cert_path = "%s/config/certificates" % env.NEWSBLUR_PATH
    run('cat %s/newsblur.com.crt > %s/newsblur.pem' % (cert_path, cert_path))
    run('cat %s/newsblur.com.key >> %s/newsblur.pem' % (cert_path, cert_path))
    # NOTE(review): plain `ln -s` (no -f) fails if the link already
    # exists, so re-running this task needs manual cleanup -- confirm.
    run('ln -s %s/newsblur.com.key %s/newsblur.pem.key' % (cert_path, cert_path))
    put('config/haproxy_rsyslog.conf', '/etc/rsyslog.d/49-haproxy.conf', use_sudo=True)
    # sudo('restart rsyslog')
    sudo('update-rc.d -f haproxy defaults')

    sudo('/etc/init.d/haproxy stop')
    run('sleep 5')
    sudo('/etc/init.d/haproxy start')
def config_haproxy(debug=False):
    """Upload the HAProxy config and reload the service if it validates.

    debug -- upload the static debug config; otherwise regenerate the
    config from the current DigitalOcean droplets via build_haproxy().

    The config is validated with `haproxy -c` before reloading; on a
    validation failure HAProxy is left running with its old config and
    the failing return code is printed.
    """
    if debug:
        put('config/debug_haproxy.conf', '/etc/haproxy/haproxy.cfg', use_sudo=True)
    else:
        build_haproxy()
        put(os.path.join(env.SECRETS_PATH, 'configs/haproxy.conf'),
            '/etc/haproxy/haproxy.cfg', use_sudo=True)

    # Bug fix: Fabric's run() aborts the whole task on a non-zero exit
    # status by default, which made the error branch below unreachable.
    # warn_only lets us inspect the validator's return code ourselves.
    with settings(warn_only=True):
        haproxy_check = run('haproxy -c -f /etc/haproxy/haproxy.cfg')
    if haproxy_check.return_code == 0:
        sudo('/etc/init.d/haproxy reload')
    else:
        print(" !!!> Uh-oh, HAProxy config doesn't check out: %s" % haproxy_check.return_code)
def build_haproxy():
    """Generate configs/haproxy.conf in the secrets repo.

    Fetches the droplet list from DigitalOcean, builds a `server` line
    for each backend (grouped by haproxy backend name, with per-group
    ports and special-cased hosts), then substitutes each sorted group
    into config/haproxy.conf.template and writes the result out.
    """
    droplets = assign_digitalocean_roledefs(split=True)
    servers = defaultdict(list)
    # Hosts (by droplet name) routed to dedicated backends or skipped.
    gunicorn_counts_servers = ['app22', 'app26']
    gunicorn_refresh_servers = ['app20', 'app21']
    maintenance_servers = ['app20']
    node_socket3_servers = ['node02', 'node03']
    ignore_servers = []

    for group_type in ['app', 'push', 'work', 'node_socket', 'node_socket3', 'node_favicon', 'node_text', 'www']:
        # All node_* groups draw from the single 'node' droplet role.
        group_type_name = group_type
        if 'node' in group_type:
            group_type_name = 'node'
        for server in droplets[group_type_name]:
            droplet_nums = re.findall(r'\d+', server['name'])
            droplet_num = droplet_nums[0] if droplet_nums else ''
            server_type = group_type
            port = 80
            check_inter = 3000

            if server['name'] in ignore_servers:
                print(" ---> Ignoring %s" % server['name'])
                continue
            # node_socket3 hosts belong only to the node_socket3 group.
            if server['name'] in node_socket3_servers and group_type != 'node_socket3':
                continue
            if server['name'] not in node_socket3_servers and group_type == 'node_socket3':
                continue
            if server_type == 'www':
                port = 81
            if group_type == 'node_socket':
                port = 8888
            if group_type == 'node_socket3':
                port = 8888
            if group_type == 'node_text':
                port = 4040
            if group_type in ['app', 'push']:
                port = 8000
            address = "%s:%s" % (server['address'], port)

            if server_type == 'app':
                # App servers also serve static assets via nginx on :80.
                nginx_address = "%s:80" % (server['address'])
                servers['nginx'].append("  server nginx%-15s %-22s check inter 3000ms" % (droplet_num, nginx_address))
                if server['name'] in maintenance_servers:
                    nginx_address = "%s:80" % (server['address'])
                    servers['maintenance'].append("  server nginx%-15s %-22s check inter 3000ms" % (droplet_num, nginx_address))
                if server['name'] in gunicorn_counts_servers:
                    server_type = 'gunicorn_counts'
                    check_inter = 15000
                elif server['name'] in gunicorn_refresh_servers:
                    server_type = 'gunicorn_refresh'
                    check_inter = 30000

            server_name = "%s%s" % (server_type, droplet_num)
            servers[server_type].append("  server %-20s %-22s check inter %sms" % (server_name, address, check_inter))

    # Bug fix: the template handle was opened and never closed (and the
    # output handle leaked on exception); use context managers instead.
    with open(os.path.join(env.NEWSBLUR_PATH, 'config/haproxy.conf.template'), 'r') as h:
        haproxy_template = h.read()

    for sub, server_list in list(servers.items()):
        sorted_servers = '\n'.join(sorted(server_list))
        haproxy_template = haproxy_template.replace("{{ %s }}" % sub, sorted_servers)

    with open(os.path.join(env.SECRETS_PATH, 'configs/haproxy.conf'), 'w') as f:
        f.write(haproxy_template)
def upgrade_django(role=None):
    """One-off migration of a host to the django1.11 branch, stopping
    its services, refreshing settings/supervisor configs per role, then
    reinstalling requirements.

    role -- app/web, task, work, node; inferred from hostname if None.
    """
    if not role:
        role = role_for_host()
    with virtualenv(), settings(warn_only=True):
        sudo('sudo dpkg --configure -a')
        setup_supervisor()
        pull()
        run('git co django1.11')
        if role == "task":
            sudo('supervisorctl stop celery')
            run('./utils/kill_celery.sh')
            copy_task_settings()
            enable_celery_supervisor(update=False)
        elif role == "work":
            copy_app_settings()
            enable_celerybeat()
        elif role == "web" or role == "app":
            sudo('supervisorctl stop gunicorn')
            run('./utils/kill_gunicorn.sh')
            copy_app_settings()
            setup_gunicorn(restart=False)
        elif role == "node":
            copy_app_settings()
            config_node(full=True)
        else:
            copy_task_settings()

        pip()
        clean()
def clean():
    """Delete all compiled .pyc files under the project directory."""
    with virtualenv(), settings(warn_only=True):
        run('find . -name "*.pyc" -exec rm -f {} \;')
def downgrade_django(role=None):
    """Roll a host back from the django1.11 branch to master and
    redeploy (counterpart of upgrade_django)."""
    with virtualenv(), settings(warn_only=True):
        pull()
        run('git co master')
        pip()
        run('pip uninstall -y django-paypal')
        if role == "task":
            copy_task_settings()
            enable_celery_supervisor()
        else:
            copy_app_settings()
            deploy()
def vendorize_paypal():
    """Uninstall pip's django-paypal so the vendored copy is used."""
    with virtualenv(), settings(warn_only=True):
        run('pip uninstall -y django-paypal')
def upgrade_pil():
    """Replace the apt python-imaging (PIL) package with pip's pillow
    and reload supervisor-managed services."""
    with virtualenv():
        pull()
        run('pip install --upgrade pillow')
        sudo('apt-get remove -y python-imaging')
        sudo('supervisorctl reload')
def downgrade_pil():
    """Reverse upgrade_pil(): remove pillow, reinstall apt's
    python-imaging, and reload services."""
    with virtualenv():
        sudo('apt-get install -y python-imaging')
        sudo('rm -fr /usr/local/lib/python2.7/dist-packages/Pillow*')
        pull()
        sudo('supervisorctl reload')
def setup_db_monitor():
    """Install the flask-based DB monitor service under supervisor."""
    pull()
    with virtualenv():
        # psycopg2 build deps for the flask requirements.
        sudo('apt-get install -y libpq-dev python2.7-dev')
        run('pip install -r flask/requirements.txt')
    put('flask/supervisor_db_monitor.conf', '/etc/supervisor/conf.d/db_monitor.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
@parallel
def setup_db_firewall():
    """Rebuild the ufw ruleset for a database host.

    Denies everything by default, allows ssh/http/https globally, then
    opens the database service ports to every machine across the known
    NewsBlur roles.
    """
    # 5432=postgres, 27017/28017/27019=mongo, 6379=redis,
    # 9200=elasticsearch; 60 and 5000 -- purpose not evident from this
    # file, confirm before removing.
    db_ports = [
        5432,
        27017,
        28017,
        27019,
        6379,
        60,
        9200,
        5000,
    ]
    sudo('ufw --force reset')
    sudo('ufw default deny')
    sudo('ufw allow ssh')
    sudo('ufw allow 80')
    sudo('ufw allow 443')

    # De-duplicate addresses across every role that talks to the DBs.
    addresses = set()
    for role_name in ('app', 'db', 'debug', 'task', 'work', 'push', 'www', 'search', 'node'):
        addresses.update(env.roledefs[role_name])
    port_csv = ','.join(map(str, db_ports))
    for address in addresses:
        sudo('ufw allow proto tcp from %s to any port %s' % (address, port_csv))

    sudo('ufw --force enable')
def setup_rabbitmq():
    """Install rabbitmq-server from the rabbitmq.com apt repo and create
    the newsblur user/vhost with full permissions."""
    sudo('echo "deb http://www.rabbitmq.com/debian/ testing main" | sudo tee -a /etc/apt/sources.list')
    run('wget http://www.rabbitmq.com/rabbitmq-signing-key-public.asc')
    sudo('apt-key add rabbitmq-signing-key-public.asc')
    run('rm rabbitmq-signing-key-public.asc')
    sudo('apt-get update')
    sudo('apt-get install -y rabbitmq-server')
    sudo('rabbitmqctl add_user newsblur newsblur')
    sudo('rabbitmqctl add_vhost newsblurvhost')
    sudo('rabbitmqctl set_permissions -p newsblurvhost newsblur ".*" ".*" ".*"')
def setup_postgres(standby=False):
    """Install PostgreSQL 13 from the pgdg apt repo, apply the NewsBlur
    config and kernel tuning (shmmax, hugepages, nofile), and start it.

    standby -- also install recovery.conf so the host replicates from
    the primary.
    """
    shmmax = 17818362112
    hugepages = 9000
    sudo('echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |sudo tee /etc/apt/sources.list.d/pgdg.list')
    sudo('wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -')
    sudo('apt update')
    sudo('apt install -y postgresql-13')
    put('config/postgresql-13.conf', '/etc/postgresql/13/main/postgresql.conf', use_sudo=True)
    put('config/postgres_hba-13.conf', '/etc/postgresql/13/main/pg_hba.conf', use_sudo=True)
    sudo('mkdir -p /var/lib/postgresql/13/archive')
    sudo('chown -R postgres.postgres /etc/postgresql/13/main')
    sudo('chown -R postgres.postgres /var/lib/postgresql/13/main')
    sudo('chown -R postgres.postgres /var/lib/postgresql/13/archive')
    sudo('echo "%s" | sudo tee /proc/sys/kernel/shmmax' % shmmax)
    sudo('echo "\nkernel.shmmax = %s" | sudo tee -a /etc/sysctl.conf' % shmmax)
    sudo('echo "\nvm.nr_hugepages = %s\n" | sudo tee -a /etc/sysctl.conf' % hugepages)
    run('echo "ulimit -n 100000" > postgresql.defaults')
    sudo('mv postgresql.defaults /etc/default/postgresql')
    sudo('sysctl -p')
    # Use sysvinit scripts rather than the packaged systemd unit.
    sudo('rm -f /lib/systemd/system/postgresql.service')
    sudo('systemctl daemon-reload')
    sudo('systemctl enable postgresql')

    if standby:
        put('config/postgresql_recovery.conf', '/var/lib/postgresql/13/recovery.conf', use_sudo=True)
        sudo('chown -R postgres.postgres /var/lib/postgresql/13/recovery.conf')

    sudo('/etc/init.d/postgresql stop')
    sudo('/etc/init.d/postgresql start')
def config_postgres(standby=False):
    """Re-upload the PostgreSQL 13 config files and reload the server.

    standby -- currently unused here; kept for signature parity with
    setup_postgres().
    """
    put('config/postgresql-13.conf', '/etc/postgresql/13/main/postgresql.conf', use_sudo=True)
    put('config/postgres_hba.conf', '/etc/postgresql/13/main/pg_hba.conf', use_sudo=True)
    sudo('chown postgres.postgres /etc/postgresql/13/main/postgresql.conf')
    run('echo "ulimit -n 100000" > postgresql.defaults')
    sudo('mv postgresql.defaults /etc/default/postgresql')

    sudo('/etc/init.d/postgresql reload 13')
def upgrade_postgres():
    """Run pg_upgrade to migrate the 9.4 cluster's data to 10."""
    sudo('su postgres -c "/usr/lib/postgresql/10/bin/pg_upgrade -b /usr/lib/postgresql/9.4/bin -B /usr/lib/postgresql/10/bin -d /var/lib/postgresql/9.4/main -D /var/lib/postgresql/10/main"')
def copy_postgres_to_standby(master='db01'):
    """Seed this host as a 9.4 standby: rsync the data directory from
    `master` (as the postgres user) and install recovery.conf."""
    sudo('systemctl stop postgresql')
    sudo('mkdir -p /var/lib/postgresql/9.4/archive')
    sudo('chown postgres.postgres /var/lib/postgresql/9.4/archive')
    with settings(warn_only=True):
        # postmaster.pid excluded so the standby doesn't look "running".
        sudo('su postgres -c "rsync -Pav -e \'ssh -i ~postgres/.ssh/newsblur.key\' --stats --progress postgres@%s:/var/lib/postgresql/9.4/main /var/lib/postgresql/9.4/ --exclude postmaster.pid"' % master)
    put('config/postgresql_recovery.conf', '/var/lib/postgresql/9.4/main/recovery.conf', use_sudo=True)
    sudo('systemctl start postgresql')
def disable_thp():
    """Install the init script that disables transparent hugepages
    (recommended for mongo/redis hosts)."""
    put('config/disable_transparent_hugepages.sh', '/etc/init.d/disable-transparent-hugepages', use_sudo=True)
    sudo('chmod 755 /etc/init.d/disable-transparent-hugepages')
    sudo('update-rc.d disable-transparent-hugepages defaults')
def setup_mongo():
    """Install mongodb-org 3.4 from the mongodb.org apt repo, disable
    transparent hugepages (via rc.local and the init script), install
    the NewsBlur config/systemd unit, and enable the service."""
    MONGODB_VERSION = "3.4.24"
    pull()
    disable_thp()
    sudo('systemctl enable rc-local.service') # Enable rc.local
    # rc.local disables THP on every boot (mongo startup warning).
    sudo('echo "#!/bin/sh -e\n\nif test -f /sys/kernel/mm/transparent_hugepage/enabled; then\n\
echo never > /sys/kernel/mm/transparent_hugepage/enabled\n\
fi\n\
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then\n\
echo never > /sys/kernel/mm/transparent_hugepage/defrag\n\
fi\n\n\
exit 0" | sudo tee /etc/rc.local')
    sudo('curl -fsSL https://www.mongodb.org/static/pgp/server-3.4.asc | sudo apt-key add -')
    # sudo('echo "deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen" | sudo tee /etc/apt/sources.list.d/mongodb.list')
    # sudo('echo "\ndeb http://downloads-distro.mongodb.org/repo/debian-sysvinit dist 10gen" | sudo tee -a /etc/apt/sources.list')
    # sudo('echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list')
    sudo('echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.4 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.4.list')
    sudo('apt-get update')
    # Pin every mongodb-org subpackage to the same version.
    sudo('apt-get install -y mongodb-org=%s mongodb-org-server=%s mongodb-org-shell=%s mongodb-org-mongos=%s mongodb-org-tools=%s' %
         (MONGODB_VERSION, MONGODB_VERSION, MONGODB_VERSION, MONGODB_VERSION, MONGODB_VERSION))
    put('config/mongodb.%s.conf' % ('prod' if env.user != 'ubuntu' else 'ec2'),
        '/etc/mongodb.conf', use_sudo=True)
    put('config/mongodb.service', '/etc/systemd/system/mongodb.service', use_sudo=True)
    run('echo "ulimit -n 100000" > mongodb.defaults')
    sudo('mv mongodb.defaults /etc/default/mongod')
    sudo('mkdir -p /var/log/mongodb')
    sudo('chown mongodb /var/log/mongodb')
    put('config/logrotate.mongo.conf', '/etc/logrotate.d/mongod', use_sudo=True)
    sudo('systemctl enable mongodb')

    # Reclaim 5% disk space used for root logs. Set to 1%.
    with settings(warn_only=True):
        sudo('tune2fs -m 1 /dev/vda1')
def setup_mongo_configsvr():
    """Install and start a mongo config server (sharding metadata) with
    its own data dir, init script and ulimit defaults."""
    sudo('mkdir -p /var/lib/mongodb_configsvr')
    sudo('chown mongodb.mongodb /var/lib/mongodb_configsvr')
    put('config/mongodb.configsvr.conf', '/etc/mongodb.configsvr.conf', use_sudo=True)
    put('config/mongodb.configsvr-init', '/etc/init.d/mongodb-configsvr', use_sudo=True)
    sudo('chmod u+x /etc/init.d/mongodb-configsvr')
    run('echo "ulimit -n 100000" > mongodb_configsvr.defaults')
    sudo('mv mongodb_configsvr.defaults /etc/default/mongodb_configsvr')
    sudo('update-rc.d -f mongodb-configsvr defaults')
    sudo('/etc/init.d/mongodb-configsvr start')
def setup_mongo_mongos():
    """Install and restart the mongos shard router with its config,
    init script and ulimit defaults."""
    put('config/mongodb.mongos.conf', '/etc/mongodb.mongos.conf', use_sudo=True)
    put('config/mongodb.mongos-init', '/etc/init.d/mongodb-mongos', use_sudo=True)
    sudo('chmod u+x /etc/init.d/mongodb-mongos')
    run('echo "ulimit -n 100000" > mongodb_mongos.defaults')
    sudo('mv mongodb_mongos.defaults /etc/default/mongodb_mongos')
    sudo('update-rc.d -f mongodb-mongos defaults')
    sudo('/etc/init.d/mongodb-mongos restart')
def setup_mongo_mms():
    """Install the MongoDB MMS monitoring agent from its .deb, append
    the account config from the secrets repo, and start the agent."""
    pull()
    # The agent used to run under supervisor; remove that config.
    sudo('rm -f /etc/supervisor/conf.d/mongomms.conf')
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
    with cd(env.VENDOR_PATH):
        sudo('apt-get remove -y mongodb-mms-monitoring-agent')
        run('curl -OL https://mms.mongodb.com/download/agent/monitoring/mongodb-mms-monitoring-agent_2.2.0.70-1_amd64.deb')
        sudo('dpkg -i mongodb-mms-monitoring-agent_2.2.0.70-1_amd64.deb')
        run('rm mongodb-mms-monitoring-agent_2.2.0.70-1_amd64.deb')
        put(os.path.join(env.SECRETS_PATH, 'settings/mongo_mms_config.txt'),
            'mongo_mms_config.txt')
        sudo("echo \"\n\" | sudo tee -a /etc/mongodb-mms/monitoring-agent.config")
        sudo('cat mongo_mms_config.txt | sudo tee -a /etc/mongodb-mms/monitoring-agent.config')
    sudo('start mongodb-mms-monitoring-agent')
def setup_redis(slave=False):
    """Build and install redis from source, apply kernel tuning that
    redis expects (overcommit, THP off, somaxconn), install configs and
    (re)start the service.

    slave -- install the replica server config instead of the master's.
    """
    redis_version = '3.2.6'
    with cd(env.VENDOR_PATH):
        run('wget http://download.redis.io/releases/redis-%s.tar.gz' % redis_version)
        run('tar -xzf redis-%s.tar.gz' % redis_version)
        run('rm redis-%s.tar.gz' % redis_version)
    with cd(os.path.join(env.VENDOR_PATH, 'redis-%s' % redis_version)):
        sudo('make install')
    put('config/redis-init', '/etc/init.d/redis', use_sudo=True)
    sudo('chmod u+x /etc/init.d/redis')
    put('config/redis.conf', '/etc/redis.conf', use_sudo=True)
    if slave:
        put('config/redis_slave.conf', '/etc/redis_server.conf', use_sudo=True)
    else:
        put('config/redis_master.conf', '/etc/redis_server.conf', use_sudo=True)
    # sudo('chmod 666 /proc/sys/vm/overcommit_memory', pty=False)
    # run('echo "1" > /proc/sys/vm/overcommit_memory', pty=False)
    # sudo('chmod 644 /proc/sys/vm/overcommit_memory', pty=False)
    disable_thp()
    sudo('systemctl enable rc-local.service') # Enable rc.local
    # rc.local disables THP on every boot (redis startup warning).
    sudo('echo "#!/bin/sh -e\n\nif test -f /sys/kernel/mm/transparent_hugepage/enabled; then\n\
echo never > /sys/kernel/mm/transparent_hugepage/enabled\n\
fi\n\
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then\n\
echo never > /sys/kernel/mm/transparent_hugepage/defrag\n\
fi\n\n\
exit 0" | sudo tee /etc/rc.local')
    # Allow background saves even when memory looks tight.
    sudo("echo 1 | sudo tee /proc/sys/vm/overcommit_memory")
    sudo('echo "vm.overcommit_memory = 1" | sudo tee -a /etc/sysctl.conf')
    sudo("sysctl vm.overcommit_memory=1")
    put('config/redis_rclocal.txt', '/etc/rc.local', use_sudo=True)
    sudo("chown root.root /etc/rc.local")
    sudo("chmod a+x /etc/rc.local")
    sudo('echo "never" | sudo tee /sys/kernel/mm/transparent_hugepage/enabled')
    run('echo "\nnet.core.somaxconn=65535\n" | sudo tee -a /etc/sysctl.conf', pty=False)
    sudo('mkdir -p /var/lib/redis')
    sudo('update-rc.d redis defaults')
    sudo('/etc/init.d/redis stop')
    sudo('/etc/init.d/redis start')
    setup_syncookies()
    config_monit_redis()
def setup_munin():
    """Install munin master + node with fcgi graph/html renderers behind
    nginx, fix log/cgi ownership, and (re)start everything."""
    sudo('apt-get update')
    sudo('apt-get install -y munin munin-node munin-plugins-extra spawn-fcgi')
    put('config/munin.conf', '/etc/munin/munin.conf', use_sudo=True) # Only use on main munin
    put('config/spawn_fcgi_munin_graph.conf', '/etc/init.d/spawn_fcgi_munin_graph', use_sudo=True)
    put('config/spawn_fcgi_munin_html.conf', '/etc/init.d/spawn_fcgi_munin_html', use_sudo=True)
    sudo('chmod u+x /etc/init.d/spawn_fcgi_munin_graph')
    sudo('chmod u+x /etc/init.d/spawn_fcgi_munin_html')
    with settings(warn_only=True):
        sudo('chown nginx.www-data /var/log/munin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/cgi-bin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/munin/cgi/munin-cgi*')
    with settings(warn_only=True):
        sudo('/etc/init.d/spawn_fcgi_munin_graph stop')
        sudo('/etc/init.d/spawn_fcgi_munin_graph start')
        sudo('update-rc.d spawn_fcgi_munin_graph defaults')
        sudo('/etc/init.d/spawn_fcgi_munin_html stop')
        sudo('/etc/init.d/spawn_fcgi_munin_html start')
        sudo('update-rc.d spawn_fcgi_munin_html defaults')
    sudo('/etc/init.d/munin-node stop')
    time.sleep(2)
    sudo('/etc/init.d/munin-node start')
    with settings(warn_only=True):
        sudo('chown nginx.www-data /var/log/munin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/cgi-bin/munin-cgi*')
        sudo('chown nginx.www-data /usr/lib/munin/cgi/munin-cgi*')
        sudo('chmod a+rw /var/log/munin/*')
    with settings(warn_only=True):
        sudo('/etc/init.d/spawn_fcgi_munin_graph start')
        sudo('/etc/init.d/spawn_fcgi_munin_html start')
def copy_munin_data(from_server):
    """Migrate munin history from `from_server` to this host: rsync the
    data, /etc config and rendered www dirs into staging paths, swap
    them into place, then restart munin."""
    put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key'), '~/.ssh/newsblur.key')
    put(os.path.join(env.SECRETS_PATH, 'keys/newsblur.key.pub'), '~/.ssh/newsblur.key.pub')
    run('chmod 600 ~/.ssh/newsblur*')
    # put("config/munin.nginx.conf", "/usr/local/nginx/conf/sites-enabled/munin.conf", use_sudo=True)
    sudo('/etc/init.d/nginx reload')
    run("rsync -az -e \"ssh -i /home/sclay/.ssh/newsblur.key\" --stats --progress %s:/var/lib/munin/ /srv/munin" % from_server)
    sudo('rm -fr /var/lib/bak-munin')
    sudo("mv /var/lib/munin /var/lib/bak-munin")
    sudo("mv /srv/munin /var/lib/")
    sudo("chown munin.munin -R /var/lib/munin")
    run("sudo rsync -az -e \"ssh -i /home/sclay/.ssh/newsblur.key\" --stats --progress %s:/etc/munin/ /srv/munin-etc" % from_server)
    sudo('rm -fr /etc/munin')
    sudo("mv /srv/munin-etc /etc/munin")
    sudo("chown munin.munin -R /etc/munin")
    run("sudo rsync -az -e \"ssh -i /home/sclay/.ssh/newsblur.key\" --stats --progress %s:/var/cache/munin/www/ /srv/munin-www" % from_server)
    sudo('rm -fr /var/cache/munin/www')
    sudo("mv /srv/munin-www /var/cache/munin/www")
    sudo("chown munin.munin -R /var/cache/munin/www")
    sudo("/etc/init.d/munin restart")
    sudo("/etc/init.d/munin-node restart")
def setup_db_munin():
    """Link the repo's mongo/postgres/redis munin plugins into
    /etc/munin/plugins and restart munin-node."""
    sudo('rm -f /etc/munin/plugins/mongo*')
    sudo('rm -f /etc/munin/plugins/pg_*')
    sudo('rm -f /etc/munin/plugins/redis_*')
    sudo('cp -frs %s/config/munin/mongo* /etc/munin/plugins/' % env.NEWSBLUR_PATH)
    sudo('cp -frs %s/config/munin/pg_* /etc/munin/plugins/' % env.NEWSBLUR_PATH)
    sudo('cp -frs %s/config/munin/redis_* /etc/munin/plugins/' % env.NEWSBLUR_PATH)
    sudo('/etc/init.d/munin-node stop')
    time.sleep(2)
    sudo('/etc/init.d/munin-node start')
def enable_celerybeat():
    """Install the celerybeat and beat-queue supervisor configs and
    apply them."""
    with virtualenv():
        run('mkdir -p data')
    put('config/supervisor_celerybeat.conf', '/etc/supervisor/conf.d/celerybeat.conf', use_sudo=True)
    put('config/supervisor_celeryd_work_queue.conf', '/etc/supervisor/conf.d/celeryd_work_queue.conf', use_sudo=True)
    put('config/supervisor_celeryd_beat.conf', '/etc/supervisor/conf.d/celeryd_beat.conf', use_sudo=True)
    put('config/supervisor_celeryd_beat_feeds.conf', '/etc/supervisor/conf.d/celeryd_beat_feeds.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
def setup_db_mdadm():
    """Build a RAID-0 array from four EBS volumes, format it XFS, mount
    it at /srv/db and persist the array/mount in mdadm.conf and fstab."""
    sudo('apt-get -y install xfsprogs mdadm')
    sudo('yes | mdadm --create /dev/md0 --level=0 -c256 --raid-devices=4 /dev/xvdf /dev/xvdg /dev/xvdh /dev/xvdi')
    sudo('mkfs.xfs /dev/md0')
    sudo('mkdir -p /srv/db')
    sudo('mount -t xfs -o rw,nobarrier,noatime,nodiratime /dev/md0 /srv/db')
    sudo('mkdir -p /srv/db/mongodb')
    sudo('chown mongodb.mongodb /srv/db/mongodb')
    sudo("echo 'DEVICE /dev/xvdf /dev/xvdg /dev/xvdh /dev/xvdi' | sudo tee -a /etc/mdadm/mdadm.conf")
    sudo("mdadm --examine --scan | sudo tee -a /etc/mdadm/mdadm.conf")
    sudo("echo '/dev/md0 /srv/db xfs rw,nobarrier,noatime,nodiratime,noauto 0 0' | sudo tee -a /etc/fstab")
    sudo("sudo update-initramfs -u -v -k `uname -r`")
def setup_original_page_server():
    """Provision the node service that stores original story pages
    under /srv/originals, managed by supervisor and monit."""
    setup_node_app()
    sudo('mkdir -p /srv/originals')
    sudo('chown %s.%s -R /srv/originals' % (env.user, env.user)) # We assume that the group is the same name as the user. It's common on linux
    config_monit_original()
    put('config/supervisor_node_original.conf',
        '/etc/supervisor/conf.d/node_original.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl reload')
def setup_elasticsearch():
    """Install Elasticsearch 2.4 from its .deb (with OpenJDK 7) plus
    the elasticsearch-head plugin."""
    ES_VERSION = "2.4.4"
    sudo('add-apt-repository -y ppa:openjdk-r/ppa')
    sudo('apt-get update')
    sudo('apt-get install openjdk-7-jre -y')

    with cd(env.VENDOR_PATH):
        run('mkdir -p elasticsearch-%s' % ES_VERSION)
    with cd(os.path.join(env.VENDOR_PATH, 'elasticsearch-%s' % ES_VERSION)):
        run('wget http://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-%s.deb' % ES_VERSION)
        sudo('dpkg -i elasticsearch-%s.deb' % ES_VERSION)
        if not files.exists('/usr/share/elasticsearch/plugins/head'):
            sudo('/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head')
def setup_db_search():
    """Install the search-indexer celery worker supervisor configs."""
    put('config/supervisor_celeryd_search_indexer.conf', '/etc/supervisor/conf.d/celeryd_search_indexer.conf', use_sudo=True)
    put('config/supervisor_celeryd_search_indexer_tasker.conf', '/etc/supervisor/conf.d/celeryd_search_indexer_tasker.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
def setup_imageproxy(install_go=False):
    """Deploy willnorris/imageproxy behind nginx under supervisor.

    install_go -- also clone imageproxy, install Go 1.13 into the
    vendor dir and build the binary with `go get`.
    """
    if install_go:
        with cd(env.VENDOR_PATH):
            with settings(warn_only=True):
                run('git clone https://github.com/willnorris/imageproxy.git')
            run('wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz')
            run('tar -xzf go1.13.3.linux-amd64.tar.gz')
            run('rm go1.13.3.linux-amd64.tar.gz')
            sudo('rm /usr/bin/go')
            sudo('ln -s /srv/code/go/bin/go /usr/bin/go')
        with cd(os.path.join(env.VENDOR_PATH, 'imageproxy')):
            run('go get willnorris.com/go/imageproxy/cmd/imageproxy')
    # Signing key used by imageproxy URL signatures.
    put(os.path.join(env.SECRETS_PATH, 'settings/imageproxy.key'),
        '/etc/imageproxy.key', use_sudo=True)
    put(os.path.join(env.NEWSBLUR_PATH, 'config/supervisor_imageproxy.conf'), '/etc/supervisor/conf.d/supervisor_imageproxy.conf', use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
    sudo('ufw allow 443')
    sudo('ufw allow 80')
    put(os.path.join(env.NEWSBLUR_PATH, 'config/nginx.imageproxy.conf'), "/usr/local/nginx/conf/sites-enabled/imageproxy.conf", use_sudo=True)
    sudo("/etc/init.d/nginx restart")
@parallel
def setup_usage_monitor():
    """Install the daily disk-usage monitor cron job and run it once."""
    sudo('ln -fs %s/utils/monitor_disk_usage.py /etc/cron.daily/monitor_disk_usage' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.daily/monitor_disk_usage')
@parallel
def setup_feeds_fetched_monitor():
    """Install the hourly feed-fetch monitor cron job and run it once."""
    sudo('ln -fs %s/utils/monitor_task_fetches.py /etc/cron.hourly/monitor_task_fetches' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.hourly/monitor_task_fetches')
@parallel
def setup_newsletter_monitor():
    """Install the hourly newsletter-delivery monitor cron job and run
    it once."""
    sudo('ln -fs %s/utils/monitor_newsletter_delivery.py /etc/cron.hourly/monitor_newsletter_delivery' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.hourly/monitor_newsletter_delivery')
@parallel
def setup_queue_monitor():
    """Install the hourly work-queue monitor cron job and run it once."""
    sudo('ln -fs %s/utils/monitor_work_queue.py /etc/cron.hourly/monitor_work_queue' % env.NEWSBLUR_PATH)
    sudo('/etc/cron.hourly/monitor_work_queue')
@parallel
def setup_redis_monitor():
run('sleep 5')
sudo('ln -fs %s/utils/monitor_redis_bgsave.py /etc/cron.daily/monitor_redis_bgsave' % env.NEWSBLUR_PATH)
with settings(warn_only=True):
sudo('/etc/cron.daily/monitor_redis_bgsave')
def setup_task_firewall():
    """Lock down a task server: deny everything except ssh and HTTP."""
    sudo('ufw default deny')
    sudo('ufw allow ssh')
    sudo('ufw allow 80')
    sudo('ufw --force enable')
def setup_motd(role='app'):
    """Install the role-specific message-of-the-day script."""
    motd = '/etc/update-motd.d/22-newsblur-motd'
    put('config/motd_%s.txt' % role, motd, use_sudo=True)
    sudo('chown root.root %s' % motd)
    sudo('chmod a+x %s' % motd)
def enable_celery_supervisor(queue=None, update=True):
    """Install the supervisor config for celeryd (optionally queue-specific).

    Args:
        queue: queue name selecting config/supervisor_celeryd_<queue>.conf;
            falls back to the generic supervisor_celeryd.conf.
        update: when True, also apply the new config via `supervisorctl update`.
    """
    if not queue:
        put('config/supervisor_celeryd.conf', '/etc/supervisor/conf.d/celeryd.conf', use_sudo=True)
    else:
        put('config/supervisor_celeryd_%s.conf' % queue, '/etc/supervisor/conf.d/celeryd.conf', use_sudo=True)
    sudo('supervisorctl reread')
    if update:
        sudo('supervisorctl update')
@parallel
def copy_db_settings():
    """Alias: db servers get the same local settings as task servers."""
    return copy_task_settings()
@parallel
def copy_task_settings():
    """Push task-server local_settings.py and append this host's SERVER_NAME."""
    server_hostname = run('hostname')
    host = server_hostname
    with settings(warn_only=True):
        run('rm -f %s/local_settings.py' % env.NEWSBLUR_PATH)
        put(os.path.join(env.SECRETS_PATH, 'settings/task_settings.py'),
            '%s/newsblur/local_settings.py' % env.NEWSBLUR_PATH)
        run('echo "\nSERVER_NAME = \\\\"%s\\\\"" >> %s/newsblur/local_settings.py' % (host, env.NEWSBLUR_PATH))
@parallel
def copy_spam():
    """Push the private spam module into the social app."""
    put(os.path.join(env.SECRETS_PATH, 'spam/spam.py'), '%s/apps/social/spam.py' % env.NEWSBLUR_PATH)
# Shorthand droplet size (GB of RAM; '32c' = CPU-optimized) -> DigitalOcean
# size slug, used by setup_do() below.
DO_SIZES = {
    '1': 's-1vcpu-1gb',
    '2': 's-1vcpu-2gb',
    '4': 's-2vcpu-4gb',
    '8': 's-4vcpu-8gb',
    '16': 's-6vcpu-16gb',
    '32': 's-8vcpu-32gb',
    '48': 's-12vcpu-48gb',
    '64': 's-16vcpu-64gb',
    '32c': 'c-16',
}
def setup_do(name, size=1, image=None):
    """Create and boot a DigitalOcean droplet, then bootstrap a user on it.

    Args:
        name: base hostname; numbered via do_name() if it has no digits.
        size: key into DO_SIZES (RAM in GB, or '32c').
        image: None for stock Ubuntu 20.04, or 'task'/'app' for the 2018
            snapshots; any other value just prints the available images.
    """
    instance_size = DO_SIZES[str(size)]
    doapi = digitalocean.Manager(token=django_settings.DO_TOKEN_FABRIC)
    ssh_key_ids = [k.id for k in doapi.get_all_sshkeys()]
    if not image:
        image = "ubuntu-20-04-x64"
    else:
        images = dict((s.name, s.id) for s in doapi.get_all_images())
        if image == "task":
            image = images["task-2018-02"]
        elif image == "app":
            image = images["app-2018-02"]
        else:
            images = dict((s.name, s.id) for s in doapi.get_all_images())
            print(images)
    name = do_name(name)
    env.doname = name
    print("Creating droplet: %s" % name)
    instance = digitalocean.Droplet(token=django_settings.DO_TOKEN_FABRIC,
                                    name=name,
                                    size_slug=instance_size,
                                    image=image,
                                    region='nyc1',
                                    monitoring=True,
                                    private_networking=True,
                                    ssh_keys=ssh_key_ids)
    instance.create()
    time.sleep(2)
    instance = digitalocean.Droplet.get_object(django_settings.DO_TOKEN_FABRIC, instance.id)
    print("Booting droplet: %s / %s (size: %s)" % (instance.name, instance.ip_address, instance_size))
    # Poll with a linearly growing backoff until the droplet is active.
    i = 0
    while True:
        if instance.status == 'active':
            print("...booted: %s" % instance.ip_address)
            time.sleep(5)
            break
        elif instance.status == 'new':
            print(".", end=' ')
            sys.stdout.flush()
            instance = digitalocean.Droplet.get_object(django_settings.DO_TOKEN_FABRIC, instance.id)
            i += 1
            time.sleep(i)
        else:
            print("!!! Error: %s" % instance.status)
            return
    host = instance.ip_address
    env.host_string = host
    time.sleep(20)
    add_user_to_do()
    assign_digitalocean_roledefs()
def do_name(name):
    """Pick a hostname for a new DigitalOcean droplet.

    If *name* already contains a digit it is treated as a fully qualified
    hostname and returned unchanged.  Otherwise the existing droplets are
    listed and the first unused ``<name>NN`` (NN = 01..99) is returned.

    Raises:
        ValueError: when all 99 numbered hostnames are taken.  (Previously
            the loop fell through and silently returned None, which would
            have created a droplet literally named "None".)
    """
    if re.search(r"[0-9]", name):
        print(" ---> Using %s as hostname" % name)
        return name
    hosts = do_roledefs(split=False)
    hostnames = [host.name for host in hosts]
    existing_hosts = [hostname for hostname in hostnames if name in hostname]
    for i in range(1, 100):
        try_host = "%s%02d" % (name, i)
        if try_host not in existing_hosts:
            print(" ---> %s hosts in %s (%s). %s is unused." % (len(existing_hosts), name,
                                                                ', '.join(existing_hosts), try_host))
            return try_host
    raise ValueError("No unused hostname left for %r (tried %s01-%s99)" % (name, name, name))
def add_user_to_do():
    """Create the deploy user on a fresh droplet and install ssh keys.

    Runs as root, then switches env.user to the new account for all
    subsequent fabric commands.
    """
    env.user = "root"
    repo_user = "sclay"
    with settings(warn_only=True):
        run('useradd -m %s' % (repo_user))
        setup_sudoers("%s" % (repo_user))
    run('mkdir -p ~%s/.ssh && chmod 700 ~%s/.ssh' % (repo_user, repo_user))
    run('rm -fr ~%s/.ssh/id_dsa*' % (repo_user))
    run('ssh-keygen -t dsa -f ~%s/.ssh/id_dsa -N ""' % (repo_user))
    run('touch ~%s/.ssh/authorized_keys' % (repo_user))
    copy_ssh_keys()
    run('chown %s.%s -R ~%s/.ssh' % (repo_user, repo_user, repo_user))
    env.user = repo_user
def setup_ec2():
    """Launch an EC2 instance from a fixed AMI and wait for it to boot.

    On success, points env.host_string at the new instance's public DNS
    name so subsequent fabric commands target it.
    """
    AMI_NAME = 'ami-834cf1ea'
    INSTANCE_TYPE = 'c1.medium'
    conn = EC2Connection(django_settings.AWS_ACCESS_KEY_ID, django_settings.AWS_SECRET_ACCESS_KEY)
    reservation = conn.run_instances(AMI_NAME, instance_type=INSTANCE_TYPE,
                                     key_name=env.user,
                                     security_groups=['db-mongo'])
    instance = reservation.instances[0]
    print("Booting reservation: %s/%s (size: %s)" % (reservation, instance, INSTANCE_TYPE))
    # Poll with a linearly growing backoff while the instance is pending.
    i = 0
    while True:
        if instance.state == 'pending':
            print(".", end=' ')
            sys.stdout.flush()
            instance.update()
            i += 1
            time.sleep(i)
        elif instance.state == 'running':
            print("...booted: %s" % instance.public_dns_name)
            time.sleep(5)
            break
        else:
            print("!!! Error: %s" % instance.state)
            return
    host = instance.public_dns_name
    env.host_string = host
@parallel
def pull(master=False):
    """git pull in the app checkout; optionally switch to master first."""
    with virtualenv():
        run('git pull')
        if master:
            run('git checkout master')
            run('git pull')
def pre_deploy():
    """Build and upload the compressed static asset bundle before deploying."""
    compress_assets(bundle=True)
@serial
def post_deploy():
    """Remove the local asset bundle after a deploy completes."""
    cleanup_assets()
def role_for_host():
    """Return the role in env.roledefs whose host list contains the
    current env.host, or None if the host is not registered anywhere."""
    current_host = env.host
    return next(
        (role for role, hosts in env.roledefs.items() if current_host in hosts),
        None,
    )
@parallel
def deploy(fast=False, reload=False):
    """Deploy code without assets; work/search/debug roles force a reload."""
    role = role_for_host()
    if role in ['work', 'search', 'debug']:
        deploy_code(copy_assets=False, fast=fast, reload=True)
    else:
        deploy_code(copy_assets=False, fast=fast, reload=reload)
@parallel
def deploy_web(fast=False):
    """Deploy code with static assets; work/search roles force a reload."""
    role = role_for_host()
    if role in ['work', 'search']:
        deploy_code(copy_assets=True, fast=fast, reload=True)
    else:
        deploy_code(copy_assets=True, fast=fast)
@parallel
def deploy_rebuild(fast=False):
    """Deploy with assets after wiping the static/ directory."""
    deploy_code(copy_assets=True, fast=fast, rebuild=True)
@parallel
def kill_gunicorn():
    """Hard-kill (SIGKILL) all gunicorn_django processes for the deploy user."""
    with virtualenv():
        sudo('pkill -9 -u %s -f gunicorn_django' % env.user)
@parallel
def deploy_code(copy_assets=False, rebuild=False, fast=False, reload=False):
    """Pull new code and restart the app server.

    Args:
        copy_assets: also transfer the static asset bundle from S3.
        rebuild: wipe static/ before transferring assets.
        fast: hard-kill gunicorn instead of a graceful HUP.
        reload: do a full `supervisorctl reload` instead of restarting
            gunicorn (takes precedence over `fast`).
    """
    with virtualenv():
        run('git pull')
        run('mkdir -p static')
        if rebuild:
            run('rm -fr static/*')
    if copy_assets:
        transfer_assets()
    with virtualenv():
        with settings(warn_only=True):
            if reload:
                sudo('supervisorctl reload')
            elif fast:
                kill_gunicorn()
            else:
                # Graceful restart: gunicorn re-execs its workers on SIGHUP.
                sudo('kill -HUP `cat /srv/newsblur/logs/gunicorn.pid`')
@parallel
def kill():
    """Reload supervisor and hard-kill gunicorn (sudo only for ubuntu user)."""
    sudo('supervisorctl reload')
    with settings(warn_only=True):
        if env.user == 'ubuntu':
            sudo('./utils/kill_gunicorn.sh')
        else:
            run('./utils/kill_gunicorn.sh')
@parallel
def deploy_node():
    """Pull new code and restart all node.js services under supervisor."""
    pull()
    with virtualenv():
        run('sudo supervisorctl restart node_unread')
        run('sudo supervisorctl restart node_unread_ssl')
        run('sudo supervisorctl restart node_favicons')
        run('sudo supervisorctl restart node_text')
def gunicorn_restart():
    """Alias kept for task-name discoverability."""
    restart_gunicorn()
def restart_gunicorn():
    """Restart the gunicorn app server via supervisor."""
    with virtualenv(), settings(warn_only=True):
        run('sudo supervisorctl restart gunicorn')
def gunicorn_stop():
    """Stop the gunicorn app server via supervisor."""
    with virtualenv(), settings(warn_only=True):
        run('sudo supervisorctl stop gunicorn')
def staging():
    """Deploy the staging checkout and warm the dev site."""
    with cd('~/staging'):
        run('git pull')
        run('kill -HUP `cat logs/gunicorn.pid`')
        run('curl -s http://dev.newsblur.com > /dev/null')
        run('curl -s http://dev.newsblur.com/m/ > /dev/null')
def staging_build():
    """Deploy staging with DB migrations, then warm the dev site."""
    with cd('~/staging'):
        run('git pull')
        run('./manage.py migrate')
        run('kill -HUP `cat logs/gunicorn.pid`')
        run('curl -s http://dev.newsblur.com > /dev/null')
        run('curl -s http://dev.newsblur.com/m/ > /dev/null')
@parallel
def celery():
    """Default celery deploy: full stop/start cycle."""
    celery_slow()
def celery_slow():
    """Pull new code, then fully stop and start celery workers."""
    with virtualenv():
        run('git pull')
    celery_stop()
    celery_start()
@parallel
def celery_fast():
    """Pull new code and reload celery in place (no hard kill)."""
    with virtualenv():
        run('git pull')
    celery_reload()
@parallel
def celery_stop():
    """Stop celery under supervisor, then kill any straggler workers."""
    with virtualenv():
        sudo('supervisorctl stop celery')
        with settings(warn_only=True):
            if env.user == 'ubuntu':
                sudo('./utils/kill_celery.sh')
            else:
                run('./utils/kill_celery.sh')
@parallel
def celery_start():
    """Start celery under supervisor and show the tail of the app log."""
    with virtualenv():
        run('sudo supervisorctl start celery')
        run('tail logs/newsblur.log')
@parallel
def celery_reload():
    """Reload the celery supervisor program and show the app log tail."""
    with virtualenv():
        run('sudo supervisorctl reload celery')
        run('tail logs/newsblur.log')
def kill_celery():
    """Kill celery worker processes directly (sudo only for ubuntu user)."""
    with virtualenv():
        with settings(warn_only=True):
            if env.user == 'ubuntu':
                sudo('./utils/kill_celery.sh')
            else:
                run('./utils/kill_celery.sh')
def compress_assets(bundle=False):
    """Build the jammit asset bundle, tar it, and upload to S3 with retries.

    NOTE(review): the `bundle` parameter is unused in this body — presumably
    kept for callers (pre_deploy passes bundle=True); confirm before removing.
    """
    local('jammit -c newsblur/assets.yml --base-url https://www.newsblur.com --output static')
    local('tar -czf static.tgz static/*')
    # Retry the S3 upload up to 5 times; warn_only keeps local() from
    # raising, so `success` distinguishes a failed upload from a crash.
    tries_left = 5
    while True:
        try:
            success = False
            with settings(warn_only=True):
                local('PYTHONPATH=/srv/newsblur python utils/backups/s3.py set static.tgz')
                success = True
            if not success:
                raise Exception("Ack!")
            break
        except Exception as e:
            print(" ***> %s. Trying %s more time%s..." % (e, tries_left, '' if tries_left == 1 else 's'))
            tries_left -= 1
            if tries_left <= 0: break
def transfer_assets():
    """Download the pre-built static.tgz bundle from S3 and unpack it into
    static/ on the remote host.

    Counterpart of compress_assets(), which uploads static.tgz via
    utils/backups/s3.py.
    """
    # NOTE(review): the first statement of this function was truncated to
    # "utils/backups/s3.py get static.tgz')" (a syntax error). Reconstructed
    # from the matching `s3.py set` call in compress_assets() and the
    # `s3.py get` call in restore_mongo().
    run('PYTHONPATH=/srv/newsblur python utils/backups/s3.py get static.tgz')
    run('mv static.tgz static/static.tgz')
    run('tar -xzf static/static.tgz')
    run('rm -f static/static.tgz')
def cleanup_assets():
    """Delete the local static asset bundle created by compress_assets()."""
    local('rm -f static.tgz')
def setup_redis_backups(name=None):
    """Install a 4am daily crontab entry for the (optionally named) redis backup."""
    crontab = ("0 4 * * * /srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_redis%s.py" %
               (("_%s"%name) if name else ""))
    # Append the entry, de-duplicating against the existing crontab.
    run('(crontab -l ; echo "%s") | sort - | uniq - | crontab -' % crontab)
    run('crontab -l')
def setup_mongo_backups():
    """Install a 4am daily crontab entry for the mongo backup."""
    crontab = "0 4 * * * /srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_mongo.py"
    run('(crontab -l ; echo "%s") | sort - | uniq - | crontab -' % crontab)
    run('crontab -l')
def setup_postgres_backups():
    """Install crontab entries for the pgsql backup plus WAL archive pruning."""
    crontab = """
    0 4 * * * /srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_psql.py
    0 * * * * sudo find /var/lib/postgresql/13/archive -mtime +1 -exec rm {} \;
    0 * * * * sudo find /var/lib/postgresql/13/archive -type f -mmin +180 -delete"""
    run('(crontab -l ; echo "%s") | sort - | uniq - | crontab -' % crontab)
    run('crontab -l')
def backup_redis(name=None):
    """Run the (optionally named) redis backup script immediately."""
    run('/srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_redis%s.py' % (("_%s"%name) if name else ""))
def backup_mongo():
    """Run the mongo backup script immediately."""
    run('/srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_mongo.py')
def backup_postgresql():
    """Run the pgsql backup script immediately."""
    run('/srv/newsblur/venv/newsblur3/bin/python /srv/newsblur/utils/backups/backup_psql.py')
def sync_time():
    """One-shot clock sync: stop ntpd, step the clock, restart ntpd."""
    with settings(warn_only=True):
        sudo("/etc/init.d/ntp stop")
        sudo("ntpdate pool.ntp.org")
        sudo("/etc/init.d/ntp start")
def setup_time_calibration():
    """Install ntp and an hourly ntpdate cron job, then run it once."""
    sudo('apt-get -y install ntp')
    put('config/ntpdate.cron', '%s/' % env.NEWSBLUR_PATH)
    sudo('chown root.root %s/ntpdate.cron' % env.NEWSBLUR_PATH)
    sudo('chmod 755 %s/ntpdate.cron' % env.NEWSBLUR_PATH)
    sudo('mv %s/ntpdate.cron /etc/cron.hourly/ntpdate' % env.NEWSBLUR_PATH)
    with settings(warn_only=True):
        sudo('/etc/cron.hourly/ntpdate')
def restore_postgres(port=5432, download=False):
    """Drop, recreate, and restore the NewsBlur PostgreSQL database.

    Prompts for confirmation first (answer 'y').  When download is True,
    the dated dump is fetched from S3 before restoring.
    """
    with virtualenv():
        # Hard-coded snapshot date of the dump to restore.
        backup_date = '2020-12-03-02-51'
        yes = prompt("Dropping and creating NewsBlur PGSQL db. Sure?")
        if yes != 'y':
            return
        if download:
            run('mkdir -p postgres')
            run('PYTHONPATH=%s python utils/backups/s3.py get postgres/backup_postgresql_%s.sql.gz' % (env.NEWSBLUR_PATH, backup_date))
        with settings(warn_only=True):
            run('dropdb newsblur -p %s -U newsblur' % (port,), pty=False)
            run('sudo -u postgres createuser newsblur -s')
        run('createdb newsblur -p %s -O newsblur -U newsblur' % (port,), pty=False)
        run('pg_restore -U newsblur -p %s --role=newsblur --dbname=newsblur /srv/newsblur/postgres/backup_postgresql_%s.sql.gz' % (port, backup_date), pty=False)
def restore_mongo(download=False):
    """Restore the dated mongo backup, optionally downloading it from S3 first."""
    backup_date = '2020-11-11-04-00'
    if download:
        run('PYTHONPATH=/srv/newsblur python utils/backups/s3.py get backup_mongo_%s.tgz' % (backup_date))
    run('tar -xf backup_mongo_%s.tgz' % backup_date)
    run('mongorestore backup_mongo_%s' % backup_date)
# Pull the S3 backup credentials out of Django settings at import time; the
# S3 helper functions below depend on these module-level names.
if django_settings:
    try:
        ACCESS_KEY = django_settings.S3_ACCESS_KEY
        SECRET = django_settings.S3_SECRET
        BUCKET_NAME = django_settings.S3_BACKUP_BUCKET
    # Was a bare `except:` — narrowed to Exception so SystemExit and
    # KeyboardInterrupt are no longer swallowed, while still tolerating a
    # misconfigured settings module (best-effort, as before).
    except Exception:
        print(" ---> You need to fix django's settings. Enter python and type `import settings`.")
def _backup_bucket():
    """Open a connection to the configured S3 backup bucket.

    Shared by the four backup helpers below; relies on the module-level
    ACCESS_KEY / SECRET / BUCKET_NAME pulled from Django settings.
    """
    return S3Connection(ACCESS_KEY, SECRET).get_bucket(BUCKET_NAME)
def save_file_in_s3(filename):
    """Upload *filename* to the backup bucket under the same key name."""
    key = Key(_backup_bucket())
    key.key = filename
    key.set_contents_from_filename(filename)
def get_file_from_s3(filename):
    """Download the object named *filename* from the backup bucket."""
    key = Key(_backup_bucket())
    key.key = filename
    key.get_contents_to_filename(filename)
def list_backup_in_s3():
    """Print every key in the backup bucket, prefixed with its index."""
    for index, key in enumerate(_backup_bucket().get_all_keys()):
        print("[%s] %s" % (index, key.name))
def delete_all_backups():
    """Delete every object in the backup bucket.  Irreversible."""
    #FIXME: validate filename exists
    for index, key in enumerate(_backup_bucket().get_all_keys()):
        print("deleting %s" % (key.name))
        key.delete()
def add_revsys_keys():
    """Append the revsys team's public keys to authorized_keys on the host."""
    put("~/Downloads/revsys-keys.pub", "revsys_keys")
    run('cat revsys_keys >> ~/.ssh/authorized_keys')
    run('rm revsys_keys')
def upgrade_to_virtualenv(role=None):
    """Migrate a server of the given role to the virtualenv-based layout.

    Stops the role's services, reinstalls system packages and pip deps,
    then re-enables the role's services (rebooting where required).
    """
    if not role:
        print(" ---> You must specify a role!")
        return
    setup_virtualenv()
    # Stop whatever this role runs before touching the environment.
    if role == "task" or role == "search":
        celery_stop()
    elif role == "app":
        gunicorn_stop()
    elif role == "node":
        run('sudo supervisorctl stop node_unread')
        run('sudo supervisorctl stop node_favicons')
    elif role == "work":
        sudo('/etc/init.d/supervisor stop')
    kill_pgbouncer(bounce=False)
    setup_installs()
    pip()
    # Bring the role's services back up on the new environment.
    if role == "task":
        enable_celery_supervisor(update=False)
        sudo('reboot')
    elif role == "app":
        setup_gunicorn(supervisor=True, restart=False)
        sudo('reboot')
    elif role == "node":
        deploy_node()
    elif role == "search":
        setup_db_search()
    elif role == "work":
        enable_celerybeat()
        sudo('reboot')
def benchmark():
    """Install sysbench and run CPU + random read/write file I/O benchmarks."""
    run('curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.deb.sh | sudo bash')
    sudo('apt-get install -y sysbench')
    run('sysbench cpu --cpu-max-prime=20000 run')
    run('sysbench fileio --file-total-size=150G prepare')
    run('sysbench fileio --file-total-size=150G --file-test-mode=rndrw --time=300 --max-requests=0 run')
    run('sysbench fileio --file-total-size=150G cleanup')
| true | true |
1c35d8dcaf872e8dc63e0b7c8949ce04feb59a02 | 10,757 | py | Python | models/resnet_2p1d.py | kk2487/3dresnet | d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94 | [
"MIT"
] | null | null | null | models/resnet_2p1d.py | kk2487/3dresnet | d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94 | [
"MIT"
] | null | null | null | models/resnet_2p1d.py | kk2487/3dresnet | d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94 | [
"MIT"
] | null | null | null | import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_inplanes():
    """Base channel widths for the four ResNet stages (64..512)."""
    return [2 ** exponent for exponent in range(6, 10)]
def conv1x3x3(in_planes, mid_planes, stride=1):
    """Spatial half of the (2+1)D factorization: a 1x3x3 convolution that
    mixes only height/width, leaving the temporal axis untouched."""
    spatial_stride = (1, stride, stride)
    return nn.Conv3d(
        in_planes,
        mid_planes,
        kernel_size=(1, 3, 3),
        stride=spatial_stride,
        padding=(0, 1, 1),
        bias=False,
    )
def conv3x1x1(mid_planes, planes, stride=1):
    """Temporal half of the (2+1)D factorization: a 3x1x1 convolution that
    mixes only along the time axis."""
    temporal_stride = (stride, 1, 1)
    return nn.Conv3d(
        mid_planes,
        planes,
        kernel_size=(3, 1, 1),
        stride=temporal_stride,
        padding=(1, 0, 0),
        bias=False,
    )
def conv1x1x1(in_planes, out_planes, stride=1):
    """Pointwise 1x1x1 convolution for channel projection / downsampling."""
    return nn.Conv3d(
        in_planes, out_planes, kernel_size=1, stride=stride, bias=False
    )
class BasicBlock(nn.Module):
    """(2+1)D basic residual block: each 3x3x3 conv is replaced by a 1x3x3
    spatial conv followed by a 3x1x1 temporal conv (R(2+1)D factorization)."""
    # Output channels = planes * expansion.
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        # mid_planes is chosen so the factorized conv pair has roughly the
        # same parameter count as the 3x3x3 conv it replaces.
        n_3d_parameters1 = in_planes * planes * 3 * 3 * 3
        n_2p1d_parameters1 = in_planes * 3 * 3 + 3 * planes
        mid_planes1 = n_3d_parameters1 // n_2p1d_parameters1
        self.conv1_s = conv1x3x3(in_planes, mid_planes1, stride)
        self.bn1_s = nn.BatchNorm3d(mid_planes1)
        self.conv1_t = conv3x1x1(mid_planes1, planes, stride)
        self.bn1_t = nn.BatchNorm3d(planes)
        n_3d_parameters2 = planes * planes * 3 * 3 * 3
        n_2p1d_parameters2 = planes * 3 * 3 + 3 * planes
        mid_planes2 = n_3d_parameters2 // n_2p1d_parameters2
        self.conv2_s = conv1x3x3(planes, mid_planes2)
        self.bn2_s = nn.BatchNorm3d(mid_planes2)
        self.conv2_t = conv3x1x1(mid_planes2, planes)
        self.bn2_t = nn.BatchNorm3d(planes)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the residual when shapes differ.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv(s,t)-bn-relu twice, then residual add and final ReLU."""
        residual = x
        out = self.conv1_s(x)
        out = self.bn1_s(out)
        out = self.relu(out)
        out = self.conv1_t(out)
        out = self.bn1_t(out)
        out = self.relu(out)
        out = self.conv2_s(out)
        out = self.bn2_s(out)
        out = self.relu(out)
        out = self.conv2_t(out)
        out = self.bn2_t(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """(2+1)D bottleneck block: 1x1x1 reduce, factorized (1x3x3 + 3x1x1)
    conv, then 1x1x1 expand by `expansion`."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv1x1x1(in_planes, planes)
        self.bn1 = nn.BatchNorm3d(planes)
        # mid_planes matches the parameter budget of a full 3x3x3 conv.
        n_3d_parameters = planes * planes * 3 * 3 * 3
        n_2p1d_parameters = planes * 3 * 3 + 3 * planes
        mid_planes = n_3d_parameters // n_2p1d_parameters
        self.conv2_s = conv1x3x3(planes, mid_planes, stride)
        self.bn2_s = nn.BatchNorm3d(mid_planes)
        self.conv2_t = conv3x1x1(mid_planes, planes, stride)
        self.bn2_t = nn.BatchNorm3d(planes)
        self.conv3 = conv1x1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm3d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the residual when shapes differ.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """reduce -> factorized conv -> expand, then residual add + ReLU."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2_s(out)
        out = self.bn2_s(out)
        out = self.relu(out)
        out = self.conv2_t(out)
        out = self.bn2_t(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """R(2+1)D ResNet for video clips: factorized stem conv, four residual
    stages, global average pooling, and a linear classifier.

    NOTE(review): sample_size and sample_duration are accepted but unused
    in this body — presumably kept for caller compatibility; confirm.
    """
    def __init__(self,
                 block,
                 layers,
                 block_inplanes,
                 n_input_channels=3,
                 conv1_t_size=7,
                 conv1_t_stride=1,
                 no_max_pool=False,
                 shortcut_type='B',
                 widen_factor=1.0,
                 num_classes=700,
                 sample_size=112,
                 sample_duration=15):
        #super().__init__()
        super(ResNet, self).__init__()
        # Widen every stage's channel count by widen_factor.
        block_inplanes = [int(x * widen_factor) for x in block_inplanes]
        print(block_inplanes)
        self.in_planes = block_inplanes[0]
        self.no_max_pool = no_max_pool
        # Factorized stem: 1x7x7 spatial conv then conv1_t_size x1x1 temporal
        # conv, with mid_planes matching the 3D conv's parameter budget.
        n_3d_parameters = 3 * self.in_planes * conv1_t_size * 7 * 7
        n_2p1d_parameters = 3 * 7 * 7 + conv1_t_size * self.in_planes
        mid_planes = n_3d_parameters // n_2p1d_parameters
        self.conv1_s = nn.Conv3d(n_input_channels,
                                 mid_planes,
                                 kernel_size=(1, 7, 7),
                                 stride=(1, 2, 2),
                                 padding=(0, 3, 3),
                                 bias=False)
        self.bn1_s = nn.BatchNorm3d(mid_planes)
        self.conv1_t = nn.Conv3d(mid_planes,
                                 self.in_planes,
                                 kernel_size=(conv1_t_size, 1, 1),
                                 stride=(conv1_t_stride, 1, 1),
                                 padding=(conv1_t_size // 2, 0, 0),
                                 bias=False)
        self.bn1_t = nn.BatchNorm3d(self.in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
                                       shortcut_type)
        self.layer2 = self._make_layer(block,
                                       block_inplanes[1],
                                       layers[1],
                                       shortcut_type,
                                       stride=2)
        self.layer3 = self._make_layer(block,
                                       block_inplanes[2],
                                       layers[2],
                                       shortcut_type,
                                       stride=2)
        self.layer4 = self._make_layer(block,
                                       block_inplanes[3],
                                       layers[3],
                                       shortcut_type,
                                       stride=2)
        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)
        # He init for convs; BN scales to 1, shifts to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _downsample_basic_block(self, x, planes, stride):
        """Shortcut type 'A': strided average pool plus zero-padded channels."""
        out = F.avg_pool3d(x, kernel_size=1, stride=stride)
        zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
                                out.size(3), out.size(4))
        if isinstance(out.data, torch.cuda.FloatTensor):
            zero_pads = zero_pads.cuda()
        out = torch.cat([out.data, zero_pads], dim=1)
        return out
    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
        """Build one residual stage of `blocks` blocks; only the first block
        may stride/downsample."""
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            if shortcut_type == 'A':
                downsample = partial(self._downsample_basic_block,
                                     planes=planes * block.expansion,
                                     stride=stride)
            else:
                downsample = nn.Sequential(
                    conv1x1x1(self.in_planes, planes * block.expansion, stride),
                    nn.BatchNorm3d(planes * block.expansion))
        layers = []
        layers.append(
            block(in_planes=self.in_planes,
                  planes=planes,
                  stride=stride,
                  downsample=downsample))
        self.in_planes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.in_planes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Stem -> (optional) maxpool -> 4 stages -> global pool -> logits."""
        x = self.conv1_s(x)
        x = self.bn1_s(x)
        x = self.relu(x)
        x = self.conv1_t(x)
        x = self.bn1_t(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
# Can print the model structure
def model_info(model, report='summary'):
# Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if report is 'full':
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
def resnet50(**kwargs):
    """Constructs a (2+1)D ResNet-50 (Bottleneck, [3, 4, 6, 3]) and prints
    a full per-parameter report before returning it.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], get_inplanes(), **kwargs)
    model_info(model,'full')
    return model
def generate_model(model_depth, **kwargs):
    """Build a (2+1)D ResNet of the requested depth.

    Args:
        model_depth: one of 10/18/34 (BasicBlock) or 50/101/152/200
            (Bottleneck).
        **kwargs: forwarded to the ResNet constructor.

    Returns:
        The constructed ResNet module.

    Raises:
        AssertionError: if model_depth is not a supported depth.
    """
    # Depth -> (block type, blocks per stage). Replaces the if/elif chain.
    configs = {
        10: (BasicBlock, [1, 1, 1, 1]),
        18: (BasicBlock, [2, 2, 2, 2]),
        34: (BasicBlock, [3, 4, 6, 3]),
        50: (Bottleneck, [3, 4, 6, 3]),
        101: (Bottleneck, [3, 4, 23, 3]),
        152: (Bottleneck, [3, 8, 36, 3]),
        200: (Bottleneck, [3, 24, 36, 3]),
    }
    assert model_depth in configs
    block, layers = configs[model_depth]
    # NOTE(review): the original `return model` line had corrupted trailing
    # tokens fused onto it (a syntax error); restored here.
    return ResNet(block, layers, get_inplanes(), **kwargs)
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
# NOTE(review): everything from here down duplicates the definitions earlier
# in this file (a comment-stripped second copy); consider deleting the
# duplicate region entirely.
def get_inplanes():
    """Base channel widths for the four ResNet stages."""
    return [64, 128, 256, 512]
def conv1x3x3(in_planes, mid_planes, stride=1):
    """Spatial 1x3x3 convolution of the (2+1)D factorization."""
    return nn.Conv3d(in_planes,
                     mid_planes,
                     kernel_size=(1, 3, 3),
                     stride=(1, stride, stride),
                     padding=(0, 1, 1),
                     bias=False)
def conv3x1x1(mid_planes, planes, stride=1):
    """Temporal 3x1x1 convolution of the (2+1)D factorization."""
    return nn.Conv3d(mid_planes,
                     planes,
                     kernel_size=(3, 1, 1),
                     stride=(stride, 1, 1),
                     padding=(1, 0, 0),
                     bias=False)
def conv1x1x1(in_planes, out_planes, stride=1):
    """Pointwise 1x1x1 convolution for channel projection / downsampling."""
    return nn.Conv3d(in_planes,
                     out_planes,
                     kernel_size=1,
                     stride=stride,
                     bias=False)
class BasicBlock(nn.Module):
    """(2+1)D basic residual block (duplicate of the definition above)."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        # mid_planes matches the parameter budget of a 3x3x3 conv.
        n_3d_parameters1 = in_planes * planes * 3 * 3 * 3
        n_2p1d_parameters1 = in_planes * 3 * 3 + 3 * planes
        mid_planes1 = n_3d_parameters1 // n_2p1d_parameters1
        self.conv1_s = conv1x3x3(in_planes, mid_planes1, stride)
        self.bn1_s = nn.BatchNorm3d(mid_planes1)
        self.conv1_t = conv3x1x1(mid_planes1, planes, stride)
        self.bn1_t = nn.BatchNorm3d(planes)
        n_3d_parameters2 = planes * planes * 3 * 3 * 3
        n_2p1d_parameters2 = planes * 3 * 3 + 3 * planes
        mid_planes2 = n_3d_parameters2 // n_2p1d_parameters2
        self.conv2_s = conv1x3x3(planes, mid_planes2)
        self.bn2_s = nn.BatchNorm3d(mid_planes2)
        self.conv2_t = conv3x1x1(mid_planes2, planes)
        self.bn2_t = nn.BatchNorm3d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv(s,t)-bn-relu twice, then residual add and final ReLU."""
        residual = x
        out = self.conv1_s(x)
        out = self.bn1_s(out)
        out = self.relu(out)
        out = self.conv1_t(out)
        out = self.bn1_t(out)
        out = self.relu(out)
        out = self.conv2_s(out)
        out = self.bn2_s(out)
        out = self.relu(out)
        out = self.conv2_t(out)
        out = self.bn2_t(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """(2+1)D bottleneck block (duplicate of the definition above)."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv1x1x1(in_planes, planes)
        self.bn1 = nn.BatchNorm3d(planes)
        # mid_planes matches the parameter budget of a 3x3x3 conv.
        n_3d_parameters = planes * planes * 3 * 3 * 3
        n_2p1d_parameters = planes * 3 * 3 + 3 * planes
        mid_planes = n_3d_parameters // n_2p1d_parameters
        self.conv2_s = conv1x3x3(planes, mid_planes, stride)
        self.bn2_s = nn.BatchNorm3d(mid_planes)
        self.conv2_t = conv3x1x1(mid_planes, planes, stride)
        self.bn2_t = nn.BatchNorm3d(planes)
        self.conv3 = conv1x1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm3d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """reduce -> factorized conv -> expand, then residual add + ReLU."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2_s(out)
        out = self.bn2_s(out)
        out = self.relu(out)
        out = self.conv2_t(out)
        out = self.bn2_t(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """R(2+1)D ResNet (duplicate of the definition earlier in this file).

    NOTE(review): sample_size and sample_duration are accepted but unused
    in this body — presumably kept for caller compatibility; confirm.
    """
    def __init__(self,
                 block,
                 layers,
                 block_inplanes,
                 n_input_channels=3,
                 conv1_t_size=7,
                 conv1_t_stride=1,
                 no_max_pool=False,
                 shortcut_type='B',
                 widen_factor=1.0,
                 num_classes=700,
                 sample_size=112,
                 sample_duration=15):
        super(ResNet, self).__init__()
        block_inplanes = [int(x * widen_factor) for x in block_inplanes]
        print(block_inplanes)
        self.in_planes = block_inplanes[0]
        self.no_max_pool = no_max_pool
        # Factorized stem with parameter budget matched to the 3D conv.
        n_3d_parameters = 3 * self.in_planes * conv1_t_size * 7 * 7
        n_2p1d_parameters = 3 * 7 * 7 + conv1_t_size * self.in_planes
        mid_planes = n_3d_parameters // n_2p1d_parameters
        self.conv1_s = nn.Conv3d(n_input_channels,
                                 mid_planes,
                                 kernel_size=(1, 7, 7),
                                 stride=(1, 2, 2),
                                 padding=(0, 3, 3),
                                 bias=False)
        self.bn1_s = nn.BatchNorm3d(mid_planes)
        self.conv1_t = nn.Conv3d(mid_planes,
                                 self.in_planes,
                                 kernel_size=(conv1_t_size, 1, 1),
                                 stride=(conv1_t_stride, 1, 1),
                                 padding=(conv1_t_size // 2, 0, 0),
                                 bias=False)
        self.bn1_t = nn.BatchNorm3d(self.in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
                                       shortcut_type)
        self.layer2 = self._make_layer(block,
                                       block_inplanes[1],
                                       layers[1],
                                       shortcut_type,
                                       stride=2)
        self.layer3 = self._make_layer(block,
                                       block_inplanes[2],
                                       layers[2],
                                       shortcut_type,
                                       stride=2)
        self.layer4 = self._make_layer(block,
                                       block_inplanes[3],
                                       layers[3],
                                       shortcut_type,
                                       stride=2)
        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)
        # He init for convs; BN scales to 1, shifts to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _downsample_basic_block(self, x, planes, stride):
        """Shortcut type 'A': strided average pool plus zero-padded channels."""
        out = F.avg_pool3d(x, kernel_size=1, stride=stride)
        zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
                                out.size(3), out.size(4))
        if isinstance(out.data, torch.cuda.FloatTensor):
            zero_pads = zero_pads.cuda()
        out = torch.cat([out.data, zero_pads], dim=1)
        return out
    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
        """Build one residual stage; only the first block may stride."""
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            if shortcut_type == 'A':
                downsample = partial(self._downsample_basic_block,
                                     planes=planes * block.expansion,
                                     stride=stride)
            else:
                downsample = nn.Sequential(
                    conv1x1x1(self.in_planes, planes * block.expansion, stride),
                    nn.BatchNorm3d(planes * block.expansion))
        layers = []
        layers.append(
            block(in_planes=self.in_planes,
                  planes=planes,
                  stride=stride,
                  downsample=downsample))
        self.in_planes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.in_planes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Stem -> maxpool -> 4 stages -> global pool -> logits."""
        x = self.conv1_s(x)
        x = self.bn1_s(x)
        x = self.relu(x)
        x = self.conv1_t(x)
        x = self.bn1_t(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def model_info(model, report='summary'):
    """Print a parameter/gradient summary of a PyTorch model.

    Duplicate of the earlier definition; carries the same fix.

    Args:
        model: any torch.nn.Module.
        report: 'summary' prints totals only; 'full' adds a per-parameter
            table (index, name, grad flag, count, shape, mean, std).
    """
    n_p = sum(x.numel() for x in model.parameters())
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)
    # Fixed: was `report is 'full'` — identity comparison with a string
    # literal relies on CPython interning and raises SyntaxWarning on 3.8+.
    if report == 'full':
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))
def resnet50(**kwargs):
    """Constructs a (2+1)D ResNet-50 and prints a full parameter report."""
    model = ResNet(Bottleneck, [3, 4, 6, 3], get_inplanes(), **kwargs)
    model_info(model,'full')
    return model
def generate_model(model_depth, **kwargs):
    """Build a (2+1)D ResNet of the requested depth (duplicate definition).

    Raises:
        AssertionError: if model_depth is not a supported depth.
    """
    # Depth -> (block type, blocks per stage). Replaces the if/elif chain.
    configs = {
        10: (BasicBlock, [1, 1, 1, 1]),
        18: (BasicBlock, [2, 2, 2, 2]),
        34: (BasicBlock, [3, 4, 6, 3]),
        50: (Bottleneck, [3, 4, 6, 3]),
        101: (Bottleneck, [3, 4, 23, 3]),
        152: (Bottleneck, [3, 8, 36, 3]),
        200: (Bottleneck, [3, 24, 36, 3]),
    }
    assert model_depth in configs
    block, layers = configs[model_depth]
    # NOTE(review): the original `return model` line ended in corrupted
    # trailing tokens ("| true | true |", a syntax error); restored here.
    return ResNet(block, layers, get_inplanes(), **kwargs)
1c35d95a84884bcda70944a0f090125b57b980cd | 4,252 | py | Python | Fig5b_lipid_droplets_area.py | MortisHuang/VIFFI-image-analysis | ad144970e9cb53d61119dd96370157251c03cc07 | [
"MIT"
] | null | null | null | Fig5b_lipid_droplets_area.py | MortisHuang/VIFFI-image-analysis | ad144970e9cb53d61119dd96370157251c03cc07 | [
"MIT"
] | null | null | null | Fig5b_lipid_droplets_area.py | MortisHuang/VIFFI-image-analysis | ad144970e9cb53d61119dd96370157251c03cc07 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 18:35:15 2019
@author: Mortis
"""
import numpy as np
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import glob
import os
from PIL import Image
import pandas as pd
import copy
import datetime
#%% Set the output file location
# Date-stamped output directory for this analysis run, e.g. ./Fig5_2024_01_31/.
run_data = datetime.datetime.now().strftime("%Y_%m_%d")
result_path = r"./Fig5_{}/".format(run_data)
# exist_ok avoids the check-then-create race of the old
# `if not os.path.exists(...): os.makedirs(...)` pattern and is idempotent.
os.makedirs(result_path, exist_ok=True)
#%%
# Per-cell result buffers: one slot per image, zero-filled so images that
# yield no peaks simply keep the default value.
numberofdata=12000
Area_of_Nm=np.zeros(numberofdata)
Area_of_Np=np.zeros(numberofdata)
# Column 0 = N- condition, column 1 = N+ condition.
Area_of_All=np.zeros((numberofdata,2))
Peaks_of_Nm=np.zeros(numberofdata)
Peaks_of_Np=np.zeros(numberofdata)
# Input image folders (Windows paths) for the two nitrogen conditions.
entry1=r'.\Euglena\N-'
entry2=r'.\Euglena\N+'
fnamelist1 = glob.glob(os.path.join(entry1, '*.tif'))
fnamelist2 = glob.glob(os.path.join(entry2, '*.tif'))
# Half-size of the square patch measured around each detected peak.
r=3
index=0
def _droplet_peaks_and_area(filename, radius, neighborhood_size=10, threshold=25):
    """Detect lipid-droplet fluorescence peaks in one image and measure
    their total bright area.

    Loads the image, suppresses background below intensity 5, finds local
    maxima of the green channel with a max/min-filter prominence test, then
    for each peak thresholds a (2*radius)^2 patch at its mean intensity and
    counts the saturated pixels.

    Returns:
        (n_peaks, total_area); (0, 1) when no peak is found, matching the
        original script's fallback area of 1.
    """
    im = Image.open(filename)
    imarray = np.array(im)
    imarray[imarray < 5] = 0  # suppress background
    data = imarray[:, :, 1]   # green channel
    data_max = filters.maximum_filter(data, neighborhood_size)
    maxima = (data == data_max)
    data_min = filters.minimum_filter(data, neighborhood_size)
    diff = ((data_max - data_min) > threshold)
    maxima[diff == 0] = 0
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    centers = []
    for dy, dx in slices:
        x_center = (dx.start + dx.stop - 1) / 2
        y_center = (dy.start + dy.stop - 1) / 2
        centers.append((x_center, y_center))
    if not centers:
        return 0, 1
    total_area = 0
    for x_center, y_center in centers:
        y2 = int(y_center)
        x2 = int(x_center)
        # patch is a view: the in-place marking below mutates imarray, so
        # later overlapping patches see earlier markings (as in the
        # original per-image processing order).
        patch = imarray[y2 - radius:y2 + radius, x2 - radius:x2 + radius, 1]
        averg = np.mean(patch)
        patch[patch >= averg] = 255
        cut = copy.deepcopy(patch)
        cut[cut < 255] = 0
        total_area += np.sum(cut == 255)
    return len(centers), total_area
# The two per-condition loops below were near-identical copies; they now
# share _droplet_peaks_and_area().
for index, filename in enumerate(fnamelist1[:numberofdata]):
    peaks, total_area = _droplet_peaks_and_area(filename, r)
    Peaks_of_Nm[index] = peaks
    Area_of_Nm[index] = total_area
    Area_of_All[index, 0] = total_area
for index, filename in enumerate(fnamelist2[:numberofdata]):
    peaks, total_area = _droplet_peaks_and_area(filename, r)
    Peaks_of_Np[index] = peaks
    Area_of_Np[index] = total_area
    Area_of_All[index, 1] = total_area
#%%
Area_of_All_df = pd.DataFrame(Area_of_All)
Area_of_All_df.columns = ['N-','N+']
writer = pd.ExcelWriter('{}Fig_5b_Lipid_Drpolet_Area.xlsx'.format(result_path))
Area_of_All_df.to_excel(writer,'Intensity',float_format='%.5f')
writer.save() | 33.480315 | 94 | 0.599718 |
import numpy as np
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import glob
import os
from PIL import Image
import pandas as pd
import copy
import datetime
run_data = datetime.datetime.now().strftime("%Y_%m_%d")
result_path=r"./Fig5_{}/".format(run_data)
if not os.path.exists(result_path):
os.makedirs(result_path)
numberofdata=12000
Area_of_Nm=np.zeros(numberofdata)
Area_of_Np=np.zeros(numberofdata)
Area_of_All=np.zeros((numberofdata,2))
Peaks_of_Nm=np.zeros(numberofdata)
Peaks_of_Np=np.zeros(numberofdata)
entry1=r'.\Euglena\N-'
entry2=r'.\Euglena\N+'
fnamelist1 = glob.glob(os.path.join(entry1, '*.tif'))
fnamelist2 = glob.glob(os.path.join(entry2, '*.tif'))
r=3
index=0
for filename in fnamelist1[:numberofdata]:
im = Image.open(filename)
imarray = np.array(im)
imarray[imarray<5]=0
image = imarray[:,:,1]
neighborhood_size = 10
threshold = 25
data = imarray[:,:,1]
data_max = filters.maximum_filter(data, neighborhood_size)
maxima = (data == data_max)
data_min = filters.minimum_filter(data, neighborhood_size)
diff = ((data_max - data_min) > threshold)
maxima[diff == 0] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
x, y = [], []
for dy,dx in slices:
x_center = (dx.start + dx.stop - 1)/2
x.append(x_center)
y_center = (dy.start + dy.stop - 1)/2
y.append(y_center)
if len(x)>0:
Peaks_of_Nm[index]=len(x)
new_imarray=imarray
total_area=0
for i in range(0,len(x)):
y2=int(y[i])
x2=int(x[i])
averg=np.mean(new_imarray[y2-r:y2+r,x2-r:x2+r,1])
new_imarray[y2-r:y2+r,x2-r:x2+r,1][new_imarray[y2-r:y2+r,x2-r:x2+r,1]>=averg]=255
cut = copy.deepcopy(new_imarray[y2-r:y2+r,x2-r:x2+r,1])
cut[cut<255]=0
total_area+=np.sum(cut==255)
else:
total_area=1
Area_of_Nm[index]=total_area
Area_of_All[index,0]=total_area
index+=1
index=0
for filename in fnamelist2[:numberofdata]:
im = Image.open(filename)
imarray = np.array(im)
imarray[imarray<5]=0
image = imarray[:,:,1]
neighborhood_size = 10
threshold = 25
data = imarray[:,:,1]
data_max = filters.maximum_filter(data, neighborhood_size)
maxima = (data == data_max)
data_min = filters.minimum_filter(data, neighborhood_size)
diff = ((data_max - data_min) > threshold)
maxima[diff == 0] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
x, y = [], []
for dy,dx in slices:
x_center = (dx.start + dx.stop - 1)/2
x.append(x_center)
y_center = (dy.start + dy.stop - 1)/2
y.append(y_center)
if len(x)>0:
Peaks_of_Np[index]=len(x)
new_imarray=imarray
total_area=0
for i in range(0,len(x)):
y2=int(y[i])
x2=int(x[i])
averg=np.mean(new_imarray[y2-r:y2+r,x2-r:x2+r,1])
new_imarray[y2-r:y2+r,x2-r:x2+r,1][new_imarray[y2-r:y2+r,x2-r:x2+r,1]>=averg]=255
cut = copy.deepcopy(new_imarray[y2-r:y2+r,x2-r:x2+r,1])
cut[cut<255]=0
total_area+=np.sum(cut==255)
else:
total_area=1
Area_of_Np[index]=total_area
Area_of_All[index,1]=total_area
index+=1
Area_of_All_df = pd.DataFrame(Area_of_All)
Area_of_All_df.columns = ['N-','N+']
writer = pd.ExcelWriter('{}Fig_5b_Lipid_Drpolet_Area.xlsx'.format(result_path))
Area_of_All_df.to_excel(writer,'Intensity',float_format='%.5f')
writer.save() | true | true |
1c35dade20cbdab22715eba330944a249b47f391 | 3,150 | py | Python | scripts/writing/hyperopt-model-tables.py | heytitle/Syllable-based-Neural-Thai-Word-Segmentation | bb8a4f0dbabe31a65f9bfa1fd784000544e3e7f5 | [
"MIT"
] | 8 | 2020-10-22T10:15:29.000Z | 2021-09-15T08:11:34.000Z | scripts/writing/hyperopt-model-tables.py | heytitle/Syllable-based-Neural-Thai-Word-Segmentation | bb8a4f0dbabe31a65f9bfa1fd784000544e3e7f5 | [
"MIT"
] | 3 | 2021-07-04T06:14:53.000Z | 2021-11-09T03:07:16.000Z | scripts/writing/hyperopt-model-tables.py | heytitle/Syllable-based-Neural-Thai-Word-Segmentation | bb8a4f0dbabe31a65f9bfa1fd784000544e3e7f5 | [
"MIT"
] | null | null | null | # This generates model hyperopt tables for Appendix
import yaml
import pandas as pd
from attacut import utils
OUTPUT = "./writing/tables/hyperopt-model-tables.tex"
table = r"""
\begin{table*}
\centering
\begin{tabular}{lc}
\toprule
\textbf{Average training duration} & %(avg_training).0f minutes \\
\textbf{Average validation word-level $F_1$} & %(avg_val_f1)2.2f$\pm$%(std_val_f1).2f\%% \\
\textbf{Best validation word-level $F_1$} & %(best_val_f1)2.2f\%% \\
\textbf{Best model's number of trainable parameters} & %(best_num_params)s \\
\bottomrule
\end{tabular}
\begin{tabular}{rccc}
\toprule
\textbf{Hyperparameter} & \textbf{Search Space} & \textbf{Best Assignment} \\
learning rate & \textit{loguniform(1e-4, 1e-3)}& %(lr).2e \\
weight decay & \textit{loguniform(1e-6, 1e-3)} & %(weight_decay).2e \\
%(family_params)s
\bottomrule
\end{tabular}
\caption{Best hyperparameter and search space for %(name)s.}
\label{tab:appendix-hyperopt-%(ref)s}
\end{table*}
"""
family_specific_param = {
"ID-CNN": r"""
convolution filters & \textit{uniform-interger(128, 256)} & %(conv)d \\
linear layer & \textit{uniform-interger(16, 48)} & %(l1)d \\
dropout & \textit{uniform(0, 0.5)} & %(do).4f \\
""",
"BiLSTM": r"""
LSTM cells & \textit{uniform-interger(128, 512)} & %(cells)d \\
linear layer & \textit{uniform-interger(16, 48)} & %(l1)d \\
dropout & \textit{uniform(0, 0.5)} & %(do).4f \\
""",
}
if __name__ == "__main__":
    # For each hyperopt run listed in hyperopt-results.yml, load its per-trial
    # CSV, pick the trial with the best validation F1, and render one LaTeX
    # table into OUTPUT.
    with open("./hyperopt-results.yml", "r") as fh, open(OUTPUT, "w") as fw:
        data = yaml.safe_load(fh)

        for i, row in enumerate(data):
            path = row["path"]
            df = pd.read_csv(path)
            print(f"loading {path}")

            max_val_f1 = df["best-val:word_level:f1"].max()
            # "records" is the supported orient; the abbreviation "row" was
            # deprecated in pandas 1.0 and removed in 2.0.
            best_model = df[df["best-val:word_level:f1"] == max_val_f1].to_dict("records")[0]

            arch_config = utils.parse_model_params(best_model["params"])

            # Only the base families have row templates; XL variants reuse
            # them.  (The previous lookups of "ID-CNN-XL"/"BiLSTM-XL" keys
            # raised KeyError because those keys were never defined.)
            if "ID-CNN" in row["name"]:
                fam_param_tmp = family_specific_param["ID-CNN"]
            elif "BiLSTM" in row["name"]:
                fam_param_tmp = family_specific_param["BiLSTM"]
            else:
                raise ValueError(row["name"], "doesn't exist!")

            fam_param = fam_param_tmp % arch_config

            tt = table % dict(
                best_val_f1=max_val_f1 * 100,
                best_num_params="{:,}".format(best_model["num_trainable_params"]),
                avg_training=(df["training_took"] / 60).mean(),
                avg_val_f1=(df["best-val:word_level:f1"]).mean() * 100,
                std_val_f1=(df["best-val:word_level:f1"]).std() * 100,
                name=row["name"],
                lr=best_model["lr"],
                weight_decay=best_model["weight_decay"],
                family_params=fam_param,
                # The last table gets a stable "last" label; others use the index.
                ref="last" if i == len(data) - 1 else i,
            )

            fw.write(f"{tt} \n\n\n")
import yaml
import pandas as pd
from attacut import utils
OUTPUT = "./writing/tables/hyperopt-model-tables.tex"
table = r"""
\begin{table*}
\centering
\begin{tabular}{lc}
\toprule
\textbf{Average training duration} & %(avg_training).0f minutes \\
\textbf{Average validation word-level $F_1$} & %(avg_val_f1)2.2f$\pm$%(std_val_f1).2f\%% \\
\textbf{Best validation word-level $F_1$} & %(best_val_f1)2.2f\%% \\
\textbf{Best model's number of trainable parameters} & %(best_num_params)s \\
\bottomrule
\end{tabular}
\begin{tabular}{rccc}
\toprule
\textbf{Hyperparameter} & \textbf{Search Space} & \textbf{Best Assignment} \\
learning rate & \textit{loguniform(1e-4, 1e-3)}& %(lr).2e \\
weight decay & \textit{loguniform(1e-6, 1e-3)} & %(weight_decay).2e \\
%(family_params)s
\bottomrule
\end{tabular}
\caption{Best hyperparameter and search space for %(name)s.}
\label{tab:appendix-hyperopt-%(ref)s}
\end{table*}
"""
family_specific_param = {
"ID-CNN": r"""
convolution filters & \textit{uniform-interger(128, 256)} & %(conv)d \\
linear layer & \textit{uniform-interger(16, 48)} & %(l1)d \\
dropout & \textit{uniform(0, 0.5)} & %(do).4f \\
""",
"BiLSTM": r"""
LSTM cells & \textit{uniform-interger(128, 512)} & %(cells)d \\
linear layer & \textit{uniform-interger(16, 48)} & %(l1)d \\
dropout & \textit{uniform(0, 0.5)} & %(do).4f \\
""",
}
if __name__ == "__main__":
with open("./hyperopt-results.yml", "r") as fh, open(OUTPUT, "w") as fw:
data = yaml.safe_load(fh)
for i, row in enumerate(data):
path = row["path"]
df = pd.read_csv(path)
print(f"loading {path}")
max_val_f1 = df["best-val:word_level:f1"].max()
best_model = df[df["best-val:word_level:f1"] == max_val_f1].to_dict("row")[0]
arch_config = utils.parse_model_params(best_model["params"])
if "ID-CNN-XL" in row["name"]:
fam_param_tmp = family_specific_param["ID-CNN-XL"]
elif "ID-CNN" in row["name"]:
fam_param_tmp = family_specific_param["ID-CNN"]
elif "BiLSTM-XL" in row["name"]:
fam_param_tmp = family_specific_param["BiLSTM-XL"]
elif "BiLSTM" in row["name"]:
fam_param_tmp = family_specific_param["BiLSTM"]
else:
raise ValueError(row["name"], "doesn't exist!")
fam_param = fam_param_tmp % arch_config
tt = table % dict(
best_val_f1=max_val_f1*100,
best_num_params="{:,}".format(best_model["num_trainable_params"]),
avg_training=(df["training_took"] / 60).mean(),
avg_val_f1=(df["best-val:word_level:f1"]).mean() * 100,
std_val_f1=(df["best-val:word_level:f1"]).std() * 100,
name=row["name"],
lr=best_model["lr"],
weight_decay=best_model["weight_decay"],
family_params=fam_param,
ref="last" if i == len(data)-1 else i
)
fw.write(f"{tt} \n\n\n") | true | true |
1c35dafc3b8f2e33945c74a548ca881860fad516 | 2,006 | py | Python | test/test_min_max_stack.py | kisliakovsky/structures | 19969470a7e9b150b077082cc8ca0c2fc9be279e | [
"MIT"
] | null | null | null | test/test_min_max_stack.py | kisliakovsky/structures | 19969470a7e9b150b077082cc8ca0c2fc9be279e | [
"MIT"
] | null | null | null | test/test_min_max_stack.py | kisliakovsky/structures | 19969470a7e9b150b077082cc8ca0c2fc9be279e | [
"MIT"
] | null | null | null | from unittest import TestCase
from src.stack import MinMaxStack
class TestMinMaxStack(TestCase):
    """Behavioural tests for MinMaxStack, a stack of running (min, max) pairs.

    Pushing a partial ``(min, max)`` pair merges it with the pair currently
    on top, so every stored entry reflects the running minimum and maximum.
    """

    # Push sequence shared by several tests below.
    PAIRS = [(None, None), (1, None), (None, 4), (2, None), (None, 3)]

    def test_push(self):
        stack = MinMaxStack[int]()
        for pair in self.PAIRS:
            stack.push(pair)
        expected = [(None, None), (1, None), (1, 4), (2, 4), (2, 3)]
        self.assertEqual(expected, stack.as_list())

    def test_pop_and_push(self):
        stack = MinMaxStack[int]()
        for pair in self.PAIRS:
            stack.pop_and_push(pair)
        # Replacing the top each time leaves a single merged entry.
        self.assertEqual([(2, 3)], stack.as_list())

    def test_pop(self):
        stack = MinMaxStack[int]()
        for pair in self.PAIRS:
            stack.push(pair)
        for expected in [(2, 3), (2, 4), (1, 4), (1, None), (None, None)]:
            self.assertEqual(expected, stack.pop())
        with self.assertRaises(IndexError):
            stack.pop()

    def test_peek(self):
        stack = MinMaxStack[int]()
        with self.assertRaises(IndexError):
            stack.peek()
        stack.push((2, 3))
        self.assertEqual((2, 3), stack.peek())

    def test_is_empty(self):
        stack = MinMaxStack[int]()
        self.assertTrue(stack.is_empty())
        stack.push((2, 3))
        self.assertFalse(stack.is_empty())

    def test_size(self):
        stack = MinMaxStack[int]()
        self.assertEqual(0, len(stack))
        for expected_size, pair in enumerate(self.PAIRS[:4], start=1):
            stack.push(pair)
            self.assertEqual(expected_size, len(stack))
| 30.861538 | 92 | 0.563809 | from unittest import TestCase
from src.stack import MinMaxStack
class TestMinMaxStack(TestCase):
def test_push(self):
stack = MinMaxStack[int]()
stack.push((None, None))
stack.push((1, None))
stack.push((None, 4))
stack.push((2, None))
stack.push((None, 3))
self.assertEqual([(None, None), (1, None), (1, 4), (2, 4), (2, 3)], stack.as_list())
def test_pop_and_push(self):
stack = MinMaxStack[int]()
stack.pop_and_push((None, None))
stack.pop_and_push((1, None))
stack.pop_and_push((None, 4))
stack.pop_and_push((2, None))
stack.pop_and_push((None, 3))
self.assertEqual([(2, 3)], stack.as_list())
def test_pop(self):
stack = MinMaxStack[int]()
stack.push((None, None))
stack.push((1, None))
stack.push((None, 4))
stack.push((2, None))
stack.push((None, 3))
self.assertEqual((2, 3), stack.pop())
self.assertEqual((2, 4), stack.pop())
self.assertEqual((1, 4), stack.pop())
self.assertEqual((1, None), stack.pop())
self.assertEqual((None, None), stack.pop())
with self.assertRaises(IndexError):
stack.pop()
def test_peek(self):
stack = MinMaxStack[int]()
with self.assertRaises(IndexError):
stack.peek()
stack.push((2, 3))
self.assertEqual((2, 3), stack.peek())
def test_is_empty(self):
stack = MinMaxStack[int]()
self.assertTrue(stack.is_empty())
stack.push((2, 3))
self.assertFalse(stack.is_empty())
def test_size(self):
stack = MinMaxStack[int]()
self.assertEqual(0, len(stack))
stack.push((None, None))
self.assertEqual(1, len(stack))
stack.push((1, None))
self.assertEqual(2, len(stack))
stack.push((None, 4))
self.assertEqual(3, len(stack))
stack.push((2, None))
self.assertEqual(4, len(stack))
| true | true |
1c35db4708c0469265f6ce43cecb30f2ba3a546c | 1,177 | py | Python | gamechangerml/src/utilities/np_utils.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 11 | 2021-05-05T17:52:10.000Z | 2022-02-04T15:12:29.000Z | gamechangerml/src/utilities/np_utils.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 76 | 2021-07-24T02:33:16.000Z | 2022-03-20T22:40:46.000Z | gamechangerml/src/utilities/np_utils.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 6 | 2021-06-30T22:18:56.000Z | 2022-03-22T16:54:50.000Z | """
A collection of `numpy` utilities.
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
def is_zero_vector(v):
    """
    Tests if a vector is a zero vector.

    Args:
        v (numpy.ndarray): vector

    Returns:
        boolean: True if every element is zero
    """
    return (v == 0.0).all()
def l2_norm_by_row(matrix):
    """
    Row by row l2 norm of a matrix.

    Args:
        matrix (numpy.ndarray): the matrix

    Returns:
        numpy.ndarray
    """
    squared_row_sums = (matrix * matrix).sum(axis=1)
    return np.sqrt(squared_row_sums)
def l2_normed_matrix(matrix):
    """
    Normalizes each row of a matrix to unit l2 length.

    Args:
        matrix (numpy.ndarray): the matrix

    Returns:
        numpy.ndarray
    """
    # Row norms computed inline (einsum is the l2_norm_by_row formulation).
    row_norms = np.sqrt(np.einsum("ij,ij->i", matrix, matrix))
    return matrix / row_norms[:, None]
def l2_norm_vector(vector):
    """
    Scales a vector to unit l2 length.

    Non-finite or all-zero inputs are logged as warnings.  A zero vector is
    still divided by its zero norm, yielding NaN components (unchanged numpy
    semantics, so existing callers see the same result).

    Args:
        vector (numpy.ndarray): vector

    Returns:
        numpy.ndarray
    """
    # Evaluate the zero test once instead of calling is_zero_vector twice;
    # same predicate as is_zero_vector().  (Dead debug logging removed.)
    vector_is_zero = bool((vector == 0.0).all())
    if not np.isfinite(vector).all() or vector_is_zero:
        logger.warning("invalid vector")
    if vector_is_zero:
        logger.warning("zero vector")
    norm_ = np.linalg.norm(vector)
    return np.true_divide(vector, norm_)
| 19.616667 | 63 | 0.621071 | import logging
import numpy as np
logger = logging.getLogger(__name__)
def is_zero_vector(v):
return np.all(v == 0.0)
def l2_norm_by_row(matrix):
return np.sqrt(np.einsum("ij,ij->i", matrix, matrix))
def l2_normed_matrix(matrix):
l2 = l2_norm_by_row(matrix)
return matrix / l2[:, None]
def l2_norm_vector(vector):
if not np.isfinite(vector).all() or is_zero_vector(vector):
logger.warning("invalid vector")
if is_zero_vector(vector):
logger.warning("zero vector")
norm_ = np.linalg.norm(vector)
return np.true_divide(vector, norm_)
| true | true |
1c35dd5104ab1bd7b70c00ed07b14970e87e950b | 3,940 | py | Python | presalytics/lib/themes/ooxml.py | presalytics/python-client | 5d80b78562126feeeb49af4738e2c1aed12dce3a | [
"MIT"
] | 4 | 2020-02-21T16:30:46.000Z | 2021-01-12T12:22:03.000Z | presalytics/lib/themes/ooxml.py | presalytics/python-client | 5d80b78562126feeeb49af4738e2c1aed12dce3a | [
"MIT"
] | 4 | 2019-12-28T19:30:08.000Z | 2020-03-31T19:27:45.000Z | presalytics/lib/themes/ooxml.py | presalytics/python-client | 5d80b78562126feeeb49af4738e2c1aed12dce3a | [
"MIT"
] | null | null | null | import typing
import presalytics
import presalytics.client.auth
import presalytics.story.components
import presalytics.story.outline
import presalytics.story.util
import presalytics.client.presalytics_ooxml_automation.models
class OoxmlTheme(presalytics.story.components.ThemeBase):
    """Story theme backed by a Presalytics OOXML Automation theme.

    Reads colors, fonts and the slide master's color map from the OOXML
    Automation service and exposes them as a ``style`` plugin config.

    TODO: Review whether this class is obsolete
    """
    name: str
    ooxml_id: str
    plugin_config: typing.Dict
    always_refresh: bool

    __component_kind__ = 'ooxml-theme'

    def __init__(self,
                 name,
                 ooxml_theme_id,
                 plugin_config=None,
                 always_refresh=False,
                 client_info=None,
                 **kwargs):
        """
        Args:
            name: display name of the theme
            ooxml_theme_id: id of the theme in the OOXML Automation service
            plugin_config: previously resolved style parameters; fetched from
                the service when omitted
            always_refresh: re-fetch the configuration even when a
                plugin_config is supplied
            client_info: keyword arguments forwarded to presalytics.Client
                (was a mutable ``{}`` default; None is equivalent and safe)
        """
        super(OoxmlTheme, self).__init__(**kwargs)
        self.name = name
        self.ooxml_id = ooxml_theme_id
        self.always_refresh = always_refresh
        self.client_kwargs = client_info if client_info is not None else {}
        # Bug fix: a caller-supplied plugin_config was previously discarded
        # (self.plugin_config was only assigned on the refresh path), which
        # broke serialize() after deseriailize(..., always_refresh=False).
        self.plugin_config = plugin_config or {}
        if not plugin_config or self.always_refresh:
            self.get_configuration()

    def get_configuration(self):
        """Builds self.plugin_config from the OOXML Automation service."""
        client = presalytics.Client(**self.client_kwargs)
        theme = client.ooxml_automation.theme_themes_details_get_id(self.ooxml_id)
        # Bookkeeping fields that must not leak into the style parameters.
        extra_params = ['dateCreated', 'dateModified', 'userCreated', 'userModified', 'id', 'themeId']
        colors = {k: v for k, v in theme.colors.to_dict().items() if k not in extra_params}
        fonts = {k: v for k, v in theme.fonts.to_dict().items() if k not in extra_params}
        slide_details = client.ooxml_automation.slides_slides_details_get_id(theme.slide_id)
        color_map_dict = slide_details.slide_master.to_dict()["color_map"]
        color_types = client.ooxml_automation.shared_colortypes_get()
        # Resolve the slide master's logical color slots to concrete colors.
        mapped_colors = {
            "background1": self.map_color_type("background1", color_map_dict, colors, color_types),
            "background2": self.map_color_type("background2", color_map_dict, colors, color_types),
            "text1": self.map_color_type("text1", color_map_dict, colors, color_types),
            "text2": self.map_color_type("text2", color_map_dict, colors, color_types)
        }
        color_params = {k: v for k, v in colors.items() if k not in extra_params}
        color_params.update(mapped_colors)
        params = {k: v for k, v in color_params.items() if k not in extra_params}
        params.update(fonts)
        self.plugin_config = params

    def map_color_type(
            self,
            color_map_name: str,
            color_map: typing.Dict,
            theme_colors: typing.Dict,
            color_types_list=None) -> str:
        """Looks up the theme color assigned to one logical color-map slot.

        Fetches the color-type list from the service when not supplied.
        Returns None when the resolved color name is absent from
        ``theme_colors``.
        """
        if not color_types_list:
            client = presalytics.Client(**self.client_kwargs)
            color_types_list = client.ooxml_automation.shared_colortypes_get()
        color_id = color_map[color_map_name]
        color_name = next(x.name for x in color_types_list if x.type_id == color_id)
        # Lower-case the first letter to match the theme_colors key style.
        key = color_name[0].lower() + color_name[1:]
        color = theme_colors.get(key, None)
        return color

    def serialize(self):
        """Serializes this theme to a story-outline Theme with a style plugin."""
        plugin = presalytics.story.outline.Plugin(
            kind='style',
            name='ooxml-theme',
            config=self.plugin_config
        )
        data = {
            "ooxml_theme_id": self.ooxml_id,
            "always_refresh": self.always_refresh,
        }
        theme = presalytics.story.outline.Theme(
            kind=self.__component_kind__,
            name=self.name,
            data=data,
            plugins=[plugin]
        )
        return theme

    @classmethod
    def deseriailize(cls, component, **kwargs):
        """Reconstructs an OoxmlTheme from a serialized outline Theme.

        NOTE: the method-name typo ("deseriailize") is kept on purpose --
        callers depend on this exact name.
        """
        plugin_config = component.plugins[0].config
        return cls(
            name=component.name,
            ooxml_theme_id=component.data["ooxml_theme_id"],
            plugin_config=plugin_config,
            always_refresh=component.data["always_refresh"],
            **kwargs
        )
| 37.52381 | 102 | 0.63198 | import typing
import presalytics
import presalytics.client.auth
import presalytics.story.components
import presalytics.story.outline
import presalytics.story.util
import presalytics.client.presalytics_ooxml_automation.models
class OoxmlTheme(presalytics.story.components.ThemeBase):
name: str
ooxml_id: str
plugin_config: typing.Dict
always_refresh: bool
__component_kind__ = 'ooxml-theme'
def __init__(self,
name,
ooxml_theme_id,
plugin_config=None,
always_refresh=False,
client_info={},
**kwargs):
super(OoxmlTheme, self).__init__(**kwargs)
self.name = name
self.ooxml_id = ooxml_theme_id
self.always_refresh = always_refresh
self.client_kwargs = client_info
if not plugin_config or self.always_refresh:
if not plugin_config:
self.plugin_config = {}
self.get_configuration()
def get_configuration(self):
client = presalytics.Client(**self.client_kwargs)
theme = client.ooxml_automation.theme_themes_details_get_id(self.ooxml_id)
extra_params = ['dateCreated', 'dateModified', 'userCreated', 'userModified', 'id', 'themeId']
colors = {k: v for k, v in theme.colors.to_dict().items() if k not in extra_params}
fonts = {k: v for k, v in theme.fonts.to_dict().items() if k not in extra_params}
slide_details = client.ooxml_automation.slides_slides_details_get_id(theme.slide_id)
color_map_dict = slide_details.slide_master.to_dict()["color_map"]
color_types = client.ooxml_automation.shared_colortypes_get()
mapped_colors = {
"background1": self.map_color_type("background1", color_map_dict, colors, color_types),
"background2": self.map_color_type("background2", color_map_dict, colors, color_types),
"text1": self.map_color_type("text1", color_map_dict, colors, color_types),
"text2": self.map_color_type("text2", color_map_dict, colors, color_types)
}
color_params = {k: v for k, v in colors.items() if k not in extra_params}
color_params.update(mapped_colors)
params = {k: v for k, v in color_params.items() if k not in extra_params}
params.update(fonts)
self.plugin_config = params
def map_color_type(
self,
color_map_name: str,
color_map: typing.Dict,
theme_colors: typing.Dict,
color_types_list=None) -> str:
if not color_types_list:
client = presalytics.Client(**self.client_kwargs)
color_types_list = client.ooxml_automation.shared_colortypes_get()
color_id = color_map[color_map_name]
color_name = next(x.name for x in color_types_list if x.type_id == color_id)
key = color_name[0].lower() + color_name[1:]
color = theme_colors.get(key, None)
return color
def serialize(self):
plugin = presalytics.story.outline.Plugin(
kind='style',
name='ooxml-theme',
config=self.plugin_config
)
data = {
"ooxml_theme_id": self.ooxml_id,
"always_refresh": self.always_refresh,
}
theme = presalytics.story.outline.Theme(
kind=self.__component_kind__,
name=self.name,
data=data,
plugins=[plugin]
)
return theme
@classmethod
def deseriailize(cls, component, **kwargs):
plugin_config = component.plugins[0].config
return cls(
name=component.name,
ooxml_theme_id=component.data["ooxml_theme_id"],
plugin_config=plugin_config,
always_refresh=component.data["always_refresh"],
**kwargs
)
| true | true |
1c35ddf24a07d5e4617b24a5101c7fad8dff9e53 | 1,527 | py | Python | process_cuwb_data/honeycomb_imu_data.py | WildflowerSchools/wf-process-cuwb-data | 8d94eeec82401f0f62ce1e0b7fdefd49e0328921 | [
"MIT"
] | null | null | null | process_cuwb_data/honeycomb_imu_data.py | WildflowerSchools/wf-process-cuwb-data | 8d94eeec82401f0f62ce1e0b7fdefd49e0328921 | [
"MIT"
] | 2 | 2020-10-01T18:18:05.000Z | 2020-12-17T22:40:06.000Z | process_cuwb_data/honeycomb_imu_data.py | WildflowerSchools/wf-process-cuwb-data | 8d94eeec82401f0f62ce1e0b7fdefd49e0328921 | [
"MIT"
] | null | null | null | from honeycomb_io import fetch_cuwb_position_data, fetch_cuwb_accelerometer_data, fetch_cuwb_gyroscope_data, fetch_cuwb_magnetometer_data, add_device_assignment_info, add_device_entity_assignment_info, add_tray_material_assignment_info
from .utils.util import filter_entity_type
def fetch_imu_data(imu_type,
                   environment,
                   start,
                   end,
                   entity_type='all'):
    """Fetch CUWB tag IMU data of one type and attach assignment metadata.

    Returns a timestamp-indexed DataFrame (with device, entity and tray
    material assignment columns added and a ``type`` column set to
    ``imu_type``), or None when no data exists for the window.

    Raises:
        ValueError: if ``imu_type`` is not one of position, accelerometer,
            gyroscope or magnetometer.
    """
    # Table-driven dispatch instead of an if/elif chain.
    fetchers = {
        'position': fetch_cuwb_position_data,
        'accelerometer': fetch_cuwb_accelerometer_data,
        'gyroscope': fetch_cuwb_gyroscope_data,
        'magnetometer': fetch_cuwb_magnetometer_data,
    }
    if imu_type not in fetchers:
        raise ValueError("Unexpected IMU type: {}".format(imu_type))
    fetch = fetchers[imu_type]

    df = fetch(
        start=start,
        end=end,
        device_ids=None,
        environment_id=None,
        environment_name=environment,
        device_types=['UWBTAG'],
        output_format='dataframe',
        sort_arguments={"field": "timestamp"},
        chunk_size=20000
    )
    if len(df) == 0:
        return None

    # Attach device / entity / tray-material assignment metadata.
    df = add_device_assignment_info(df)
    df = add_device_entity_assignment_info(df)
    df = add_tray_material_assignment_info(df)

    # Keep only the requested entity type and label the IMU stream.
    df = filter_entity_type(df, entity_type=entity_type)
    df['type'] = imu_type

    df.reset_index(drop=True, inplace=True)
    df.set_index('timestamp', inplace=True)
    return df
| 32.489362 | 235 | 0.670596 | from honeycomb_io import fetch_cuwb_position_data, fetch_cuwb_accelerometer_data, fetch_cuwb_gyroscope_data, fetch_cuwb_magnetometer_data, add_device_assignment_info, add_device_entity_assignment_info, add_tray_material_assignment_info
from .utils.util import filter_entity_type
def fetch_imu_data(imu_type,
environment,
start,
end,
entity_type='all'):
if imu_type == 'position':
fetch = fetch_cuwb_position_data
elif imu_type == 'accelerometer':
fetch = fetch_cuwb_accelerometer_data
elif imu_type == 'gyroscope':
fetch = fetch_cuwb_gyroscope_data
elif imu_type == 'magnetometer':
fetch = fetch_cuwb_magnetometer_data
else:
raise ValueError("Unexpected IMU type: {}".format(imu_type))
df = fetch(
start=start,
end=end,
device_ids=None,
environment_id=None,
environment_name=environment,
device_types=['UWBTAG'],
output_format='dataframe',
sort_arguments={"field": "timestamp"},
chunk_size=20000
)
if len(df) == 0:
return None
df = add_device_assignment_info(df)
df = add_device_entity_assignment_info(df)
df = add_tray_material_assignment_info(df)
df = filter_entity_type(df, entity_type=entity_type)
df['type'] = imu_type
df.reset_index(drop=True, inplace=True)
df.set_index('timestamp', inplace=True)
return df
| true | true |
1c35de4f81c28c46e9cb11c08b87d421ba5c8ca8 | 922 | py | Python | uncertainty_baselines/models/criteo_mlp_test.py | y0ast/uncertainty-baselines | 8d32c77ba0803ed715c1406378adf10ebd61ab74 | [
"Apache-2.0"
] | 794 | 2020-07-17T06:23:58.000Z | 2022-03-31T08:31:53.000Z | uncertainty_baselines/models/criteo_mlp_test.py | ranganathkrishnan/uncertainty-baselines | b9c6b870790034c1a2303246f887fd2cf53bff38 | [
"Apache-2.0"
] | 136 | 2020-08-04T22:42:04.000Z | 2022-03-26T21:07:03.000Z | uncertainty_baselines/models/criteo_mlp_test.py | ranganathkrishnan/uncertainty-baselines | b9c6b870790034c1a2303246f887fd2cf53bff38 | [
"Apache-2.0"
] | 129 | 2020-08-16T12:46:55.000Z | 2022-03-31T23:00:10.000Z | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for uncertainty_baselines.models.criteo_mlp."""
import tensorflow as tf
import uncertainty_baselines as ub
class CriteoMlpTest(tf.test.TestCase):
  """Smoke test for uncertainty_baselines.models.criteo_mlp."""

  def testCreateModel(self):
    # criteo_mlp(31) is expected to build a model with exactly 47 layers.
    model = ub.models.criteo_mlp(31)
    self.assertLen(model.layers, 47)


if __name__ == '__main__':
  tf.test.main()
| 29.741935 | 74 | 0.75705 |
import tensorflow as tf
import uncertainty_baselines as ub
class CriteoMlpTest(tf.test.TestCase):
def testCreateModel(self):
model = ub.models.criteo_mlp(31)
self.assertLen(model.layers, 47)
if __name__ == '__main__':
tf.test.main()
| true | true |
1c35df3b9d562344d9d36cd71b15b4471d668925 | 4,164 | py | Python | tests/test_agent_wrappers.py | vengalraoguttha/EGG | e4f8412f197543ec7f1f00cf89b5a364b038dc57 | [
"MIT"
] | 254 | 2019-06-05T00:20:39.000Z | 2022-03-25T04:46:12.000Z | tests/test_agent_wrappers.py | vengalraoguttha/EGG | e4f8412f197543ec7f1f00cf89b5a364b038dc57 | [
"MIT"
] | 95 | 2019-06-17T19:01:18.000Z | 2022-03-21T15:18:49.000Z | tests/test_agent_wrappers.py | vengalraoguttha/EGG | e4f8412f197543ec7f1f00cf89b5a364b038dc57 | [
"MIT"
] | 97 | 2019-06-05T02:04:14.000Z | 2022-03-28T19:10:21.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from pathlib import Path
import torch
from torch.nn import functional as F
import egg.core as core
# Make the repository root (two levels up from this file) importable.
sys.path.insert(0, Path(__file__).parent.parent.resolve().as_posix())

# Fixed toy batch: 8 one-hot inputs and their binary class labels.
BATCH_X = torch.eye(8)
BATCH_Y = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1]).long()
class Dataset:
    """Single-batch iterable that feeds (BATCH_X, BATCH_Y) to the trainer."""

    def __iter__(self):
        return iter([(BATCH_X, BATCH_Y)])
class ToyAgent(torch.nn.Module):
    """Toy sender: one bias-free linear layer mapping 8-dim inputs to
    log-probabilities over 2 symbols."""

    def __init__(self):
        super().__init__()
        # Tests outside this class read `fc1.weight`, so the name is fixed.
        self.fc1 = torch.nn.Linear(8, 2, bias=False)

    def forward(self, x, _aux_input=None):
        logits = self.fc1(x)
        return F.log_softmax(logits, dim=1)
class Receiver(torch.nn.Module):
    """Pass-through receiver: returns the incoming message unchanged."""

    def __init__(self):
        super(Receiver, self).__init__()

    def forward(self, x, _input=None, _aux_input=None):
        return x
def test_toy_agent_gs():
    """GumbelSoftmaxWrapper sampling modes and trainability on the toy task."""
    core.init()
    agent = core.GumbelSoftmaxWrapper(ToyAgent())

    # Eval mode: 8 positive entries across the (8, 2) output -- one per row.
    agent.eval()
    output = agent(BATCH_X)
    assert output.size() == torch.Size((8, 2))
    assert (output > 0).sum() == 8

    # Train mode with a high temperature: all 16 entries strictly positive,
    # i.e. the samples are soft/relaxed rather than one-hot.
    agent.train()
    agent.temperature = 10.0
    output = agent(BATCH_X, {})
    assert output.size() == torch.Size((8, 2))
    assert (output > 0).sum() == 16

    agent.temperature = 0.5

    optimizer = torch.optim.Adam(agent.parameters())
    for _ in range(1000):
        optimizer.zero_grad()
        out = agent(BATCH_X, {})
        loss = F.cross_entropy(out, BATCH_Y)
        loss.backward()
        optimizer.step()

    # After training, each input column's argmax weight matches its label.
    assert (agent.agent.fc1.weight.t().argmax(dim=1) == BATCH_Y).all()
def test_game_gs():
    """End-to-end Gumbel-Softmax symbol game learns the toy mapping."""
    core.init()
    sender = core.GumbelSoftmaxWrapper(ToyAgent())
    receiver = Receiver()
    # Loss scores the receiver output against the labels; other args unused.
    loss = lambda sender_input, message, receiver_input, receiver_output, labels, aux_input: (
        F.cross_entropy(receiver_output, labels),
        {},
    )

    game = core.SymbolGameGS(sender, receiver, loss)
    optimizer = torch.optim.Adam(game.parameters())

    data = Dataset()
    trainer = core.Trainer(game, optimizer, train_data=data, validation_data=None)
    trainer.train(1000)

    # The sender's weights should encode the input -> label mapping.
    assert (sender.agent.fc1.weight.t().argmax(dim=1).cpu() == BATCH_Y).all()
def test_toy_agent_reinforce():
    """ReinforceWrapper can learn the toy mapping via policy gradient."""
    core.init()
    agent = core.ReinforceWrapper(ToyAgent())

    optimizer = torch.optim.Adam(agent.parameters())
    for _ in range(1000):
        optimizer.zero_grad()
        output, log_prob, entropy = agent(BATCH_X, {})
        # REINFORCE objective: reward 1 for correct samples, weighted by
        # the sample log-probability (negated for gradient descent).
        loss = -((output == BATCH_Y).float() * log_prob).mean()
        loss.backward()
        optimizer.step()

    assert (agent.agent.fc1.weight.t().argmax(dim=1).cpu() == BATCH_Y).all()
def test_game_reinforce():
    """End-to-end REINFORCE symbol game learns the toy mapping."""
    core.init()
    sender = core.ReinforceWrapper(ToyAgent())
    receiver = core.ReinforceDeterministicWrapper(Receiver())
    # Per-example loss: -1 when the receiver output equals the label, 0 otherwise.
    loss = lambda sender_input, message, receiver_input, receiver_output, labels, aux_input: (
        -(receiver_output == labels).float(),
        {},
    )

    game = core.SymbolGameReinforce(
        sender, receiver, loss, sender_entropy_coeff=1e-1, receiver_entropy_coeff=0.0
    )
    optimizer = torch.optim.Adagrad(game.parameters(), lr=1e-1)

    data = Dataset()
    trainer = core.Trainer(game, optimizer, train_data=data, validation_data=None)
    trainer.train(5000)

    # On failure, dump the learned weights for debugging.
    assert (sender.agent.fc1.weight.t().argmax(dim=1).cpu() == BATCH_Y).all(), str(
        sender.agent.fc1.weight
    )
def test_symbol_wrapper():
    """SymbolReceiverWrapper must accept both REINFORCE (long ids) and GS (one-hot) messages."""
    core.init()
    receiver = core.SymbolReceiverWrapper(Receiver(), vocab_size=15, agent_input_size=5)
    # when trained with REINFORCE, the message would be encoded as long ids
    message_rf = torch.randint(high=15, size=(16,)).long()
    output_rf = receiver(message_rf)
    assert output_rf.size() == torch.Size((16, 5))
    # when trained with Gumbel-Softmax, the message would be encoded as one-hots
    message_gs = torch.zeros((16, 15))
    message_gs.scatter_(
        1, message_rf.unsqueeze(1), 1.0
    )  # same message, one-hot-encoded
    output_gs = receiver(message_gs)
    # Both encodings of the same message must produce identical embeddings.
    assert output_rf.eq(output_gs).all().item() == 1
| 27.215686 | 94 | 0.65634 |
import sys
from pathlib import Path
import torch
from torch.nn import functional as F
import egg.core as core
sys.path.insert(0, Path(__file__).parent.parent.resolve().as_posix())
BATCH_X = torch.eye(8)
BATCH_Y = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1]).long()
class Dataset:
    """Single-batch iterable used as a toy data source for the trainer."""

    def __iter__(self):
        # Lazily yield the one and only (input, label) batch.
        yield BATCH_X, BATCH_Y
class ToyAgent(torch.nn.Module):
    """Minimal sender agent: one bias-free linear layer followed by log-softmax."""

    def __init__(self):
        super(ToyAgent, self).__init__()
        # 8 input features -> 2 output symbols, no bias term.
        self.fc1 = torch.nn.Linear(8, 2, bias=False)

    def forward(self, x, _aux_input=None):
        """Return per-symbol log-probabilities for each row of ``x``."""
        logits = self.fc1(x)
        return F.log_softmax(logits, dim=1)
class Receiver(torch.nn.Module):
    """Identity receiver: echoes the incoming message back unchanged."""

    def __init__(self):
        super(Receiver, self).__init__()

    def forward(self, x, _input=None, _aux_input=None):
        # No transformation: the message itself is the output.
        return x
def test_toy_agent_gs():
core.init()
agent = core.GumbelSoftmaxWrapper(ToyAgent())
agent.eval()
output = agent(BATCH_X)
assert output.size() == torch.Size((8, 2))
assert (output > 0).sum() == 8
agent.train()
agent.temperature = 10.0
output = agent(BATCH_X, {})
assert output.size() == torch.Size((8, 2))
assert (output > 0).sum() == 16
agent.temperature = 0.5
optimizer = torch.optim.Adam(agent.parameters())
for _ in range(1000):
optimizer.zero_grad()
out = agent(BATCH_X, {})
loss = F.cross_entropy(out, BATCH_Y)
loss.backward()
optimizer.step()
assert (agent.agent.fc1.weight.t().argmax(dim=1) == BATCH_Y).all()
def test_game_gs():
core.init()
sender = core.GumbelSoftmaxWrapper(ToyAgent())
receiver = Receiver()
loss = lambda sender_input, message, receiver_input, receiver_output, labels, aux_input: (
F.cross_entropy(receiver_output, labels),
{},
)
game = core.SymbolGameGS(sender, receiver, loss)
optimizer = torch.optim.Adam(game.parameters())
data = Dataset()
trainer = core.Trainer(game, optimizer, train_data=data, validation_data=None)
trainer.train(1000)
assert (sender.agent.fc1.weight.t().argmax(dim=1).cpu() == BATCH_Y).all()
def test_toy_agent_reinforce():
core.init()
agent = core.ReinforceWrapper(ToyAgent())
optimizer = torch.optim.Adam(agent.parameters())
for _ in range(1000):
optimizer.zero_grad()
output, log_prob, entropy = agent(BATCH_X, {})
loss = -((output == BATCH_Y).float() * log_prob).mean()
loss.backward()
optimizer.step()
assert (agent.agent.fc1.weight.t().argmax(dim=1).cpu() == BATCH_Y).all()
def test_game_reinforce():
core.init()
sender = core.ReinforceWrapper(ToyAgent())
receiver = core.ReinforceDeterministicWrapper(Receiver())
loss = lambda sender_input, message, receiver_input, receiver_output, labels, aux_input: (
-(receiver_output == labels).float(),
{},
)
game = core.SymbolGameReinforce(
sender, receiver, loss, sender_entropy_coeff=1e-1, receiver_entropy_coeff=0.0
)
optimizer = torch.optim.Adagrad(game.parameters(), lr=1e-1)
data = Dataset()
trainer = core.Trainer(game, optimizer, train_data=data, validation_data=None)
trainer.train(5000)
assert (sender.agent.fc1.weight.t().argmax(dim=1).cpu() == BATCH_Y).all(), str(
sender.agent.fc1.weight
)
def test_symbol_wrapper():
core.init()
receiver = core.SymbolReceiverWrapper(Receiver(), vocab_size=15, agent_input_size=5)
message_rf = torch.randint(high=15, size=(16,)).long()
output_rf = receiver(message_rf)
assert output_rf.size() == torch.Size((16, 5))
message_gs = torch.zeros((16, 15))
message_gs.scatter_(
1, message_rf.unsqueeze(1), 1.0
)
output_gs = receiver(message_gs)
assert output_rf.eq(output_gs).all().item() == 1
| true | true |
1c35dfa8f12ab7afea203f37dff851c3d86d213b | 3,241 | py | Python | tests/job/test_lifecycle.py | workfloworchestrator/SuPA | 75c34a446e7133ac3f9378810db749a7df2c21a3 | [
"Apache-2.0"
] | null | null | null | tests/job/test_lifecycle.py | workfloworchestrator/SuPA | 75c34a446e7133ac3f9378810db749a7df2c21a3 | [
"Apache-2.0"
] | 6 | 2021-12-01T13:05:28.000Z | 2022-03-07T12:40:10.000Z | tests/job/test_lifecycle.py | workfloworchestrator/SuPA | 75c34a446e7133ac3f9378810db749a7df2c21a3 | [
"Apache-2.0"
] | null | null | null | from datetime import timedelta
from typing import Any
from sqlalchemy import Column
import tests.shared.state_machine as state_machine
from supa.job.lifecycle import TerminateJob
from supa.util.timestamp import current_timestamp
def test_terminate_job_auto_start(
    connection_id: Column,
    connection: None,
    terminating: None,
    auto_start: None,
    auto_start_job: None,
    get_stub: None,
    caplog: Any,
) -> None:
    """Test TerminateJob to transition to Terminated and transition data plane to Deactivated."""
    terminate_job = TerminateJob(connection_id)
    terminate_job.__call__()
    # With an auto-start pending, terminating must cancel the scheduled
    # activation and leave the data plane deactivated.
    assert state_machine.is_deactivated(connection_id)
    assert state_machine.is_terminated(connection_id)
    assert "Terminating reservation" in caplog.text
    assert "Canceled automatic enable of data plane at start time" in caplog.text
def test_terminate_job_auto_end(
    connection_id: Column, connection: None, terminating: None, auto_end: None, get_stub: None, caplog: Any
) -> None:
    """Test TerminateJob to transition to Terminated, add DeactivateJob and canceling AutoEndJob."""
    terminate_job = TerminateJob(connection_id)
    terminate_job.__call__()
    # assert state_machine.is_deactivating(connection_id)  # FIXME need DeactivateJob monkey patch
    assert state_machine.is_terminated(connection_id)
    assert "Terminating reservation" in caplog.text
    # The pending auto-end must be canceled and replaced with an immediate deactivate.
    assert "Canceled automatic disable of data plane at end time" in caplog.text
    assert 'Added job "DeactivateJob" to job store' in caplog.text
def test_terminate_job_activated(
    connection_id: Column, connection: None, terminating: None, activated: None, get_stub: None, caplog: Any
) -> None:
    """Test TerminateJob to transition to Terminated and add DeactivateJob."""
    terminate_job = TerminateJob(connection_id)
    terminate_job.__call__()
    # assert state_machine.is_deactivating(connection_id)  # FIXME need DeactivateJob monkey patch
    assert state_machine.is_terminated(connection_id)
    assert "Terminating reservation" in caplog.text
    # No auto-end job exists in this fixture, so no cancellation message is logged.
    assert "Canceled automatic disable of data plane at end time" not in caplog.text
    assert 'Added job "DeactivateJob" to job store' in caplog.text
def test_terminate_job_recover(connection_id: Column, terminating: None, get_stub: None, caplog: Any) -> None:
    """Test TerminateJob to recover reservations in state Terminating."""
    terminate_job = TerminateJob(connection_id)
    job_list = terminate_job.recover()
    # Exactly the one reservation left in Terminating should be picked up.
    assert len(job_list) == 1
    assert job_list[0].connection_id == connection_id
    assert state_machine.is_terminating(connection_id)
    # Keep only the structured log records emitted by TerminateJob itself.
    msgs = [
        logrecord.msg
        for logrecord in caplog.records
        if "job" in logrecord.msg and logrecord.msg["job"] == "TerminateJob"
    ]
    assert len(msgs) == 1
    assert msgs[0]["connection_id"] == str(connection_id)
    assert msgs[0]["event"] == "Recovering job"
def test_terminate_job_trigger(connection_id: Column, caplog: Any) -> None:
    """Test TerminateJob to return trigger to run immediately."""
    terminate_job = TerminateJob(connection_id)
    job_trigger = terminate_job.trigger()
    # The trigger's run date should be (approximately) "now"; allow some slack.
    assert current_timestamp() - job_trigger.run_date < timedelta(seconds=5)  # more or less now
| 41.551282 | 110 | 0.754088 | from datetime import timedelta
from typing import Any
from sqlalchemy import Column
import tests.shared.state_machine as state_machine
from supa.job.lifecycle import TerminateJob
from supa.util.timestamp import current_timestamp
def test_terminate_job_auto_start(
connection_id: Column,
connection: None,
terminating: None,
auto_start: None,
auto_start_job: None,
get_stub: None,
caplog: Any,
) -> None:
terminate_job = TerminateJob(connection_id)
terminate_job.__call__()
assert state_machine.is_deactivated(connection_id)
assert state_machine.is_terminated(connection_id)
assert "Terminating reservation" in caplog.text
assert "Canceled automatic enable of data plane at start time" in caplog.text
def test_terminate_job_auto_end(
connection_id: Column, connection: None, terminating: None, auto_end: None, get_stub: None, caplog: Any
) -> None:
terminate_job = TerminateJob(connection_id)
terminate_job.__call__()
(connection_id)
assert "Terminating reservation" in caplog.text
assert "Canceled automatic disable of data plane at end time" in caplog.text
assert 'Added job "DeactivateJob" to job store' in caplog.text
def test_terminate_job_activated(
connection_id: Column, connection: None, terminating: None, activated: None, get_stub: None, caplog: Any
) -> None:
terminate_job = TerminateJob(connection_id)
terminate_job.__call__()
(connection_id)
assert "Terminating reservation" in caplog.text
assert "Canceled automatic disable of data plane at end time" not in caplog.text
assert 'Added job "DeactivateJob" to job store' in caplog.text
def test_terminate_job_recover(connection_id: Column, terminating: None, get_stub: None, caplog: Any) -> None:
terminate_job = TerminateJob(connection_id)
job_list = terminate_job.recover()
assert len(job_list) == 1
assert job_list[0].connection_id == connection_id
assert state_machine.is_terminating(connection_id)
msgs = [
logrecord.msg
for logrecord in caplog.records
if "job" in logrecord.msg and logrecord.msg["job"] == "TerminateJob"
]
assert len(msgs) == 1
assert msgs[0]["connection_id"] == str(connection_id)
assert msgs[0]["event"] == "Recovering job"
def test_terminate_job_trigger(connection_id: Column, caplog: Any) -> None:
terminate_job = TerminateJob(connection_id)
job_trigger = terminate_job.trigger()
assert current_timestamp() - job_trigger.run_date < timedelta(seconds=5)
| true | true |
1c35dff0a37402a1b63768cdbb3b60fdc7171aaf | 2,625 | py | Python | WX272_lab4_thkn.py | Kyl67899/python-labs | aafc6fc94837ee43c9ef2e1b103d86f80dfc9814 | [
"FSFAP"
] | null | null | null | WX272_lab4_thkn.py | Kyl67899/python-labs | aafc6fc94837ee43c9ef2e1b103d86f80dfc9814 | [
"FSFAP"
] | null | null | null | WX272_lab4_thkn.py | Kyl67899/python-labs | aafc6fc94837ee43c9ef2e1b103d86f80dfc9814 | [
"FSFAP"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 15:20:21 2020
@author: parsotak
"""
# import datasets
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
#Define input file
infile = '/wx/storage/halpea17/wx272/20170909_erai.nc'
#Read in the file
f = Dataset(infile, 'r')
#read in geo height contours at 00z 1000-500mb
gpot_1000mb = f['HGT_GDS0_ISBL'][2, 3, :, :]
gpot_500mb = f['HGT_GDS0_ISBL'][2, 3, :, :]
#Read in lat. and long.
lats = f['g0_lat_1'][:]
lons = f['g0_lon_2'][:]
print(lats.shape)
print(lons.shape)
#getting the thickness of the two mb Convert mb to dams
thkns = (gpot_500mb - gpot_1000mb)/ 10
#Using the heights to find the temps. from 1000-500mb at 12z and convert to C
tempsK = (10 * thkns) / 29.3 * (np.log(1000/500))
tempsC = tempsK - 273.15
#calculate the average temp from 1000 mb to 500 mb
#temp_avg = np.mean()
#from 1D to 2D to plot to a map
lon2d, lat2d = np.meshgrid(lons, lats)
#Define a figure
fig = plt.figure(figsize = (12,8))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
#Define basemap
m = Basemap(llcrnrlon = 200., llcrnrlat = 20., urcrnrlon = 320., urcrnrlat = 65., resolution = 'l', projection = 'merc', ax = ax)
xi, yi = m(lon2d, lat2d)
m.drawcoastlines()
m.drawstates()
m.drawcountries()
#lat are 20 N - 65 N every 10 ;lons are 160 W - 40 W every 20.
m.drawparallels(np.arange(-80., 81., 10.), labels = [1, 0, 0, 0], fontsize = 12)
m.drawmeridians(np.arange(0., 359., 20.), labels = [0, 0 ,0, 1], fontsize = 12)
#range of temps avg
range_tempsavg = np.arange(-30, 21, 5)
#range for the avg contour
range_contour = np.arange(510, 601, 6)
#contours for temps
contour_temps1 = m.contourf(xi, yi, tempsC, range_tempsavg) # range_tempsavg is either not being read in and not being averaged
#not printing contours
#contours for the thinkness
contour_thkns = m.contour(xi, yi, thkns, range_contour)
#contour thinkness less than equal to 540 blue
#contour thinkness greater than 540 red
#Add colorbar for temps
cbar = plt.colorbar(contour_temps1, orientation = 'horizontal', pad = 0.05, shrink = 0.75, ax = ax, ticks = range_tempsavg)
#plot contours from each range with the color to define the differences in thinkness levels
#increase size of labels
cbar.ax.tick_params(labelsize = 14)
cbar.set_label('1000-500 mb average temperature ($^{o}$C)', fontsize = 14)
#add a title
ax.set_title('1000-500 mb thickness (dam) and average temperature $^{o}$C on 20151107 at 00Z', fontsize = 12)
#save png
plt.savefig("parsotak_lab4_thkn.png")
plt.show() | 21.341463 | 129 | 0.699048 |
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
infile = '/wx/storage/halpea17/wx272/20170909_erai.nc'
f = Dataset(infile, 'r')
gpot_1000mb = f['HGT_GDS0_ISBL'][2, 3, :, :]
gpot_500mb = f['HGT_GDS0_ISBL'][2, 3, :, :]
lats = f['g0_lat_1'][:]
lons = f['g0_lon_2'][:]
print(lats.shape)
print(lons.shape)
thkns = (gpot_500mb - gpot_1000mb)/ 10
tempsK = (10 * thkns) / 29.3 * (np.log(1000/500))
tempsC = tempsK - 273.15
lon2d, lat2d = np.meshgrid(lons, lats)
fig = plt.figure(figsize = (12,8))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(llcrnrlon = 200., llcrnrlat = 20., urcrnrlon = 320., urcrnrlat = 65., resolution = 'l', projection = 'merc', ax = ax)
xi, yi = m(lon2d, lat2d)
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.drawparallels(np.arange(-80., 81., 10.), labels = [1, 0, 0, 0], fontsize = 12)
m.drawmeridians(np.arange(0., 359., 20.), labels = [0, 0 ,0, 1], fontsize = 12)
range_tempsavg = np.arange(-30, 21, 5)
range_contour = np.arange(510, 601, 6)
contour_temps1 = m.contourf(xi, yi, tempsC, range_tempsavg)
contour_thkns = m.contour(xi, yi, thkns, range_contour)
cbar = plt.colorbar(contour_temps1, orientation = 'horizontal', pad = 0.05, shrink = 0.75, ax = ax, ticks = range_tempsavg)
cbar.ax.tick_params(labelsize = 14)
cbar.set_label('1000-500 mb average temperature ($^{o}$C)', fontsize = 14)
ax.set_title('1000-500 mb thickness (dam) and average temperature $^{o}$C on 20151107 at 00Z', fontsize = 12)
plt.savefig("parsotak_lab4_thkn.png")
plt.show() | true | true |
1c35e085f8dd013b30da4d1fe0933d17adccad6e | 11,833 | py | Python | text-video-search/src/python/embedding.py | mikhail-tsir/vespa-exloration | 9bebc00acb43021fa60c6e144fe4f1fa1d7719fc | [
"Apache-2.0"
] | null | null | null | text-video-search/src/python/embedding.py | mikhail-tsir/vespa-exloration | 9bebc00acb43021fa60c6e144fe4f1fa1d7719fc | [
"Apache-2.0"
] | null | null | null | text-video-search/src/python/embedding.py | mikhail-tsir/vespa-exloration | 9bebc00acb43021fa60c6e144fe4f1fa1d7719fc | [
"Apache-2.0"
] | null | null | null | import os
import glob
import ntpath
from collections import Counter
import numpy as np
import imageio
from vespa.package import ApplicationPackage, Field, HNSW, RankProfile, QueryTypeField
from vespa.application import Vespa
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import ToPILImage
import clip
from tenacity import retry, wait_exponential, stop_after_attempt
def translate_model_names_to_valid_vespa_field_names(model_name):
    """Lower-case a CLIP model name and replace '/' and '-' with '_' for Vespa field use."""
    sanitized = model_name.lower()
    for separator in ("/", "-"):
        sanitized = sanitized.replace(separator, "_")
    return sanitized
def sample_images(images, number_to_sample):
    """
    Pick up to ``number_to_sample`` equally spaced frames from a list of frames.

    :param images: a list of image frames.
    :param number_to_sample: int representing the number of frames to keep.
    :return: a numpy array containing the sampled frames.
    """
    frames = np.array(images)
    # Fewer frames than requested: return everything.
    if number_to_sample >= len(frames):
        return frames
    positions = np.round(np.linspace(0, len(frames) - 1, number_to_sample)).astype(int)
    return frames[positions]
def extract_images(video_path, number_frames):
    """
    Extract equally spaced frames from a video read at 1 fps.

    :param video_path: Full .mp4 video path.
    :param number_frames: Number of frames to sample.
    :return: a numpy array containing the sample of frames.
    """
    reader = imageio.get_reader(video_path, fps=1)
    # Materialize all 1-fps frames, then subsample them evenly.
    all_frames = list(reader)
    return sample_images(all_frames, number_frames)
class VideoFeedDataset(Dataset):
    """PyTorch ``Dataset`` that turns .mp4 files into pyvespa-compatible feed operations."""

    def __init__(self, video_dir, model_name, number_frames_per_video):
        """
        PyTorch Dataset to compute video embeddings and return pyvespa-compatible feed data.

        :param video_dir: Folder containing .mp4 video files.
        :param model_name: CLIP model name.
        :param number_frames_per_video: Number of embeddings per video.
        """
        self.video_dir = video_dir
        self.number_frames_per_video = number_frames_per_video
        valid_vespa_model_name = translate_model_names_to_valid_vespa_field_names(
            model_name
        )
        self.from_tensor_to_PIL = ToPILImage()
        # CLIP model and its companion image preprocessing transform.
        self.model, self.preprocess = clip.load(model_name)
        self.video_file_names = glob.glob(os.path.join(video_dir, "*.mp4"))
        # Name of the Vespa tensor field this model's embeddings feed into.
        self.image_embedding_name = valid_vespa_model_name + "_image"

    def _from_image_to_vector(self, x):
        """
        From image to embedding.

        :param x: PIL images
        :return: normalized image embeddings.
        """
        with torch.no_grad():
            image_features = self.model.encode_image(
                self.preprocess(x).unsqueeze(0)
            ).float()
            # L2-normalize so dot products behave as cosine similarity.
            image_features /= image_features.norm(dim=-1, keepdim=True)
        return image_features

    def __len__(self):
        """Number of .mp4 files found in ``video_dir``."""
        return len(self.video_file_names)

    def __getitem__(self, idx):
        """Embed each sampled frame of video ``idx``; return one update dict per frame."""
        video_file_name = self.video_file_names[idx]
        images = extract_images(
            video_path=video_file_name, number_frames=self.number_frames_per_video
        )
        pil_images = [self.from_tensor_to_PIL(x) for x in images]
        frames = []
        # NOTE: the loop variable shadows the dataset index ``idx`` above; from
        # here on ``idx`` is the frame number used in the document id suffix.
        for idx, image in enumerate(pil_images):
            image = self._from_image_to_vector(image)
            video_base_name = ntpath.basename(video_file_name)
            frames.append(
                {
                    # Document id: "<video stem>_<frame number>".
                    "id": video_base_name.split(".mp4")[0] + "_{}".format(idx),
                    "fields": {
                        "video_file_name": video_base_name,
                        self.image_embedding_name: {"values": image.tolist()[0]},
                    },
                    # "create": True turns the update into an upsert.
                    "create": True,
                }
            )
        return frames
@retry(wait=wait_exponential(multiplier=1), stop=stop_after_attempt(3))
def send_video_embeddings(app, batch):
    """
    Send one pyvespa-compatible batch of updates to the Vespa app.

    Retried up to three times with exponential back-off via ``tenacity``.

    :param app: pyvespa connection to a Vespa instance
    :param batch: pyvespa-compatible list of data points to be updated.
    :return: None
    :raises ValueError: if any update in the batch did not return HTTP 200.
    """
    responses = app.update_batch(batch=batch)
    ok_count = sum(1 for response in responses if response.status_code == 200)
    if ok_count != len(batch):
        # Surface the failing responses before triggering the retry.
        failures = [response.json for response in responses if response.status_code != 200]
        print(failures)
        raise ValueError("Failed to send data.")
    print("Successfully sent {} data points.".format(ok_count))
def compute_and_send_video_embeddings(
    app, batch_size, clip_model_names, number_frames_per_video, video_dir, num_workers=0
):
    """
    Loop through video folder, compute embeddings and send to Vespa app.

    :param app: pyvespa connection to a Vespa instance
    :param batch_size: Number of images to process per iteration.
    :param clip_model_names: CLIP models names. It will generate one image embedding per model name.
    :param number_frames_per_video: Number of frames to use per video.
    :param video_dir: Complete path of the folder containing .mp4 video files.
    :param num_workers: Number of workers to use (refers to the DataLoader parallelization)
    :return: None
    """
    # One full pass over the video folder per CLIP model.
    for model_name in clip_model_names:
        video_dataset = VideoFeedDataset(
            video_dir=video_dir,  # Folder containing image files
            model_name=model_name,  # CLIP model name used to convert image into vector
            number_frames_per_video=number_frames_per_video,
        )
        dataloader = DataLoader(
            video_dataset,
            batch_size=batch_size,
            shuffle=False,
            # Each dataset item is a list of per-frame dicts; flatten the
            # lists of all videos in the batch into one feed list.
            collate_fn=lambda x: [item for sublist in x for item in sublist],
            num_workers=num_workers,
        )
        for idx, batch in enumerate(dataloader):
            print(
                "Model name: {}. Iteration: {}/{}".format(
                    model_name, idx, len(dataloader)
                )
            )
            send_video_embeddings(app=app, batch=batch)
class TextProcessor(object):
    """Turns free text into a normalized CLIP text embedding."""

    def __init__(self, model_name):
        """
        Python-based text processor.

        :param model_name: CLIP model name to use embedding text.
        """
        self.model, _ = clip.load(model_name)
        self.model_name = model_name

    def embed(self, text):
        """
        Convert text to a (normalized) embedding.

        :param text: a string to be embedded.
        :return: Normalized embedding vector as a plain list of floats.
        """
        tokens = clip.tokenize(text)
        with torch.no_grad():
            features = self.model.encode_text(tokens).float()
        # L2-normalize so dot products behave as cosine similarity.
        features = features / features.norm(dim=-1, keepdim=True)
        return features.tolist()[0]
def create_text_video_app(model_info):
    """
    Create text to video search app based on a variety of CLIP models

    :param model_info: dict containing model names as keys and embedding size as values.
        Check `clip.available_models()` to check which models are available.
    :return: A Vespa application package.
    """
    app_package = ApplicationPackage(name="video_search")
    app_package.schema.add_fields(
        Field(name="video_file_name", type="string", indexing=["summary", "attribute"]),
    )
    # For every CLIP model: one ANN-indexed image-embedding field, one rank
    # profile ranking by closeness to the query embedding, and one query
    # profile type field carrying the query-side text embedding.
    for model_name, embedding_size in model_info.items():
        model_name = translate_model_names_to_valid_vespa_field_names(model_name)
        app_package.schema.add_fields(
            Field(
                name=model_name + "_image",
                type="tensor<float>(x[{}])".format(embedding_size),
                indexing=["attribute", "index"],
                ann=HNSW(
                    distance_metric="euclidean",
                    max_links_per_node=16,
                    neighbors_to_explore_at_insert=500,
                ),
            )
        )
        app_package.schema.add_rank_profile(
            RankProfile(
                name=model_name + "_similarity",
                inherits="default",
                first_phase="closeness({})".format(model_name + "_image"),
            )
        )
        app_package.query_profile_type.add_fields(
            QueryTypeField(
                name="ranking.features.query({})".format(model_name + "_text"),
                type="tensor<float>(x[{}])".format(embedding_size),
            )
        )
    return app_package
def create_vespa_query(query, text_processor, number_videos):
    """
    Build the body of a Vespa query request.

    :param query: a string representing the query.
    :param text_processor: an instance of `TextProcessor` to convert string to embedding.
    :param number_videos: Number of videos to return.
    :return: body of a Vespa query request.
    """
    prefix = translate_model_names_to_valid_vespa_field_names(text_processor.model_name)
    image_field = prefix + "_image"
    text_field = prefix + "_text"
    # Nearest-neighbor search over frame embeddings, grouped per video so the
    # best frame of each of the top ``number_videos`` videos is returned.
    yql_template = (
        'select * from sources * where '
        '({{"targetNumHits":100}}nearestNeighbor({},{})) | '
        'all(group(video_file_name) max({}) order(-max(relevance())) '
        'each( max(1) each(output(summary())) as(frame)) as(video))'
    )
    return {
        "yql": yql_template.format(image_field, text_field, number_videos),
        "hits": 0,
        "ranking.features.query({})".format(text_field): text_processor.embed(query),
        "ranking.profile": prefix + "_similarity",
        "timeout": 10,
    }
def search_video_file_names(app, query, text_processor, number_videos):
    """
    Parse the output of the Vespa query.

    Parse the output of the Vespa query to return a list with the video file name and
    relevance score for each hit.

    :param app: The pyvespa Vespa connection to the app.
    :param query: The text query to be sent.
    :param text_processor: An instance of the TextProcessor to turn text into embedding.
    :param number_videos: The number of videos to be retrieved.
    :return: a list with the video file name and relevance score for each hit.
    """
    result = app.query(
        body=create_vespa_query(
            query=query, text_processor=text_processor, number_videos=number_videos
        )
    )
    # The grouped YQL produces root -> grouplist -> video groups; each group's
    # first child chain holds the best frame's summary fields and relevance.
    parsed_results = [
        {
            "video_file_name": video["children"][0]["children"][0]["fields"][
                "video_file_name"
            ],
            "relevance": video["children"][0]["children"][0]["relevance"],
        }
        for video in result.json["root"]["children"][0]["children"][0]["children"]
    ]
    return parsed_results
class VideoSearchApp(object):
    """Video search app with a custom grouped query for video retrieval."""

    def __init__(self, app: Vespa, clip_model_name=None, text_processor=None):
        """
        Video search app with custom query for video retrieval.

        :param app: The pyvespa Vespa connection to the app.
        :param clip_model_name: CLIP model name to turn text into embedding
        :param text_processor: TextProcessor instance. `clip_model_name` will
            be ignored if an instance is provided.
        :raises ValueError: if neither ``clip_model_name`` nor ``text_processor`` is given.
        """
        if text_processor:
            self.text_processor = text_processor
        elif clip_model_name:
            self.text_processor = TextProcessor(clip_model_name)
        else:
            # Bug fix: the original constructed this ValueError without
            # raising it, silently leaving ``text_processor`` unset.
            raise ValueError("Provide a clip_model_name or an instance of TextProcessor")
        self.app = app

    def query(self, text, number_videos):
        """
        Video search

        :param text: Text query describing an action.
        :param number_videos: Number of videos to retrieve.
        :return: a list with the video file name and relevance score for each hit.
        """
        return search_video_file_names(
            app=self.app,
            query=text,
            text_processor=self.text_processor,
            number_videos=number_videos,
        )
| 36.186544 | 223 | 0.645145 | import os
import glob
import ntpath
from collections import Counter
import numpy as np
import imageio
from vespa.package import ApplicationPackage, Field, HNSW, RankProfile, QueryTypeField
from vespa.application import Vespa
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import ToPILImage
import clip
from tenacity import retry, wait_exponential, stop_after_attempt
def translate_model_names_to_valid_vespa_field_names(model_name):
return model_name.replace("/", "_").replace("-", "_").lower()
def sample_images(images, number_to_sample):
if number_to_sample < len(images):
idx = np.round(np.linspace(0, len(images) - 1, number_to_sample)).astype(int)
return np.array(images)[idx]
else:
return np.array(images)
def extract_images(video_path, number_frames):
reader = imageio.get_reader(video_path, fps=1)
frames = []
for i, im in enumerate(reader):
frames.append(im)
return sample_images(frames, number_frames)
class VideoFeedDataset(Dataset):
def __init__(self, video_dir, model_name, number_frames_per_video):
self.video_dir = video_dir
self.number_frames_per_video = number_frames_per_video
valid_vespa_model_name = translate_model_names_to_valid_vespa_field_names(
model_name
)
self.from_tensor_to_PIL = ToPILImage()
self.model, self.preprocess = clip.load(model_name)
self.video_file_names = glob.glob(os.path.join(video_dir, "*.mp4"))
self.image_embedding_name = valid_vespa_model_name + "_image"
def _from_image_to_vector(self, x):
with torch.no_grad():
image_features = self.model.encode_image(
self.preprocess(x).unsqueeze(0)
).float()
image_features /= image_features.norm(dim=-1, keepdim=True)
return image_features
def __len__(self):
return len(self.video_file_names)
def __getitem__(self, idx):
video_file_name = self.video_file_names[idx]
images = extract_images(
video_path=video_file_name, number_frames=self.number_frames_per_video
)
pil_images = [self.from_tensor_to_PIL(x) for x in images]
frames = []
for idx, image in enumerate(pil_images):
image = self._from_image_to_vector(image)
video_base_name = ntpath.basename(video_file_name)
frames.append(
{
"id": video_base_name.split(".mp4")[0] + "_{}".format(idx),
"fields": {
"video_file_name": video_base_name,
self.image_embedding_name: {"values": image.tolist()[0]},
},
"create": True,
}
)
return frames
@retry(wait=wait_exponential(multiplier=1), stop=stop_after_attempt(3))
def send_video_embeddings(app, batch):
responses = app.update_batch(batch=batch)
status_code_summary = Counter([x.status_code for x in responses])
if status_code_summary[200] != len(batch):
print([response.json for response in responses if response.status_code != 200])
raise ValueError("Failed to send data.")
print("Successfully sent {} data points.".format(status_code_summary[200]))
def compute_and_send_video_embeddings(
app, batch_size, clip_model_names, number_frames_per_video, video_dir, num_workers=0
):
for model_name in clip_model_names:
video_dataset = VideoFeedDataset(
video_dir=video_dir,
model_name=model_name,
number_frames_per_video=number_frames_per_video,
)
dataloader = DataLoader(
video_dataset,
batch_size=batch_size,
shuffle=False,
collate_fn=lambda x: [item for sublist in x for item in sublist],
num_workers=num_workers,
)
for idx, batch in enumerate(dataloader):
print(
"Model name: {}. Iteration: {}/{}".format(
model_name, idx, len(dataloader)
)
)
send_video_embeddings(app=app, batch=batch)
class TextProcessor(object):
def __init__(self, model_name):
self.model, _ = clip.load(model_name)
self.model_name = model_name
def embed(self, text):
text_tokens = clip.tokenize(text)
with torch.no_grad():
text_features = self.model.encode_text(text_tokens).float()
text_features /= text_features.norm(dim=-1, keepdim=True)
return text_features.tolist()[0]
def create_text_video_app(model_info):
app_package = ApplicationPackage(name="video_search")
app_package.schema.add_fields(
Field(name="video_file_name", type="string", indexing=["summary", "attribute"]),
)
for model_name, embedding_size in model_info.items():
model_name = translate_model_names_to_valid_vespa_field_names(model_name)
app_package.schema.add_fields(
Field(
name=model_name + "_image",
type="tensor<float>(x[{}])".format(embedding_size),
indexing=["attribute", "index"],
ann=HNSW(
distance_metric="euclidean",
max_links_per_node=16,
neighbors_to_explore_at_insert=500,
),
)
)
app_package.schema.add_rank_profile(
RankProfile(
name=model_name + "_similarity",
inherits="default",
first_phase="closeness({})".format(model_name + "_image"),
)
)
app_package.query_profile_type.add_fields(
QueryTypeField(
name="ranking.features.query({})".format(model_name + "_text"),
type="tensor<float>(x[{}])".format(embedding_size),
)
)
return app_package
def create_vespa_query(query, text_processor, number_videos):
valid_vespa_model_name = translate_model_names_to_valid_vespa_field_names(
text_processor.model_name
)
image_field_name = valid_vespa_model_name + "_image"
text_field_name = valid_vespa_model_name + "_text"
ranking_name = valid_vespa_model_name + "_similarity"
return {
"yql": 'select * from sources * where ({{"targetNumHits":100}}nearestNeighbor({},{})) | all(group(video_file_name) max({}) order(-max(relevance())) each( max(1) each(output(summary())) as(frame)) as(video))'.format(
image_field_name, text_field_name, number_videos
),
"hits": 0,
"ranking.features.query({})".format(text_field_name): text_processor.embed(
query
),
"ranking.profile": ranking_name,
"timeout": 10,
}
def search_video_file_names(app, query, text_processor, number_videos):
result = app.query(
body=create_vespa_query(
query=query, text_processor=text_processor, number_videos=number_videos
)
)
parsed_results = [
{
"video_file_name": video["children"][0]["children"][0]["fields"][
"video_file_name"
],
"relevance": video["children"][0]["children"][0]["relevance"],
}
for video in result.json["root"]["children"][0]["children"][0]["children"]
]
return parsed_results
class VideoSearchApp(object):
    """Convenience wrapper tying a Vespa application to a CLIP text processor."""

    def __init__(self, app: Vespa, clip_model_name=None, text_processor=None):
        """
        Parameters
        ----------
        app : Vespa
            Connected Vespa application instance.
        clip_model_name : str, optional
            Name of a CLIP model; used to build a TextProcessor if one is
            not given directly.
        text_processor : TextProcessor, optional
            Pre-built text processor; takes precedence over clip_model_name.

        Raises
        ------
        ValueError
            If neither clip_model_name nor text_processor is provided.
        """
        if text_processor:
            self.text_processor = text_processor
        elif clip_model_name:
            self.text_processor = TextProcessor(clip_model_name)
        else:
            # Bug fix: the original constructed this ValueError without raising
            # it, silently leaving self.text_processor unset.
            raise ValueError(
                "Provide a clip_model_name or an instance of TextProcessor"
            )
        self.app = app

    def query(self, text, number_videos):
        """Return the top ``number_videos`` videos matching ``text``."""
        return search_video_file_names(
            app=self.app,
            query=text,
            text_processor=self.text_processor,
            number_videos=number_videos,
        )
| true | true |
1c35e10434fa28fe4fcc4629a48c8d0eb5ec7751 | 31,843 | py | Python | armi/nuclearDataIO/xsCollections.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | [
"Apache-2.0"
] | null | null | null | armi/nuclearDataIO/xsCollections.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | [
"Apache-2.0"
] | null | null | null | armi/nuclearDataIO/xsCollections.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cross section collections contain cross sections for a single nuclide or region.
Specifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`, which
then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
These may represent microscopic or macroscopic neutron or photon cross sections. When they are macroscopic,
they generally represent a whole region with many nuclides, though this is not required.
See Also
--------
armi.nuclearDataIO.xsCollection.XSCollection : object that gets created.
Examples
--------
# creating a MicroscopicXSCollection by loading one from ISOTXS.
microLib = armi.nuclearDataIO.ISOTXS('ISOTXS')
micros = myLib.nuclides['U235AA'].micros
# creating macroscopic XS:
mc = MacroscopicCrossSectionCreator()
macroCollection = mc.createMacrosFromMicros(microLib, block)
blocksWithMacros = mc.createMacrosOnBlocklist(microLib, blocks)
"""
import numpy
from scipy import sparse
from armi import runLog
from armi.localization import exceptions
from armi.utils import properties
from armi.utils import units
# Basic cross-section types that are represented by a 1-D vector in the multigroup approximation.
# No one is particularly proud of these names...we can claim
# they have some origin in the ISOTXS file format card 04 definition
# fmt: off
NGAMMA = "nGamma"  # radiative capture (n,gamma)
NAPLHA = "nalph"  # (n, alpha) -- NOTE(review): "NAPLHA" looks like a typo for "NALPHA"; renaming would break external references, so it is left as-is
NP = "np"  # (n, proton)
ND = "nd"  # (n, deuteron)
NT = "nt"  # (n, triton)
FISSION_XS = "fission"  # (n, fission)
N2N_XS = "n2n"  # (n, 2n)
NUSIGF = "nuSigF"  # nu * sigma_f: fission neutron production cross section
NU = "neutronsPerFission"  # nu-bar: average neutrons released per fission
# fmt: on
# Vector cross sections that represent neutron capture (absorption without fission or n2n)
CAPTURE_XS = [NGAMMA, NAPLHA, NP, ND, NT]
# Cross section types that are represented by 2-D matrices in the multigroup approximation
BASIC_SCAT_MATRIX = ["elasticScatter", "inelasticScatter", "n2nScatter"]
OTHER_SCAT_MATRIX = ["totalScatter", "elasticScatter1stOrder"]
HIGHORDER_SCATTER = "higherOrderScatter"
# Subset of vector xs used to evaluate absorption cross-section
ABSORPTION_XS = CAPTURE_XS + [FISSION_XS, N2N_XS]
# Subset of vector xs evaluated by _convertBasicXS
BASIC_XS = ABSORPTION_XS + [NUSIGF]
# Subset of vector xs that are derived from basic cross sections
DERIVED_XS = ["absorption", "removal"]
# Total and transport are treated differently since they are 2D (can have multiple moments)
TOTAL_XS = ["total", "transport"]
# All basic cross sections, including removal and scattering
ALL_XS = BASIC_XS + BASIC_SCAT_MATRIX + OTHER_SCAT_MATRIX + DERIVED_XS + TOTAL_XS
# Every data attribute stored on an XSCollection (used by XSCollection.compare)
ALL_COLLECTION_DATA = ALL_XS + [
    "chi",
    NU,
    "strpd",
    HIGHORDER_SCATTER,
    "diffusionConstants",
]
# ISOTXS metadata keys for energy released per capture / per fission (kappa values)
E_CAPTURE = "ecapt"
E_FISSION = "efiss"
class XSCollection(object):
"""A cross section collection."""
_zeroes = {}
"""
A dict of numpy arrays set to the size of XSLibrary.numGroups.
This is used to initialize cross sections which may not exist for the specific nuclide.
Consequently, there should never be a situation where a cross section does not exist.
In addition, they are all pointers to the same array, so we're not generating too much
unnecessary data.
Notes
-----
This is a dict so that it can store multiple 0_g "matricies", i.e. vectors. Realistically,
during any given run there will only be a set of groups, e.g. 33.
"""
@classmethod
def getDefaultXs(cls, numGroups):
default = cls._zeroes.get(numGroups, None)
if default is None:
default = numpy.zeros(numGroups)
cls._zeroes[numGroups] = default
return default
def __init__(self, parent):
"""
Construct a NuclideCollection.
Parameters
----------
parent : object
The parent container, which may be a region, a nuclide, a block, etc.
"""
self.numGroups = None
self.transport = None
self.total = None
self.nGamma = None
self.fission = None
self.neutronsPerFission = None
self.chi = None
self.nalph = None
self.np = None
self.n2n = None
self.nd = None
self.nt = None
self.strpd = None
self.elasticScatter = None
self.inelasticScatter = None
self.n2nScatter = None
self.elasticScatter1stOrder = None
self.totalScatter = None
self.absorption = None
self.diffusionConstants = None
self.removal = None
self.nuSigF = None
self.higherOrderScatter = {}
self.source = "{}".format(parent)
def __getitem__(self, key):
"""
Access cross sections by key string (e.g. micros['fission'] = micros.fission.
Notes
-----
These containers were originally
dicts, but upgraded to objects with numpy values as specialization
was needed. This access method could/should be phased out.
"""
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def get(self, key, default):
try:
return self[key]
except (IndexError, KeyError, TypeError):
return default
def getAbsorptionXS(self):
"""Return total absorption XS, which is the sum of capture + fission + others."""
absXS = [
self.nGamma,
self.fission,
self.nalph,
self.np,
self.nd,
self.nt,
self.n2n,
]
return absXS
def getTotalScatterMatrix(self):
"""
Sum up scatter matrices to produce total scatter matrix.
Multiply reaction-based n2n scatter matrix by 2.0 to convert to production-based.
.. warning:: Not all lattice codes store (n,2n) matrices consistently. Some are
production-based and some are absorption-based. If you use an
absorption-based one, your scatter matrix will be off, generally
leading to about a percent error in your neutron balance.
Notes
-----
The total scattering matrix is produced by summing the elastic, inelastic, and n2n scattering matrices. If a
specific scattering matrix does not exist for a composition (nuclide or region) then it is skipped and a
warning is displayed stating that the scattering reaction is not available and is not included in the total
scattering matrix.
Example: When producing macroscopic cross sections in MC2-3 the code internally merges the elastic and
inelastic scattering matrices into a single elastic scattering matrix.
"""
scatters = []
totalScatterComponents = {
"elastic": self.elasticScatter,
"inelastic": self.inelasticScatter,
"n2n": self.n2nScatter * 2.0,
}
for sType, sMatrix in totalScatterComponents.items():
if sMatrix is not None:
scatters.append(sMatrix)
else:
runLog.warning(
"{} scattering matrix in {} is not defined. Generating total scattering matrix"
" without this data".format(sType.title(), self),
single=True,
)
return sum(scatters)
def clear(self):
"""Zero out all the cross sections; this is useful for creating dummy cross sections."""
for xsAttr in ALL_XS:
value = getattr(self, xsAttr)
# it should either be a list, a numpy array, or a sparse matrix
if isinstance(value, list):
value = [0.0] * len(value)
elif isinstance(value, numpy.ndarray):
value = numpy.zeros(value.shape)
elif value is None: # assume it is scipy.sparse
pass
elif value.nnz >= 0:
value = sparse.csr_matrix(value.shape)
setattr(self, xsAttr, value)
# need to do the same thing for the higherOrderScatter
for kk, currentMatrix in self.higherOrderScatter.items():
self.higherOrderScatter[kk] = sparse.csr_matrix(currentMatrix.shape)
@staticmethod
def collapseCrossSection(crossSection, weights):
r"""
Collapse a cross section into 1-group.
This is extremely useful for many analyses such as doing a shielding efficacy survey
or computing one-group reaction rates.
.. math::
\bar{\sigma} = \frac{\sum_g{\sigma_g \phi_g}}{\sum_g{\phi_g}}
Parameters
----------
crossSection : list
Multigroup cross section values
weights : list
energy group weights to apply (usually the multigroup flux)
Returns
-------
oneGroupXS : float
The one group cross section in the same units as the input cross section.
"""
mult = numpy.array(crossSection) * numpy.array(weights)
return sum(mult) / sum(weights)
def compare(self, other, flux, relativeTolerance=0, verbose=False):
"""Compare the cross sections between two XSCollections objects."""
equal = True
for xsName in ALL_COLLECTION_DATA:
myXsData = self.__dict__[xsName]
theirXsData = other.__dict__[xsName]
if xsName == HIGHORDER_SCATTER:
for actualList, expectedList in zip(myXsData, theirXsData):
if actualList != expectedList:
equal = False
runLog.important(
" {} {:<30} cross section is different.".format(
self.source, xsName
)
)
elif sparse.issparse(myXsData) and sparse.issparse(theirXsData):
if not numpy.allclose(
myXsData.todense(),
theirXsData.todense(),
rtol=relativeTolerance,
atol=0.0,
):
verboseData = (
""
if not verbose
else "\n{},\n\n{}".format(myXsData, theirXsData)
)
runLog.important(
" {} {:<30} cross section is different.{}".format(
self.source, xsName, verboseData
)
)
equal = False
elif isinstance(myXsData, dict) and myXsData != theirXsData:
# there are no dicts currently so code is untested
raise NotImplementedError("there are no dicts")
elif not properties.areEqual(myXsData, theirXsData, relativeTolerance):
verboseData = (
"" if not verbose else "\n{},\n\n{}".format(myXsData, theirXsData)
)
runLog.important(
" {} {:<30} cross section is different.{}".format(
self.source, xsName, verboseData
)
)
equal = False
return equal
def merge(self, other):
"""
Merge the cross sections of two collections.
Notes
-----
1. This can only merge if one hasn't been assigned at all, because it doesn't try to figure out how to
account for overlapping cross sections.
2. Update the current library (self) with values from the other library if all attributes in the library except
ones in `attributesToIgnore` are None.
3. Libraries are already merged if all attributes in the other library are None (This is nothing to merge!).
"""
attributesToIgnore = ["source", HIGHORDER_SCATTER]
if all(
v is None for k, v in self.__dict__.items() if k not in attributesToIgnore
):
self.__dict__.update(other.__dict__) # See note 2
elif all(
v is None for k, v in other.__dict__.items() if k not in attributesToIgnore
):
pass # See note 3
else:
overlappingAttrs = set(
k for k, v in self.__dict__.items() if v is not None and k != "source"
)
overlappingAttrs &= set(
k for k, v in other.__dict__.items() if v is not None and k != "source"
)
raise exceptions.XSLibraryError(
"Cannot merge {} and {}.\n Cross sections overlap in "
"attributes: {}.".format(
self.source, other.source, ", ".join(overlappingAttrs)
)
)
raise exceptions.XSLibraryError(
"Cannot merge from and from \n Cross sections overlap in "
"attributes:."
)
class MacroscopicCrossSectionCreator(object):
    """
    Create macroscopic cross sections from micros and number density.

    Object encapsulating all high-level methods related to the creation of
    macroscopic cross sections.
    """

    def __init__(self, buildScatterMatrix=True, buildOnlyCoolant=False):
        """
        Parameters
        ----------
        buildScatterMatrix : bool, optional
            When False, scatter-matrix construction is skipped entirely.
        buildOnlyCoolant : bool, optional
            Stored but not acted on anywhere in this class.
        """
        self.densities = None  # nucName -> number density (atoms/bn-cm) for current block
        self.macros = None  # the XSCollection being built
        self.micros = None  # never assigned by this class; kept for interface stability
        self.buildScatterMatrix = buildScatterMatrix
        self.buildOnlyCoolant = (
            buildOnlyCoolant  # TODO: this is not implemented yet. is it needed?
        )
        self.block = None  # the block currently being processed

    def createMacrosOnBlocklist(
        self, microLibrary, blockList, nucNames=None, libType="micros"
    ):
        """Attach a macroscopic XSCollection to every block in ``blockList`` and return the list."""
        for block in blockList:
            block.macros = self.createMacrosFromMicros(
                microLibrary, block, nucNames, libType=libType
            )
        return blockList

    def createMacrosFromMicros(
        self, microLibrary, block, nucNames=None, libType="micros"
    ):
        """
        Creates a macroscopic cross section set based on a microscopic XS library using a block object

        Micro libraries have lots of nuclides, but macros only have 1.

        Parameters
        ----------
        microLibrary : xsCollection.XSCollection
            Input micros

        block : Block
            Object whos number densities should be used to generate macros

        nucNames : list, optional
            List of nuclides to include in the macros. Defaults to all in block.

        libType : str, optional
            The block attribute containing the desired microscopic XS for this block:
            either "micros" for neutron XS or "gammaXS" for gamma XS.

        Returns
        -------
        macros : xsCollection.XSCollection
            A new XSCollection full of macroscopic cross sections
        """
        runLog.debug("Building macroscopic cross sections for {0}".format(block))
        if nucNames is None:
            nucNames = block.getNuclides()
        self.microLibrary = microLibrary
        self.block = block
        self.xsSuffix = block.getMicroSuffix()
        self.macros = XSCollection(parent=block)
        self.densities = dict(zip(nucNames, block.getNuclideNumberDensities(nucNames)))
        self.ng = getattr(self.microLibrary, "numGroups" + _getLibTypeSuffix(libType))
        # pipeline: each step below populates self.macros and depends on the
        # steps before it (e.g. removal needs absorption and totalScatter)
        self._initializeMacros()
        self._convertBasicXS(libType=libType)
        self._computeAbsorptionXS()
        self._convertScatterMatrices(libType=libType)
        self._computeDiffusionConstants()
        self._buildTotalScatterMatrix()
        self._computeRemovalXS()
        self.macros.chi = computeBlockAverageChi(
            b=self.block, isotxsLib=self.microLibrary
        )
        return self.macros

    def _initializeMacros(self):
        """Allocate zeroed vectors/matrices for all basic and derived reactions."""
        m = self.macros
        for xsName in BASIC_XS + DERIVED_XS:
            setattr(m, xsName, numpy.zeros(self.ng))
        for matrixName in BASIC_SCAT_MATRIX:
            # lil_matrices are good for indexing but bad for certain math operations.
            # use csr for faster math
            setattr(m, matrixName, sparse.csr_matrix((self.ng, self.ng)))

    def _convertBasicXS(self, libType="micros"):
        """
        Converts basic XS such as fission, nGamma, etc.

        Parameters
        ----------
        libType : str, optional
            The block attribute containing the desired microscopic XS for this block:
            either "micros" for neutron XS or "gammaXS" for gamma XS.
        """
        reactions = BASIC_XS + TOTAL_XS
        # nuSigF is handled separately so the per-nuclide nu multiplier can be applied
        if NUSIGF in reactions:
            reactions.remove(NUSIGF)
            self.macros[NUSIGF] = computeMacroscopicGroupConstants(
                FISSION_XS,
                self.densities,
                self.microLibrary,
                self.xsSuffix,
                libType=libType,
                multConstant=NU,
            )
        for reaction in reactions:
            self.macros[reaction] = computeMacroscopicGroupConstants(
                reaction,
                self.densities,
                self.microLibrary,
                self.xsSuffix,
                libType=libType,
            )

    def _convertScatterMatrices(self, libType="micros"):
        """
        Build macroscopic scatter matrices.

        Parameters
        ----------
        libType : str, optional
            The block attribute containing the desired microscopic XS for this block:
            either "micros" for neutron XS or "gammaXS" for gamma XS.
        """
        if not self.buildScatterMatrix:
            return

        for nuclide in self.microLibrary.getNuclides(self.xsSuffix):
            microCollection = getattr(nuclide, libType)
            # nuclides absent from the block contribute nothing (density 0.0)
            nDens = self.densities.get(nuclide.name, 0.0)
            if microCollection.elasticScatter is not None:
                self.macros.elasticScatter += microCollection.elasticScatter * nDens
            if microCollection.inelasticScatter is not None:
                self.macros.inelasticScatter += microCollection.inelasticScatter * nDens
            if microCollection.n2nScatter is not None:
                self.macros.n2nScatter += microCollection.n2nScatter * nDens

    def _computeAbsorptionXS(self):
        """
        Absorption = sum of all absorption reactions.

        Must be called after :py:meth:`_convertBasicXS`.
        """
        for absXS in self.macros.getAbsorptionXS():
            self.macros.absorption += absXS

    def _computeDiffusionConstants(self):
        """D_g = 1 / (3 * Sigma_tr,g), group-wise."""
        self.macros.diffusionConstants = 1.0 / (3.0 * self.macros.transport)

    def _buildTotalScatterMatrix(self):
        """Sum elastic + inelastic + 2*n2n into the total scatter matrix."""
        self.macros.totalScatter = self.macros.getTotalScatterMatrix()

    def _computeRemovalXS(self):
        """
        Compute removal cross section (things that remove a neutron from this phase space)

        This includes all absorptions and outscattering.
        Outscattering is represented by columns of the total scatter matrix.
        Self-scattering (e.g. when g' == g) is not be included. This can be
        handled by summing the columns and then subtracting the diagonal.

        within-group n2n is accounted for by simply not including n2n in the removal xs.
        """
        self.macros.removal = self.macros.absorption - self.macros.n2n
        columnSum = self.macros.totalScatter.sum(axis=0).getA1()  # convert to ndarray
        diags = self.macros.totalScatter.diagonal()
        self.macros.removal += columnSum - diags
def computeBlockAverageChi(b, isotxsLib):
    r"""
    Return the block average total chi vector based on isotope chi vectors.

    This is defined by eq 3.4b in DIF3D manual [DIF3D]_, which corresponds to 1 in A.HMG4C card.

    .. math::

        \chi_g = \frac{\sum_{n} \chi_{g,n} N_n V \sum_{g'}(\nu_{g'}*\sigma_{f,g'})}{\sum_n N_n V \sum_{g'}(\nu_{g'}*\sigma_{f,g'} )}

    Each nuclide's chi is weighted by its total fission neutron production
    (number density times nu*sigma_f summed over groups); nuclides with zero
    chi or zero density contribute nothing. Volume is not used because it is
    already homogenized in the block.

    Parameters
    ----------
    b : object
        Block object

    isotxsLib : object
        ISOTXS library object

    Notes
    -----
    This methodology is based on option 1 in the HMG4C utility (named total
    fission source weighting).
    """
    groups = isotxsLib.numGroups
    densities = b.getNumberDensities()
    weightedChi = numpy.zeros(groups)
    totalWeight = 0.0
    for nuclide in isotxsLib.getNuclides(b.getMicroSuffix()):
        micros = nuclide.micros
        nDens = densities.get(nuclide.name, 0.0)
        # per-nuclide fission source strength: sum_g nu_g * sigma_f,g
        fissionSource = sum(micros.neutronsPerFission * micros.fission)
        weightedChi += micros.chi * nDens * fissionSource
        totalWeight += nDens * fissionSource
    if totalWeight == 0.0:
        # no fissionable content: return a zero spectrum rather than dividing by zero
        return numpy.zeros(groups)
    return weightedChi / totalWeight
def _getLibTypeSuffix(libType):
if libType == "micros":
libTypeSuffix = ""
elif libType == "gammaXS":
libTypeSuffix = "Gamma"
else:
libTypeSuffix = None
runLog.warning(
"ARMI currently supports only micro XS libraries of types "
'"micros" (neutron) and "gammaXS" (gamma).'
)
return libTypeSuffix
def computeNeutronEnergyDepositionConstants(numberDensities, lib, microSuffix):
    """
    Compute the macroscopic neutron energy deposition group constants.

    These group constants can be multiplied by the flux to obtain energy deposition rates.

    Parameters
    ----------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.

    lib : library object
        Microscopic cross section library.

    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    Returns
    -------
    energyDepositionConsts : numpy array
        Neutron energy deposition group constants. (J/cm)

    Notes
    -----
    PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3.
    (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.)
    Converted here to obtain J/cm (eV-bn * 1/bn-cm * J / eV)
    """
    heatingConstants = computeMacroscopicGroupConstants(
        "neutronHeating", numberDensities, lib, microSuffix
    )
    # eV-bn * atoms/bn-cm -> eV/cm; scale to J/cm
    return heatingConstants * units.JOULES_PER_eV
def computeGammaEnergyDepositionConstants(numberDensities, lib, microSuffix):
    """
    Compute the macroscopic gamma energy deposition group constants.

    These group constants can be multiplied by the flux to obtain energy deposition rates.

    Parameters
    ----------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.

    lib : library object
        Microscopic cross section library.

    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    Returns
    -------
    energyDepositionConsts : numpy array
        gamma energy deposition group constants. (J/cm)

    Notes
    -----
    PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3.
    (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.)
    Convert here to obtain J/cm (eV-bn * 1/bn-cm * J / eV)
    """
    heatingConstants = computeMacroscopicGroupConstants(
        "gammaHeating", numberDensities, lib, microSuffix
    )
    # eV-bn * atoms/bn-cm -> eV/cm; scale to J/cm
    return heatingConstants * units.JOULES_PER_eV
def computeFissionEnergyGenerationConstants(numberDensities, lib, microSuffix):
    r"""
    Get the fission energy generation group constant of a block

    .. math::

        E_{generation fission} = \kappa_f \Sigma_f

    Power comes from fission and capture reactions.

    Parameters
    ----------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.

    lib : library object
        Microscopic cross section library.

    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    Returns
    -------
    fissionEnergyFactor: numpy.array
        Fission energy generation group constants (in Joules/cm)
    """
    # kappa_f (the "efiss" metadata entry) multiplies the fission XS nuclide-by-nuclide
    return computeMacroscopicGroupConstants(
        FISSION_XS,
        numberDensities,
        lib,
        microSuffix,
        libType="micros",
        multConstant=E_FISSION,
    )
def computeCaptureEnergyGenerationConstants(numberDensities, lib, microSuffix):
    r"""
    Get the energy generation group constant of a block

    .. math::

        E_{generation capture} = \kappa_c \Sigma_c

    Typically, one only cares about the flux* this XS (to find total power),
    but the XS itself is required in some sensitivity studies.

    Power comes from fission and capture reactions.

    Parameters
    ----------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNumberDensities` method.

    lib : library object
        Microscopic cross section library.

    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    Returns
    -------
    captureEnergyFactor: numpy.array
        Capture energy generation group constants (in Joules/cm)
    """
    captureEnergyFactor = None
    for xs in CAPTURE_XS:
        # kappa_c (the "ecapt" metadata entry) multiplies each capture-type
        # reaction nuclide-by-nuclide
        xsEnergyFactor = computeMacroscopicGroupConstants(
            xs,
            numberDensities,
            lib,
            microSuffix,
            libType="micros",
            multConstant=E_CAPTURE,
        )
        if captureEnergyFactor is None:
            # size the accumulator from the first reaction's group structure.
            # (The original code ran computeMacroscopicGroupConstants an extra
            # time, without the multiplier, solely to learn this shape.)
            captureEnergyFactor = numpy.zeros(numpy.shape(xsEnergyFactor))
        captureEnergyFactor += xsEnergyFactor
    return captureEnergyFactor
def computeMacroscopicGroupConstants(
    constantName,
    numberDensities,
    lib,
    microSuffix,
    libType=None,
    multConstant=None,
    multLib=None,
):
    """
    Compute any macroscopic group constants given number densities and a microscopic library.

    Parameters
    ----------
    constantName : str
        Name of the reaction for which to obtain the group constants. This name should match a
        cross section name or an attribute in the collection.

    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.

    lib : library object
        Microscopic cross section library.

    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    libType : str, optional
        The block attribute containing the desired microscopic XS for this block:
        either "micros" for neutron XS or "gammaXS" for gamma XS.

    multConstant : str, optional
        Name of constant by which the group constants will be multiplied. This name should match a
        cross section name or an attribute in the collection.

    multLib : library object, optional
        Microscopic cross section nuclide library to obtain the multiplier from.
        If None, same library as base cross section is used.

    Returns
    -------
    macroGroupConstant : numpy array
        Macroscopic group constants for the requested reaction.

    Raises
    ------
    ValueError
        If any nuclide in ``numberDensities`` is missing from ``lib``.
    """
    skippedNuclides = []
    skippedMultNuclides = []
    macroGroupConstants = None
    # sort the numberDensities because a summation is being performed that may result in slight
    # differences based on the order.
    for nuclideName, numberDensity in sorted(numberDensities.items()):
        try:
            libNuclide = lib.getNuclide(nuclideName, microSuffix)
            # default: the multiplier comes from the same nuclide entry
            multLibNuclide = libNuclide
        except KeyError:
            skippedNuclides.append(nuclideName)  # Nuclide does not exist in the library
            continue
        if multLib:
            try:
                multLibNuclide = multLib.getNuclide(nuclideName, microSuffix)
            except KeyError:
                skippedMultNuclides.append(
                    nuclideName
                )  # Nuclide does not exist in the library
                continue
        microGroupConstants = _getMicroGroupConstants(
            libNuclide, constantName, nuclideName, libType
        )
        multiplierVal = _getXsMultiplier(multLibNuclide, multConstant, libType)
        if macroGroupConstants is None:
            # size the accumulator from the first nuclide's group structure
            macroGroupConstants = numpy.zeros(microGroupConstants.shape)
        if (
            microGroupConstants.shape != macroGroupConstants.shape
            and not microGroupConstants.any()
        ):
            # all-zero micro data with a mismatched group structure is harmless:
            # reshape it to the accumulator; nonzero mismatched data would raise below
            microGroupConstants = numpy.zeros(macroGroupConstants.shape)
        macroGroupConstants += (
            numpy.asarray(numberDensity) * microGroupConstants * multiplierVal
        )
    if skippedNuclides:
        # missing base-library nuclides are fatal: the result would be silently wrong
        runLog.error(
            "Following nuclides are not in microscopic library {}: {}".format(
                lib, skippedNuclides
            ),
            single=True,
        )
        raise ValueError(
            "Specified nuclides are not in microscopic library {}".format(lib)
        )
    if skippedMultNuclides:
        # missing multiplier-library nuclides only drop their contribution
        runLog.debug(
            "Following nuclides are not in multiplier library {}: {}".format(
                multLib, skippedMultNuclides
            ),
            single=True,
        )
    return macroGroupConstants
def _getXsMultiplier(libNuclide, multiplier, libType):
if multiplier:
try:
microCollection = getattr(libNuclide, libType)
multiplierVal = getattr(microCollection, multiplier)
except:
multiplierVal = libNuclide.isotxsMetadata[multiplier]
else:
multiplierVal = 1.0
return numpy.asarray(multiplierVal)
def _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType):
if libType:
microCollection = getattr(libNuclide, libType)
else:
microCollection = libNuclide
microGroupConstants = numpy.asarray(getattr(microCollection, constantName))
if not microGroupConstants.any():
runLog.debug(
"Nuclide {} does no have {} microscopic group constants.".format(
nuclideName, constantName
),
single=True,
)
return microGroupConstants
| 34.725191 | 132 | 0.625632 |
import numpy
from scipy import sparse
from armi import runLog
from armi.localization import exceptions
from armi.utils import properties
from armi.utils import units
NGAMMA = "nGamma"
NAPLHA = "nalph"
NP = "np"
ND = "nd"
NT = "nt"
FISSION_XS = "fission"
N2N_XS = "n2n"
NUSIGF = "nuSigF"
NU = "neutronsPerFission"
CAPTURE_XS = [NGAMMA, NAPLHA, NP, ND, NT]
BASIC_SCAT_MATRIX = ["elasticScatter", "inelasticScatter", "n2nScatter"]
OTHER_SCAT_MATRIX = ["totalScatter", "elasticScatter1stOrder"]
HIGHORDER_SCATTER = "higherOrderScatter"
ABSORPTION_XS = CAPTURE_XS + [FISSION_XS, N2N_XS]
BASIC_XS = ABSORPTION_XS + [NUSIGF]
DERIVED_XS = ["absorption", "removal"]
TOTAL_XS = ["total", "transport"]
ALL_XS = BASIC_XS + BASIC_SCAT_MATRIX + OTHER_SCAT_MATRIX + DERIVED_XS + TOTAL_XS
ALL_COLLECTION_DATA = ALL_XS + [
"chi",
NU,
"strpd",
HIGHORDER_SCATTER,
"diffusionConstants",
]
E_CAPTURE = "ecapt"
E_FISSION = "efiss"
class XSCollection(object):
_zeroes = {}
@classmethod
def getDefaultXs(cls, numGroups):
default = cls._zeroes.get(numGroups, None)
if default is None:
default = numpy.zeros(numGroups)
cls._zeroes[numGroups] = default
return default
def __init__(self, parent):
self.numGroups = None
self.transport = None
self.total = None
self.nGamma = None
self.fission = None
self.neutronsPerFission = None
self.chi = None
self.nalph = None
self.np = None
self.n2n = None
self.nd = None
self.nt = None
self.strpd = None
self.elasticScatter = None
self.inelasticScatter = None
self.n2nScatter = None
self.elasticScatter1stOrder = None
self.totalScatter = None
self.absorption = None
self.diffusionConstants = None
self.removal = None
self.nuSigF = None
self.higherOrderScatter = {}
self.source = "{}".format(parent)
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def get(self, key, default):
try:
return self[key]
except (IndexError, KeyError, TypeError):
return default
def getAbsorptionXS(self):
absXS = [
self.nGamma,
self.fission,
self.nalph,
self.np,
self.nd,
self.nt,
self.n2n,
]
return absXS
def getTotalScatterMatrix(self):
scatters = []
totalScatterComponents = {
"elastic": self.elasticScatter,
"inelastic": self.inelasticScatter,
"n2n": self.n2nScatter * 2.0,
}
for sType, sMatrix in totalScatterComponents.items():
if sMatrix is not None:
scatters.append(sMatrix)
else:
runLog.warning(
"{} scattering matrix in {} is not defined. Generating total scattering matrix"
" without this data".format(sType.title(), self),
single=True,
)
return sum(scatters)
def clear(self):
for xsAttr in ALL_XS:
value = getattr(self, xsAttr)
if isinstance(value, list):
value = [0.0] * len(value)
elif isinstance(value, numpy.ndarray):
value = numpy.zeros(value.shape)
elif value is None:
pass
elif value.nnz >= 0:
value = sparse.csr_matrix(value.shape)
setattr(self, xsAttr, value)
for kk, currentMatrix in self.higherOrderScatter.items():
self.higherOrderScatter[kk] = sparse.csr_matrix(currentMatrix.shape)
@staticmethod
def collapseCrossSection(crossSection, weights):
mult = numpy.array(crossSection) * numpy.array(weights)
return sum(mult) / sum(weights)
def compare(self, other, flux, relativeTolerance=0, verbose=False):
equal = True
for xsName in ALL_COLLECTION_DATA:
myXsData = self.__dict__[xsName]
theirXsData = other.__dict__[xsName]
if xsName == HIGHORDER_SCATTER:
for actualList, expectedList in zip(myXsData, theirXsData):
if actualList != expectedList:
equal = False
runLog.important(
" {} {:<30} cross section is different.".format(
self.source, xsName
)
)
elif sparse.issparse(myXsData) and sparse.issparse(theirXsData):
if not numpy.allclose(
myXsData.todense(),
theirXsData.todense(),
rtol=relativeTolerance,
atol=0.0,
):
verboseData = (
""
if not verbose
else "\n{},\n\n{}".format(myXsData, theirXsData)
)
runLog.important(
" {} {:<30} cross section is different.{}".format(
self.source, xsName, verboseData
)
)
equal = False
elif isinstance(myXsData, dict) and myXsData != theirXsData:
raise NotImplementedError("there are no dicts")
elif not properties.areEqual(myXsData, theirXsData, relativeTolerance):
verboseData = (
"" if not verbose else "\n{},\n\n{}".format(myXsData, theirXsData)
)
runLog.important(
" {} {:<30} cross section is different.{}".format(
self.source, xsName, verboseData
)
)
equal = False
return equal
def merge(self, other):
attributesToIgnore = ["source", HIGHORDER_SCATTER]
if all(
v is None for k, v in self.__dict__.items() if k not in attributesToIgnore
):
self.__dict__.update(other.__dict__)
elif all(
v is None for k, v in other.__dict__.items() if k not in attributesToIgnore
):
pass
else:
overlappingAttrs = set(
k for k, v in self.__dict__.items() if v is not None and k != "source"
)
overlappingAttrs &= set(
k for k, v in other.__dict__.items() if v is not None and k != "source"
)
raise exceptions.XSLibraryError(
"Cannot merge {} and {}.\n Cross sections overlap in "
"attributes: {}.".format(
self.source, other.source, ", ".join(overlappingAttrs)
)
)
raise exceptions.XSLibraryError(
"Cannot merge from and from \n Cross sections overlap in "
"attributes:."
)
class MacroscopicCrossSectionCreator(object):
    """Builds block-level macroscopic cross sections from a microscopic library.

    Macroscopic group constants are number-density-weighted sums of each
    nuclide's microscopic constants. After the basic conversion, derived
    quantities (absorption, diffusion constants, total scatter, removal XS,
    block-average chi) are computed from the basic ones.
    """

    def __init__(self, buildScatterMatrix=True, buildOnlyCoolant=False):
        self.densities = None  # nuclide name -> number density for the current block
        self.macros = None  # XSCollection being assembled
        self.micros = None
        self.buildScatterMatrix = buildScatterMatrix
        self.buildOnlyCoolant = buildOnlyCoolant
        self.block = None

    def createMacrosOnBlocklist(
        self, microLibrary, blockList, nucNames=None, libType="micros"
    ):
        """Attach a freshly built ``macros`` collection to every block in ``blockList``."""
        for block in blockList:
            block.macros = self.createMacrosFromMicros(
                microLibrary, block, nucNames, libType=libType
            )
        return blockList

    def createMacrosFromMicros(
        self, microLibrary, block, nucNames=None, libType="micros"
    ):
        """Build and return the macroscopic :class:`XSCollection` for one block.

        Parameters
        ----------
        microLibrary : XSLibrary-like
            Source of microscopic group constants.
        block : Block
            Provides nuclide inventory, number densities and the micro suffix.
        nucNames : list of str, optional
            Nuclides to include; defaults to everything on the block.
        libType : str
            ``"micros"`` (neutron) or ``"gammaXS"`` (gamma) sub-collection.
        """
        runLog.debug("Building macroscopic cross sections for {0}".format(block))
        if nucNames is None:
            nucNames = block.getNuclides()
        self.microLibrary = microLibrary
        self.block = block
        self.xsSuffix = block.getMicroSuffix()
        self.macros = XSCollection(parent=block)
        self.densities = dict(zip(nucNames, block.getNuclideNumberDensities(nucNames)))
        self.ng = getattr(self.microLibrary, "numGroups" + _getLibTypeSuffix(libType))
        self._initializeMacros()
        self._convertBasicXS(libType=libType)
        self._computeAbsorptionXS()
        self._convertScatterMatrices(libType=libType)
        self._computeDiffusionConstants()
        self._buildTotalScatterMatrix()
        self._computeRemovalXS()
        self.macros.chi = computeBlockAverageChi(
            b=self.block, isotxsLib=self.microLibrary
        )
        return self.macros

    def _initializeMacros(self):
        # Zero vectors for scalar XS and empty sparse matrices for scatter data.
        m = self.macros
        for xsName in BASIC_XS + DERIVED_XS:
            setattr(m, xsName, numpy.zeros(self.ng))
        for matrixName in BASIC_SCAT_MATRIX:
            setattr(m, matrixName, sparse.csr_matrix((self.ng, self.ng)))

    def _convertBasicXS(self, libType="micros"):
        """Convert the basic (and total) XS; nu-sigma-f gets the ``NU`` multiplier."""
        reactions = BASIC_XS + TOTAL_XS
        if NUSIGF in reactions:
            reactions.remove(NUSIGF)
            self.macros[NUSIGF] = computeMacroscopicGroupConstants(
                FISSION_XS,
                self.densities,
                self.microLibrary,
                self.xsSuffix,
                libType=libType,
                multConstant=NU,
            )
        for reaction in reactions:
            self.macros[reaction] = computeMacroscopicGroupConstants(
                reaction,
                self.densities,
                self.microLibrary,
                self.xsSuffix,
                libType=libType,
            )

    def _convertScatterMatrices(self, libType="micros"):
        """Accumulate density-weighted elastic/inelastic/n2n scatter matrices."""
        if not self.buildScatterMatrix:
            return
        for nuclide in self.microLibrary.getNuclides(self.xsSuffix):
            microCollection = getattr(nuclide, libType)
            nDens = self.densities.get(nuclide.name, 0.0)
            if microCollection.elasticScatter is not None:
                self.macros.elasticScatter += microCollection.elasticScatter * nDens
            if microCollection.inelasticScatter is not None:
                self.macros.inelasticScatter += microCollection.inelasticScatter * nDens
            if microCollection.n2nScatter is not None:
                self.macros.n2nScatter += microCollection.n2nScatter * nDens

    def _computeAbsorptionXS(self):
        # Absorption is the sum of all absorption-type reaction XS.
        for absXS in self.macros.getAbsorptionXS():
            self.macros.absorption += absXS

    def _computeDiffusionConstants(self):
        # Standard diffusion coefficient D = 1 / (3 * sigma_transport).
        self.macros.diffusionConstants = 1.0 / (3.0 * self.macros.transport)

    def _buildTotalScatterMatrix(self):
        self.macros.totalScatter = self.macros.getTotalScatterMatrix()

    def _computeRemovalXS(self):
        """Removal XS: absorption (less n2n) plus out-group scatter.

        Out-group scatter per group is the total-scatter column sum minus the
        diagonal (within-group) term.
        """
        self.macros.removal = self.macros.absorption - self.macros.n2n
        # BUG FIX: this column sum was previously computed and discarded, so
        # ``columnSum`` below was an undefined name (NameError at runtime).
        columnSum = self.macros.totalScatter.sum(axis=0).getA1()
        diags = self.macros.totalScatter.diagonal()
        self.macros.removal += columnSum - diags
def computeBlockAverageChi(b, isotxsLib):
    """Return the nu-fission-weighted average fission spectrum (chi) of a block.

    Each nuclide's chi is weighted by its number density times its total
    nu-fission production. If nothing in the block fissions, a zero spectrum
    of length ``isotxsLib.numGroups`` is returned.
    """
    nGroups = isotxsLib.numGroups
    weightedChi = numpy.zeros(nGroups)
    totalWeight = 0.0
    densities = b.getNumberDensities()
    for nuclide in isotxsLib.getNuclides(b.getMicroSuffix()):
        micros = nuclide.micros
        dens = densities.get(nuclide.name, 0.0)
        production = sum(micros.neutronsPerFission * micros.fission)
        weightedChi += micros.chi * dens * production
        totalWeight += dens * production
    if totalWeight != 0.0:
        return weightedChi / totalWeight
    return numpy.zeros(nGroups)
def _getLibTypeSuffix(libType):
if libType == "micros":
libTypeSuffix = ""
elif libType == "gammaXS":
libTypeSuffix = "Gamma"
else:
libTypeSuffix = None
runLog.warning(
"ARMI currently supports only micro XS libraries of types "
'"micros" (neutron) and "gammaXS" (gamma).'
)
return libTypeSuffix
def computeNeutronEnergyDepositionConstants(numberDensities, lib, microSuffix):
    """Macroscopic neutron-heating constants, converted from eV to joules."""
    heating = computeMacroscopicGroupConstants(
        "neutronHeating", numberDensities, lib, microSuffix
    )
    return heating * units.JOULES_PER_eV
def computeGammaEnergyDepositionConstants(numberDensities, lib, microSuffix):
    """Macroscopic gamma-heating constants, converted from eV to joules."""
    heating = computeMacroscopicGroupConstants(
        "gammaHeating", numberDensities, lib, microSuffix
    )
    return heating * units.JOULES_PER_eV
def computeFissionEnergyGenerationConstants(numberDensities, lib, microSuffix):
    """Macroscopic fission-energy-production constants (fission XS times E_FISSION)."""
    return computeMacroscopicGroupConstants(
        FISSION_XS,
        numberDensities,
        lib,
        microSuffix,
        libType="micros",
        multConstant=E_FISSION,
    )
def computeCaptureEnergyGenerationConstants(numberDensities, lib, microSuffix):
    """Macroscopic capture-energy-production constants summed over all capture reactions.

    Each reaction in ``CAPTURE_XS`` is converted with the ``E_CAPTURE``
    multiplier and the results are accumulated into one array.

    Returns None when ``CAPTURE_XS`` is empty.
    """
    captureEnergyFactor = None
    for xs in CAPTURE_XS:
        term = computeMacroscopicGroupConstants(
            xs,
            numberDensities,
            lib,
            microSuffix,
            libType="micros",
            multConstant=E_CAPTURE,
        )
        if captureEnergyFactor is None:
            # Initialize the accumulator lazily so its shape matches the data.
            # (Previously an extra, un-multiplied conversion was performed
            # solely to obtain this shape — redundant work, now removed.)
            captureEnergyFactor = numpy.zeros(numpy.shape(term))
        captureEnergyFactor += term
    return captureEnergyFactor
def computeMacroscopicGroupConstants(
    constantName,
    numberDensities,
    lib,
    microSuffix,
    libType=None,
    multConstant=None,
    multLib=None,
):
    """Sum density-weighted microscopic group constants into macroscopic ones.

    ``constantName`` is read from each nuclide's ``libType`` sub-collection,
    optionally scaled by ``multConstant`` taken from ``multLib`` (or from
    ``lib`` itself when ``multLib`` is None).

    Raises ValueError when a nuclide is missing from ``lib``; nuclides
    missing from ``multLib`` are merely skipped (logged at debug level).
    """
    missingNuclides = []
    missingMultNuclides = []
    macroGroupConstants = None
    for nucName, nDens in sorted(numberDensities.items()):
        try:
            libNuclide = lib.getNuclide(nucName, microSuffix)
            multLibNuclide = libNuclide
        except KeyError:
            missingNuclides.append(nucName)
            continue
        if multLib:
            try:
                multLibNuclide = multLib.getNuclide(nucName, microSuffix)
            except KeyError:
                missingMultNuclides.append(nucName)
                continue
        micro = _getMicroGroupConstants(libNuclide, constantName, nucName, libType)
        mult = _getXsMultiplier(multLibNuclide, multConstant, libType)
        if macroGroupConstants is None:
            macroGroupConstants = numpy.zeros(micro.shape)
        # Tolerate all-zero data of the wrong shape by re-zeroing to match.
        if micro.shape != macroGroupConstants.shape and not micro.any():
            micro = numpy.zeros(macroGroupConstants.shape)
        macroGroupConstants += numpy.asarray(nDens) * micro * mult
    if missingNuclides:
        runLog.error(
            "Following nuclides are not in microscopic library {}: {}".format(
                lib, missingNuclides
            ),
            single=True,
        )
        raise ValueError(
            "Specified nuclides are not in microscopic library {}".format(lib)
        )
    if missingMultNuclides:
        runLog.debug(
            "Following nuclides are not in multiplier library {}: {}".format(
                multLib, missingMultNuclides
            ),
            single=True,
        )
    return macroGroupConstants
def _getXsMultiplier(libNuclide, multiplier, libType):
if multiplier:
try:
microCollection = getattr(libNuclide, libType)
multiplierVal = getattr(microCollection, multiplier)
except:
multiplierVal = libNuclide.isotxsMetadata[multiplier]
else:
multiplierVal = 1.0
return numpy.asarray(multiplierVal)
def _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType):
if libType:
microCollection = getattr(libNuclide, libType)
else:
microCollection = libNuclide
microGroupConstants = numpy.asarray(getattr(microCollection, constantName))
if not microGroupConstants.any():
runLog.debug(
"Nuclide {} does no have {} microscopic group constants.".format(
nuclideName, constantName
),
single=True,
)
return microGroupConstants
| true | true |
1c35e12dd91b8c2d125fe4cdf0bb30487f6fce29 | 1,434 | py | Python | src/data/295.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/295.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/295.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import queue
ROAD = "Road"
TOWN = "Town"


def solve(N: int, Q: int, A: "List[int]", B: "List[int]", C: "List[int]",
          D: "List[int]"):
    """Answer Q parity queries on a tree of N towns.

    Towns C[i] and D[i] are an even distance apart iff they get the same
    color in a 2-coloring of the tree; prints "Town" in that case, else
    "Road". Also returns the list of answers (backward-compatible addition
    for testability; the CLI caller ignores the return value).
    """
    # deque gives O(1) FIFO ops without queue.Queue's needless locking.
    from collections import deque

    adjacency = [[] for _ in range(N)]
    for i in range(N - 1):
        adjacency[A[i] - 1].append(B[i] - 1)
        adjacency[B[i] - 1].append(A[i] - 1)
    # BFS 2-coloring from town 0.
    color = [-1] * N
    color[0] = 0
    pending = deque([0])
    while pending:
        node = pending.popleft()
        for neighbor in adjacency[node]:
            if color[neighbor] == -1:
                color[neighbor] = 1 - color[node]
                pending.append(neighbor)
    answers = []
    for i in range(Q):
        answers.append(TOWN if color[C[i] - 1] == color[D[i] - 1] else ROAD)
        print(answers[-1])
    return answers
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read the whole of stdin as whitespace-separated tokens and run solve()."""
    data = sys.stdin.read().split()
    pos = 0

    def nxt():
        nonlocal pos
        pos += 1
        return int(data[pos - 1])

    N = nxt()
    Q = nxt()
    A, B, C, D = [], [], [], []
    for _ in range(N - 1):
        A.append(nxt())
        B.append(nxt())
    for _ in range(Q):
        C.append(nxt())
        D.append(nxt())
    solve(N, Q, A, B, C, D)


if __name__ == '__main__':
    main()
| 24.724138 | 167 | 0.504184 |
import sys
import queue
ROAD = "Road"
TOWN = "Town"


def solve(N: int, Q: int, A: "List[int]", B: "List[int]", C: "List[int]",
          D: "List[int]"):
    """Print "Town" when C[i] and D[i] are an even tree-distance apart, else "Road"."""
    adjacency = [[] for _ in range(N)]
    for a, b in zip(A, B):
        adjacency[a - 1].append(b - 1)
        adjacency[b - 1].append(a - 1)
    # BFS 2-coloring: equal parity <=> even distance.
    parity = [-1] * N
    parity[0] = 0
    frontier = queue.Queue()
    frontier.put(0)
    while not frontier.empty():
        node = frontier.get()
        for neighbor in adjacency[node]:
            if parity[neighbor] == -1:
                parity[neighbor] = 1 - parity[node]
                frontier.put(neighbor)
    for c, d in zip(C, D):
        print(TOWN if parity[c - 1] == parity[d - 1] else ROAD)
def main():
    """Read whitespace-separated integers from stdin and dispatch to solve()."""
    tokens = iter(sys.stdin.read().split())

    def read_int():
        return int(next(tokens))

    N = read_int()
    Q = read_int()
    A, B, C, D = [], [], [], []
    for _ in range(N - 1):
        A.append(read_int())
        B.append(read_int())
    for _ in range(Q):
        C.append(read_int())
        D.append(read_int())
    solve(N, Q, A, B, C, D)


if __name__ == '__main__':
    main()
| true | true |
1c35e15227e07b2da36653408049b4abd015e2f2 | 4,756 | py | Python | inference-engine/ie_bridges/python/sample/style_transfer_sample/style_transfer_sample.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 1 | 2021-04-06T03:32:12.000Z | 2021-04-06T03:32:12.000Z | inference-engine/ie_bridges/python/sample/style_transfer_sample/style_transfer_sample.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 28 | 2021-09-24T09:29:02.000Z | 2022-03-28T13:20:46.000Z | inference-engine/ie_bridges/python/sample/style_transfer_sample/style_transfer_sample.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 1 | 2020-08-30T11:48:03.000Z | 2020-08-30T11:48:03.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from openvino.inference_engine import IECore
def build_argparser():
    """Construct the command-line parser for the style-transfer sample."""
    parser = ArgumentParser(add_help=False)
    opts = parser.add_argument_group('Options')
    opts.add_argument('-h', '--help', action='help', default=SUPPRESS,
                      help='Show this help message and exit.')
    opts.add_argument("-m", "--model", required=True, type=str,
                      help="Required. Path to an .xml or .onnx file with a trained model.")
    opts.add_argument("-i", "--input", required=True, type=str, nargs="+",
                      help="Required. Path to an image files")
    opts.add_argument("-l", "--cpu_extension", type=str, default=None,
                      help="Optional. Required for CPU custom layers. "
                           "Absolute MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the "
                           "kernels implementations")
    opts.add_argument("-d", "--device", default="CPU", type=str,
                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. Sample "
                           "will look for a suitable plugin for device specified. Default value is CPU")
    opts.add_argument("-nt", "--number_top", default=10, type=int,
                      help="Optional. Number of top results")
    opts.add_argument("--mean_val_r", "-mean_val_r", default=0, type=float,
                      help="Optional. Mean value of red channel for mean value subtraction in postprocessing ")
    opts.add_argument("--mean_val_g", "-mean_val_g", default=0, type=float,
                      help="Optional. Mean value of green channel for mean value subtraction in postprocessing ")
    opts.add_argument("--mean_val_b", "-mean_val_b", default=0, type=float,
                      help="Optional. Mean value of blue channel for mean value subtraction in postprocessing ")
    return parser
def main():
    """Run the style-transfer sample: load model, infer on images, save results."""
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()

    # Set up the Inference Engine and the optional CPU extension library.
    log.info("Creating Inference Engine")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    # Load the model (IR .xml/.bin pair or a single .onnx file).
    model = args.model
    log.info(f"Loading network:\n\t{model}")
    net = ie.read_network(model=model)
    assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Read every input image, resize to the network's H x W, move HWC -> CHW.
    n, c, h, w = net.input_info[input_blob].input_data.shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning(f"Image {args.input[i]} is resized from {image.shape[:-1]} to {(h, w)}")
            image = cv2.resize(image, (w, h))
        images[i] = image.transpose((2, 0, 1))
    log.info(f"Batch size is {n}")

    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    log.info("Starting inference")
    res = exec_net.infer(inputs={input_blob: images})

    log.info("Processing output blob")
    res = res[out_blob]

    # For each batch entry: CHW -> HWC, BGR -> RGB, clamp to [0, 255],
    # subtract the user-supplied channel means, then save as a bitmap.
    for batch, data in enumerate(res):
        data = np.swapaxes(data, 0, 2)
        data = np.swapaxes(data, 0, 1)
        data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
        data[data < 0] = 0
        data[data > 255] = 255
        data = data[::] - (args.mean_val_r, args.mean_val_g, args.mean_val_b)
        out_img = os.path.join(os.path.dirname(__file__), f"out_{batch}.bmp")
        cv2.imwrite(out_img, data)
        log.info(f"Result image was saved to {out_img}")
    log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n")


if __name__ == '__main__':
    sys.exit(main() or 0)
| 45.295238 | 133 | 0.643818 |
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser, SUPPRESS
import cv2
import numpy as np
import logging as log
from openvino.inference_engine import IECore
def build_argparser():
    """Build the sample's argument parser (custom -h since add_help=False)."""
    parser = ArgumentParser(add_help=False)
    group = parser.add_argument_group('Options')
    group.add_argument('-h', '--help', action='help', default=SUPPRESS,
                       help='Show this help message and exit.')
    group.add_argument("-m", "--model", type=str, required=True,
                       help="Required. Path to an .xml or .onnx file with a trained model.")
    group.add_argument("-i", "--input", type=str, nargs="+", required=True,
                       help="Required. Path to an image files")
    group.add_argument("-l", "--cpu_extension", default=None, type=str,
                       help="Optional. Required for CPU custom layers. "
                            "Absolute MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library with the "
                            "kernels implementations")
    group.add_argument("-d", "--device", type=str, default="CPU",
                       help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. Sample "
                            "will look for a suitable plugin for device specified. Default value is CPU")
    group.add_argument("-nt", "--number_top", type=int, default=10,
                       help="Optional. Number of top results")
    group.add_argument("--mean_val_r", "-mean_val_r", type=float, default=0,
                       help="Optional. Mean value of red channel for mean value subtraction in postprocessing ")
    group.add_argument("--mean_val_g", "-mean_val_g", type=float, default=0,
                       help="Optional. Mean value of green channel for mean value subtraction in postprocessing ")
    group.add_argument("--mean_val_b", "-mean_val_b", type=float, default=0,
                       help="Optional. Mean value of blue channel for mean value subtraction in postprocessing ")
    return parser
def main():
    """Load the network, run style transfer on the input images, save outputs."""
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()

    log.info("Creating Inference Engine")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    model = args.model
    log.info(f"Loading network:\n\t{model}")
    net = ie.read_network(model=model)
    assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    net.batch_size = len(args.input)

    # Collect the (resized) input images into one NCHW batch tensor.
    n, c, h, w = net.input_info[input_blob].input_data.shape
    images = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = cv2.imread(args.input[i])
        if image.shape[:-1] != (h, w):
            log.warning(f"Image {args.input[i]} is resized from {image.shape[:-1]} to {(h, w)}")
            image = cv2.resize(image, (w, h))
        images[i] = image.transpose((2, 0, 1))
    log.info(f"Batch size is {n}")

    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    log.info("Starting inference")
    res = exec_net.infer(inputs={input_blob: images})

    log.info("Processing output blob")
    res = res[out_blob]

    # Post-process: CHW -> HWC, BGR -> RGB, clamp, subtract means, write BMPs.
    for batch, data in enumerate(res):
        data = np.swapaxes(data, 0, 2)
        data = np.swapaxes(data, 0, 1)
        data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
        data[data < 0] = 0
        data[data > 255] = 255
        data = data[::] - (args.mean_val_r, args.mean_val_g, args.mean_val_b)
        out_img = os.path.join(os.path.dirname(__file__), f"out_{batch}.bmp")
        cv2.imwrite(out_img, data)
        log.info(f"Result image was saved to {out_img}")
    log.info("This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n")


if __name__ == '__main__':
    sys.exit(main() or 0)
| true | true |
1c35e1745f7d2094a35073536ccef72aac135bfc | 11,021 | py | Python | dataset/datasets.py | Shuai-Xie/structure_knowledge_distillation | a5a0897f01e16d71dc4e3c77d4ac926fb0cd532d | [
"BSD-2-Clause"
] | null | null | null | dataset/datasets.py | Shuai-Xie/structure_knowledge_distillation | a5a0897f01e16d71dc4e3c77d4ac926fb0cd532d | [
"BSD-2-Clause"
] | null | null | null | dataset/datasets.py | Shuai-Xie/structure_knowledge_distillation | a5a0897f01e16d71dc4e3c77d4ac926fb0cd532d | [
"BSD-2-Clause"
] | null | null | null | import os
import os.path as osp
import numpy as np
import random
import collections
import torch
import torchvision
import cv2
from torch.utils import data
class VOCDataSet(data.Dataset):
    """Pascal VOC segmentation dataset with random scale/crop/flip augmentation.

    Each item is ``(image_CHW_float32, label_HW_float32, original_shape, name)``.
    Images are mean-subtracted BGR (as read by OpenCV); labels are class-index
    maps with ``ignore_label`` marking pixels excluded from the loss.
    """

    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321),
                 mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        self.img_ids = [i_id.strip() for i_id in open(list_path)]
        if max_iters is not None:  # idiom fix: was ``not max_iters == None``
            # Repeat the id list so one epoch covers ``max_iters`` iterations.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        for name in self.img_ids:
            img_file = osp.join(self.root, "JPEGImages/%s.jpg" % name)
            label_file = osp.join(self.root, "SegmentationClassAug/%s.png" % name)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image and label by a factor in [0.5, 1.6]."""
        f_scale = 0.5 + random.randint(0, 11) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        # Nearest-neighbor keeps label values as valid class indices.
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Load one sample and apply scale, pad, random crop and random flip."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        # Pad bottom/right so the random crop always fits.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                         pad_w, cv2.BORDER_CONSTANT,
                                         value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                                           pad_w, cv2.BORDER_CONSTANT,
                                           value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # Random horizontal flip (flip == -1 reverses the width axis).
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class VOCDataTestSet(data.Dataset):
    """Pascal VOC test-time dataset: mean-subtracted, bottom/right padded, CHW.

    Each item is ``(image_CHW_float32, name, original_shape)``.
    """

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = mean
        self.img_ids = [line.strip() for line in open(list_path)]
        self.files = [
            {"img": osp.join(self.root, "JPEGImages/%s.jpg" % name)}
            for name in self.img_ids
        ]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w, _ = image.shape
        # Pad bottom/right up to the crop size so all outputs share a shape.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h or pad_w:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image, name, size
class CSDataSet(data.Dataset):
    """Cityscapes training dataset with random scale/crop/flip augmentation.

    The list file contains ``<image_path> <label_path>`` pairs per line.
    Raw Cityscapes label ids are remapped to the 19 training ids via
    ``id_to_trainid`` (unused classes map to ``ignore_label``).
    """

    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321),
                 mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
        if max_iters is not None:  # idiom fix: was ``not max_iters == None``
            # Repeat the id list so one epoch covers ``max_iters`` iterations.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        for item in self.img_ids:
            image_path, label_path = item
            name = osp.splitext(osp.basename(label_path))[0]
            img_file = osp.join(self.root, image_path)
            label_file = osp.join(self.root, label_path)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })
        # Cityscapes raw label id -> train id (19 classes; rest ignored).
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                              3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                              7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                              14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                              18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                              28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
        print('{} images are loaded!'.format(len(self.img_ids)))

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image and label by a factor in [0.7, 2.1]."""
        f_scale = 0.7 + random.randint(0, 14) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        # Nearest-neighbor keeps label values as valid class indices.
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def id2trainId(self, label, reverse=False):
        """Remap raw label ids to train ids (or back when ``reverse=True``)."""
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy

    def __getitem__(self, index):
        """Load one sample and apply remap, scale, pad, random crop and flip."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        label = self.id2trainId(label)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        # Pad bottom/right so the random crop always fits.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                         pad_w, cv2.BORDER_CONSTANT,
                                         value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                                           pad_w, cv2.BORDER_CONSTANT,
                                           value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            # Random horizontal flip (flip == -1 reverses the width axis).
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class CSDataTestSet(data.Dataset):
    """Cityscapes test-time dataset: mean-subtracted, bottom/right padded, CHW.

    Each item is ``(image_CHW_float32, original_shape, name)``.

    NOTE(review): the ``mean`` constructor argument is accepted but ignored —
    a hard-coded BGR mean is always used; confirm this is intentional.
    """

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = (104.00698793, 116.66876762, 122.67891434)
        self.img_ids = [line.strip().split() for line in open(list_path)]
        self.files = [
            {"img": osp.join(self.root, item[0])} for item in self.img_ids
        ]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w, _ = image.shape
        # Pad bottom/right up to the crop size so all outputs share a shape.
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h or pad_w:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image, size, name
| 43.734127 | 144 | 0.545867 | import os
import os.path as osp
import numpy as np
import random
import collections
import torch
import torchvision
import cv2
from torch.utils import data
class VOCDataSet(data.Dataset):
    """Pascal VOC training dataset with random scale/crop/flip augmentation.

    Each item is ``(image_CHW_float32, label_HW_float32, original_shape, name)``.
    """

    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321),
                 mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        self.img_ids = [i_id.strip() for i_id in open(list_path)]
        if max_iters is not None:
            # Repeat the id list so one epoch covers ``max_iters`` iterations.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        for name in self.img_ids:
            img_file = osp.join(self.root, "JPEGImages/%s.jpg" % name)
            label_file = osp.join(self.root, "SegmentationClassAug/%s.png" % name)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image and label by a factor in [0.5, 1.6]."""
        f_scale = 0.5 + random.randint(0, 11) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Load one sample and apply scale, pad, random crop and random flip."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                         pad_w, cv2.BORDER_CONSTANT,
                                         value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                                           pad_w, cv2.BORDER_CONSTANT,
                                           value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        # BUG FIX: the transpose result was discarded, leaving the image in
        # HWC order so the mirror flip below flipped channels, not width.
        image = image.transpose((2, 0, 1))
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class VOCDataTestSet(data.Dataset):
    """Pascal VOC test-time dataset: mean-subtracted, bottom/right padded, CHW."""

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = mean
        self.img_ids = [line.strip() for line in open(list_path)]
        self.files = [
            {"img": osp.join(self.root, "JPEGImages/%s.jpg" % name)}
            for name in self.img_ids
        ]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h or pad_w:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image, name, size
class CSDataSet(data.Dataset):
    """Cityscapes training dataset with random scale/crop/flip augmentation.

    The list file contains ``<image_path> <label_path>`` pairs per line; raw
    label ids are remapped to the 19 train ids via ``id_to_trainid``.
    """

    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321),
                 mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
        if max_iters is not None:
            # Repeat the id list so one epoch covers ``max_iters`` iterations.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        for item in self.img_ids:
            image_path, label_path = item
            name = osp.splitext(osp.basename(label_path))[0]
            img_file = osp.join(self.root, image_path)
            label_file = osp.join(self.root, label_path)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })
        # Cityscapes raw label id -> train id (19 classes; rest ignored).
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                              3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                              7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                              14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                              18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                              28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
        print('{} images are loaded!'.format(len(self.img_ids)))

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image and label by a factor in [0.7, 2.1]."""
        f_scale = 0.7 + random.randint(0, 14) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def id2trainId(self, label, reverse=False):
        """Remap raw label ids to train ids (or back when ``reverse=True``)."""
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy

    def __getitem__(self, index):
        """Load one sample and apply remap, scale, pad, random crop and flip."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        label = self.id2trainId(label)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                         pad_w, cv2.BORDER_CONSTANT,
                                         value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                                           pad_w, cv2.BORDER_CONSTANT,
                                           value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        # BUG FIX: the transpose result was discarded, leaving the image in
        # HWC order so the mirror flip below flipped channels, not width.
        image = image.transpose((2, 0, 1))
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class CSDataTestSet(data.Dataset):
    """Cityscapes test-time dataset: mean-subtracted, zero-padded images only.

    NOTE(review): the ``mean`` constructor argument is ignored — the original
    code overwrites it with the hard-coded BGR mean below. Kept as-is for
    backward compatibility; confirm before relying on the parameter.
    """

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        """
        Args:
            root: dataset root directory; paths in the list file are relative to it.
            list_path: text file with one image path per line.
            crop_size: minimum (height, width) the image is padded up to.
            mean: unused (see class note).
        """
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = (104.00698793, 116.66876762, 122.67891434)
        # FIX: close the list file deterministically (original leaked the handle).
        with open(list_path) as list_file:
            self.img_ids = [line.strip().split() for line in list_file]
        self.files = []
        for item in self.img_ids:
            # Only the image path is needed; the original also computed an
            # unused `name` local here, which has been dropped.
            self.files.append({
                "img": osp.join(self.root, item[0])
            })

    def __len__(self):
        """Number of test images."""
        return len(self.files)

    def __getitem__(self, index):
        """Return (image, original_shape, basename) for the indexed file."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Pad bottom/right with zeros up to the crop size.
            image = cv2.copyMakeBorder(image, 0, pad_h, 0,
                                       pad_w, cv2.BORDER_CONSTANT,
                                       value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, size, name
| true | true |
1c35e1fefd60a71454bd7c59d580149fde83be33 | 10,195 | py | Python | orthanc/ai-orchestrator.py | ImagingInformatics/orthanc-ai-orchestrator | 15396f160a51da1916513308d4e7e813e615ba58 | [
"MIT"
] | 9 | 2021-05-05T17:13:31.000Z | 2022-01-07T20:00:38.000Z | orthanc/ai-orchestrator.py | deepmd-tools/orthanc-ai-orchestrator | 15396f160a51da1916513308d4e7e813e615ba58 | [
"MIT"
] | null | null | null | orthanc/ai-orchestrator.py | deepmd-tools/orthanc-ai-orchestrator | 15396f160a51da1916513308d4e7e813e615ba58 | [
"MIT"
] | 1 | 2021-07-14T18:08:57.000Z | 2021-07-14T18:08:57.000Z | import orthanc,pprint,json,datetime,random,sys
#TODO store things into the DB
#TODO set a timer to automatically expire workitems after a given amount of time?
###############################################################################
# GLOBALS
###############################################################################
# In-memory store of UPS-style workitems, keyed by their SOP Instance UID.
# TODO: not persistent — lost on server restart (see module-level TODOs).
WORKITEMS = dict()
# Root used when generating new DICOM UIDs.
DICOM_UID_ROOT = '2.7446.76257' # 2.SIIM.ROCKS
# Workitem lifecycle states (ProcedureStepState, tag 00741000).
STATE_SCHEDULED = "SCHEDULED"
STATE_IN_PROGRESS = "IN PROGRESS"
STATE_COMPLETED = "COMPLETED"
STATE_CANCELED = "CANCELED"
STATES = [STATE_SCHEDULED, STATE_CANCELED, STATE_COMPLETED, STATE_IN_PROGRESS]
# DICOM tags (hex, group+element) that every workitem must carry,
# e.g. 0020000D = StudyInstanceUID, 00741000 = ProcedureStepState.
REQUIRED_TAGS = ['00080016','00080018','00081195','00081199','00100010','00100020','00100030','00100040','0020000D','00404041','0040A370','0040E025','00404005','00741000','00741200','00741204']
###############################################################################
# ORTHANC EVENT HOOKS
###############################################################################
# List all work
def listOrCreateWorkitems(output, uri, **request):
    """REST handler: list all workitems (GET) or create a new one (POST).

    POST bodies must be DICOM+JSON workitems carrying every REQUIRED_TAGS
    attribute and a StudyInstanceUID (0020000D) not already registered.
    Any other HTTP method gets a 405.
    """
    if request['method'] == 'GET':
        # TODO Add support for filtering via a GET query
        output.AnswerBuffer(json.dumps(list(WORKITEMS.values())), 'application/dicom+json')
    elif request['method'] == 'POST':
        # BUG FIX: this branch was a separate `if`, so GET requests fell
        # through to the `else` below and ALSO received a 405.
        try:
            workitem = json.loads(request['body'])
            missingAttributes = checkRequiredTagsPresent(workitem)
            # Check this new object has the bare-minimum tags/attributes
            if len(missingAttributes) > 0:
                msg = "Your new object is missing the following attribute(s): " + ", ".join(missingAttributes)
                output.SendHttpStatus(400, msg, len(msg))
                return
            # Check this study is NOT already listed
            if checkStudyUIDExists(workitem['0020000D']['Value'][0]):
                msg = "This study is already listed as a workitem"
                output.SendHttpStatus(400, msg, len(msg))
                return
            # If all successful so far, store the item under a fresh UID
            workitemId = getDicomIdentifier()
            WORKITEMS[workitemId] = workitem
            output.AnswerBuffer(json.dumps(WORKITEMS[workitemId]), 'application/dicom+json')
        except Exception as err:
            # BUG FIX: the original concatenated the exception *class*
            # (sys.exc_info()[0]) to a str, raising TypeError inside the handler.
            msg = "Unknown error occurred, might be caused by invalid data input. Error message was: " + str(err)
            print("Unhandled error while attempting to create a workitem manually: " + repr(err))
            output.SendHttpStatus(500, msg, len(msg))
            return
    else:
        output.SendMethodNotAllowed('GET,POST')
        return
orthanc.RegisterRestCallback('/ai-orchestrator/workitems', listOrCreateWorkitems)
def getWorkitem(output, uri, **request):
    """REST handler: return a single workitem by the UID captured in the URL.

    Responds 405 for non-GET methods and 404 for unknown workitem ids.
    """
    if request['method'] != 'GET':
        output.SendMethodNotAllowed('GET')
        return
    requested_id = request['groups'][0]
    workitem = WORKITEMS.get(requested_id)
    if workitem is None:
        msg = "No workitem found matching the ID supplied: " + requested_id
        output.SendHttpStatus(404, msg, len(msg))
        return
    output.AnswerBuffer(json.dumps(workitem), 'application/dicom+json')
orthanc.RegisterRestCallback('/ai-orchestrator/workitems/([0-9\\.]*)', getWorkitem)
def changeWorkItemState(output, uri, **request):
    """REST handler: replace a workitem via PUT, enforcing state transitions.

    Allowed lifecycle: SCHEDULED -> IN PROGRESS | CANCELED, and
    IN PROGRESS -> COMPLETED | CANCELED. The body must be a full workitem
    JSON object carrying all REQUIRED_TAGS and a valid ProcedureStepState.
    """
    if request['method'] != 'PUT':
        output.SendMethodNotAllowed('PUT')
        return
    workitemId = request['groups'][0]
    if (workitemId not in WORKITEMS):
        msg = "No workitem found matching the ID supplied: " + workitemId
        output.SendHttpStatus(404, msg, len(msg))
        return
    # ROBUSTNESS FIX: a malformed body used to raise an uncaught exception
    # (opaque 500); reject it with an explicit 400 instead.
    try:
        new = json.loads(request['body'])
    except ValueError:
        msg = "The request body is not valid JSON"
        output.SendHttpStatus(400, msg, len(msg))
        return
    old = WORKITEMS[workitemId]
    missingAttributes = checkRequiredTagsPresent(new)
    # Check this new object has the bare-minimum tags/attributes
    if len(missingAttributes) > 0:
        msg = "Your new object is missing the following attribute(s): " + ", ".join(missingAttributes)
        output.SendHttpStatus(400, msg, len(msg))
        return
    # Next check, the status should be one of the known statuses
    if new['00741000']['Value'][0] not in STATES:
        msg = "Your object's ProcedureStepState (00741000) must be one of: " + ", ".join(STATES)
        output.SendHttpStatus(400, msg, len(msg))
        return
    # Check the correct succession of states (scheduled -> in progress (OR canceled) -> completed OR canceled)
    oldState = old['00741000']['Value'][0]
    newState = new['00741000']['Value'][0]
    if oldState == STATE_SCHEDULED and (newState != STATE_IN_PROGRESS and newState != STATE_CANCELED):
        msg = "A workitem that is currently in SCHEDULED state can only move to IN PROGRESS or CANCELED"
        output.SendHttpStatus(400, msg, len(msg))
        return
    if oldState == STATE_IN_PROGRESS and (newState != STATE_COMPLETED and newState != STATE_CANCELED):
        msg = "A workitem that is currently in IN PROGRESS state can only move to COMPLETED or CANCELED"
        output.SendHttpStatus(400, msg, len(msg))
        return
    # If successful - store the new object
    WORKITEMS[workitemId] = new
    output.AnswerBuffer(json.dumps(WORKITEMS[workitemId]), 'application/dicom+json')
orthanc.RegisterRestCallback('/ai-orchestrator/workitems/([0-9\\.]*)/state', changeWorkItemState)
def OnChange(changeType, level, resourceId):
    """Orthanc change-event hook.

    On ORTHANC_STARTED it only logs readiness. When a study becomes STABLE
    (no new instances arriving) it builds a UPS-style workitem for the study
    — unless one is already registered — and stores it in WORKITEMS in the
    SCHEDULED state, labelled for a "<bodypart>-<modality>-pipeline".
    """
    if changeType == orthanc.ChangeType.ORTHANC_STARTED: # Server start-up
        print('AI-orchestrator plugin running!')
    if changeType == orthanc.ChangeType.STABLE_STUDY: # Study has stopped receiving news instances/series
        print('Stable study: %s' % resourceId)
        # Get more information about this study
        study = json.loads(orthanc.RestApiGet('/studies/' + resourceId))
        studyUid = study['MainDicomTags']['StudyInstanceUID']
        series = []
        bodyPart = None
        modality = None
        # Check this study is NOT already listed
        if checkStudyUIDExists(studyUid):
            print("This study is already listed as a workitem")
            return
        # Loop through the series within this study, and get additional attributes for each
        for seriesId in study['Series']:
            data = json.loads(orthanc.RestApiGet('/series/' + seriesId + '/shared-tags'))
            series.append(data)
            if( bodyPart == None ):
                # Body part (0018,0015) and modality (0008,0060) are taken from the
                # FIRST series only — NOTE(review): assumes they are uniform across
                # the study; confirm for multi-modality studies.
                bodyPart = str(data['0018,0015']['Value'])
                modality = str(data['0008,0060']['Value'])
        # TODO improve this to be more dynamic
        # NOTE(review): local variable name typo ('pipline'); the value itself is correct.
        pipline = bodyPart.lower() + '-' + modality.lower() + '-pipeline'
        # Create a workitem for this study
        workitemId = getDicomIdentifier()
        workitem = {
            '00080016': {'vr':'UI', 'Value': ['1.2.840.10008.5.1.4.34.6.1']}, # SOPClassUID
            '00080018': {'vr':'UI', 'Value': [workitemId]}, # SOPInstanceUID
            '00081195': {'vr':'UI', 'Value': ['']}, # UI [] TransactionUID
            '00081199': {'vr':'SQ', 'Value': [
                # This repeats for every series within the target study, so it is handled in a loop below
            ]}, # ReferencedSOPSequence
            '00100010': {'vr':'PN', 'Value': [study['PatientMainDicomTags']['PatientName']]}, # PatientName
            '00100020': {'vr':'LO', 'Value': [study['PatientMainDicomTags']['PatientID']]}, # PatientID
            '00100030': {'vr':'DA', 'Value': [study['PatientMainDicomTags']['PatientBirthDate']]}, # PatientBirthDate
            '00100040': {'vr':'CS', 'Value': [study['PatientMainDicomTags']['PatientSex']]}, # PatientSex
            '0020000D': {'vr':'UI', 'Value': [studyUid]}, # Study Instance UID
            '00404041': {'vr':'CS', 'Value': ['READY']}, # InputReadinessState
            '0040A370': {'vr':'SQ', 'Value': [{
                '00080050': {'vr': 'UI', 'Value': [study['MainDicomTags']['AccessionNumber']]}, #AccessionNumber
                '0020000D': {'vr': 'UI', 'Value': [studyUid]}, # Study Instance UID
            }]}, # SQ ReferencedRequestSequence
            '0040E025': {'vr':'SQ', 'Value': [{
                '00081190': {'vr': 'LO', 'Value': ['http://localhost:8042/dicom-web/studies/' + studyUid]}, # Retrieve URL
            }]}, # WADORSRetrievalSequence
            '00404005': {'vr':'DT', 'Value': [getDicomDate()]}, # Scheduled Procedure Step Start DateTime
            '00741000': {'vr':'CS', 'Value': [STATE_SCHEDULED]}, # ProcedureStepState
            '00741200': {'vr':'CS', 'Value': ['MEDIUM']}, # ScheduledProcedureStepPriority
            '00741204': {'vr':'LO', 'Value': [pipline]}, # ProcedureStepLabel
        }
        # Reference every series of the study in the ReferencedSOPSequence.
        for curSeries in series:
            workitem['00081199']['Value'].append({
                '00081150': {'vr': 'UI', 'Value': [curSeries['0008,0016']['Value']]}, # ReferencedSOPClassUID
                '00081155': {'vr': 'UI', 'Value': [curSeries['0020,000e']['Value']]}, # ReferencedSeriesUID
            })
        WORKITEMS[workitemId] = workitem
        #pprint.pprint(workitem)
orthanc.RegisterOnChangeCallback(OnChange)
###############################################################################
# UTILITY METHODS
###############################################################################
# Create a random DICOM UID
def getDicomIdentifier(root=None):
    """Generate a pseudo-random DICOM UID.

    Args:
        root: optional UID root to prefix; defaults to the module-level
            DICOM_UID_ROOT. Adding this (defaulted) parameter is
            backward-compatible with all existing callers.

    Returns:
        The root followed by 3 to 6 random numeric components.
    """
    uid = DICOM_UID_ROOT if root is None else root
    for _ in range(random.randint(3, 6)):
        uid += '.' + str(random.randint(1, 999999999))
    return uid
# Return DICOM-formatted date. If not date provided, it defaults to now
def getDicomDate(date=None):
    """Return a DICOM DT-formatted timestamp (YYYYMMDDHHMMSS).

    Args:
        date: datetime to format; defaults to now.
    """
    if date is None:  # idiom fix: `is None` instead of `== None`
        date = datetime.datetime.now()
    return date.strftime('%Y%m%d%H%M%S')
# Check a given study is NOT already listed
def checkStudyUIDExists(studyUid):
    """Return True if any stored workitem already references this StudyInstanceUID."""
    return any(item['0020000D']['Value'][0] == studyUid
               for item in WORKITEMS.values())
# Check a new/update workitem object to have the bare-minimum attributes
def checkRequiredTagsPresent(workitem):
    """Return the list of REQUIRED_TAGS missing from the given workitem dict."""
    return [tag for tag in REQUIRED_TAGS if tag not in workitem]
| true | true |
1c35e2c92711b4fa0b92f119506a86a49d3074e1 | 2,056 | py | Python | data_loader.py | donghaW/RCF-pytorch | 6380209ef747abefa87637e60d33369ba423814d | [
"MIT"
] | null | null | null | data_loader.py | donghaW/RCF-pytorch | 6380209ef747abefa87637e60d33369ba423814d | [
"MIT"
] | null | null | null | data_loader.py | donghaW/RCF-pytorch | 6380209ef747abefa87637e60d33369ba423814d | [
"MIT"
] | null | null | null | from os.path import join
import cv2
import numpy as np
from PIL import Image
from torch.utils import data
def prepare_image_PIL(im):
    """Convert a PIL-loaded RGB float image (H, W, C) to mean-subtracted BGR (C, H, W)."""
    # RGB -> BGR; ascontiguousarray forces a real copy of the reversed view
    # (the original achieved the same with `- np.zeros_like(im)`).
    bgr = np.ascontiguousarray(im[:, :, ::-1])
    bgr -= np.array((104.00698793, 116.66876762, 122.67891434))
    return np.transpose(bgr, (2, 0, 1))
def prepare_image_cv2(im):
    """Mean-subtract a cv2-loaded BGR float image IN PLACE and return it as (C, H, W)."""
    im -= np.array((104.00698793, 116.66876762, 122.67891434))
    return im.transpose((2, 0, 1))
class BSDS_RCFLoader(data.Dataset):
    """Dataloader for the HED-BSDS500 (+ PASCAL) edge-detection dataset.

    In 'train' mode items are (image, label) pairs read with OpenCV; labels
    are mapped to 0 (non-edge), 1 (edge) and 2 (ambiguous / ignored).
    In 'test' mode items are single PIL-loaded images.
    """

    def __init__(self, root='data/HED-BSDS_PASCAL', split='train', transform=False):
        """
        Args:
            root: dataset root directory.
            split: 'train' or 'test'; anything else raises ValueError.
            transform: unused flag, kept for interface compatibility.
        """
        self.root = root
        self.split = split
        self.transform = transform
        if self.split == 'train':
            self.filelist = join(self.root, 'bsds_pascal_train_pair.lst')
        elif self.split == 'test':
            self.filelist = join(self.root, 'test.lst')
        else:
            raise ValueError("Invalid split type!")
        with open(self.filelist, 'r') as f:
            self.filelist = f.readlines()

    def __len__(self):
        """Number of lines in the split's list file."""
        return len(self.filelist)

    def __getitem__(self, index):
        """Return (image, label) for 'train', or just image for 'test'."""
        if self.split == "train":
            # One "image_path label_path" pair per line.
            img_file, lb_file = self.filelist[index].split()
            lb = np.array(Image.open(join(self.root, lb_file)), dtype=np.float32)
            if lb.ndim == 3:
                # Collapse an RGB label image to a single channel.
                lb = np.squeeze(lb[:, :, 0])
            assert lb.ndim == 2
            lb = lb[np.newaxis, :, :]
            # 0 stays non-edge; (0, 128) -> 2 (ambiguous, ignored in the loss);
            # >= 128 -> 1 (edge). The original also had a no-op `lb[lb == 0] = 0`,
            # removed here.
            lb[np.logical_and(lb > 0, lb < 128)] = 2
            lb[lb >= 128] = 1
            img = np.array(cv2.imread(join(self.root, img_file)), dtype=np.float32)
            return prepare_image_cv2(img), lb
        # Test split: single image per line (the original re-checked the split
        # in a second, redundant `if`; merged into one branch here).
        img_file = self.filelist[index].rstrip()
        img = np.array(Image.open(join(self.root, img_file)), dtype=np.float32)
        return prepare_image_PIL(img)
| 31.630769 | 84 | 0.555934 | from os.path import join
import cv2
import numpy as np
from PIL import Image
from torch.utils import data
def prepare_image_PIL(im):
im = im[:,:,::-1] - np.zeros_like(im)
im -= np.array((104.00698793,116.66876762,122.67891434))
im = np.transpose(im, (2, 0, 1))
return im
def prepare_image_cv2(im):
im -= np.array((104.00698793,116.66876762,122.67891434))
im = np.transpose(im, (2, 0, 1))
return im
class BSDS_RCFLoader(data.Dataset):
def __init__(self, root='data/HED-BSDS_PASCAL', split='train', transform=False):
self.root = root
self.split = split
self.transform = transform
if self.split == 'train':
self.filelist = join(self.root, 'bsds_pascal_train_pair.lst')
elif self.split == 'test':
self.filelist = join(self.root, 'test.lst')
else:
raise ValueError("Invalid split type!")
with open(self.filelist, 'r') as f:
self.filelist = f.readlines()
def __len__(self):
return len(self.filelist)
def __getitem__(self, index):
if self.split == "train":
img_file, lb_file = self.filelist[index].split()
lb = np.array(Image.open(join(self.root, lb_file)), dtype=np.float32)
if lb.ndim == 3:
lb = np.squeeze(lb[:, :, 0])
assert lb.ndim == 2
lb = lb[np.newaxis, :, :]
lb[lb == 0] = 0
lb[np.logical_and(lb>0, lb<128)] = 2
lb[lb >= 128] = 1
else:
img_file = self.filelist[index].rstrip()
if self.split == "train":
img = np.array(cv2.imread(join(self.root, img_file)), dtype=np.float32)
img = prepare_image_cv2(img)
return img, lb
else:
img = np.array(Image.open(join(self.root, img_file)), dtype=np.float32)
img = prepare_image_PIL(img)
return img
| true | true |
1c35e2d23130e4eaecba82b03fed6a92ad07b2b7 | 318 | py | Python | Exercicios-Python/034.py | LuizHenriqudesouza419/Exercicios-de-Python3-main | af53cc1eea1e22a304e206a453c4b24bf67615a8 | [
"MIT"
] | 1 | 2021-11-08T22:59:33.000Z | 2021-11-08T22:59:33.000Z | Exercicios-Python/034.py | LuizHenriqudesouza419/Exercicios-de-Python3-main | af53cc1eea1e22a304e206a453c4b24bf67615a8 | [
"MIT"
] | null | null | null | Exercicios-Python/034.py | LuizHenriqudesouza419/Exercicios-de-Python3-main | af53cc1eea1e22a304e206a453c4b24bf67615a8 | [
"MIT"
] | null | null | null | #Aumento salarial
# Salary raise: 15% for salaries up to R$1250.00, 10% above that.
salário = float(input('Qual é o seu salario ? '))
if salário > 1250.00:
    novo = salário + (salário * 10 / 100)
    print('Seu salário com 10% de aumento ficou {}'.format(novo))
else:
    novo = salário + (salário * 15 / 100)
    print('Seu novo salário com 15% de aumento ficou {}'.format(novo))
| 35.333333 | 70 | 0.644654 |
salário = float(input('Qual é o seu salario ? '))
if salário <= 1250.00:
novo = salário + (salário * 15 / 100)
print('Seu novo salário com 15% de aumento ficou {}'.format(novo))
else:
novo = salário + (salário * 10 / 100)
print('Seu salário com 10% de aumento ficou {}'.format(novo))
| true | true |
1c35e3eabade235dc8728686a9d4af7050c07687 | 535 | py | Python | creational/factory_method/product.py | pascalweiss/gof_design_patterns | d142ebf21bb1a1e7925b0e7915eb6d857df58299 | [
"Apache-2.0"
] | null | null | null | creational/factory_method/product.py | pascalweiss/gof_design_patterns | d142ebf21bb1a1e7925b0e7915eb6d857df58299 | [
"Apache-2.0"
] | null | null | null | creational/factory_method/product.py | pascalweiss/gof_design_patterns | d142ebf21bb1a1e7925b0e7915eb6d857df58299 | [
"Apache-2.0"
] | null | null | null |
# --- Product ---
class Tree:
    """Abstract Product: common interface for every tree species."""

    def get_name(self):
        """Return the species name; concrete subclasses must override."""
        raise NotImplementedError
# --- ConcreteProduct ---
class CorkOak(Tree):
    """Concrete Product: a cork oak tree."""

    def get_name(self):
        return "cork oak"
class Olive(Tree):
    """Concrete Product: an olive tree."""

    def get_name(self):
        return "olive"
class Cypress(Tree):
    """Concrete Product: a cypress tree."""

    def get_name(self):
        return "cypress"
class Spruce(Tree):
    """Concrete Product: a spruce tree."""

    def get_name(self):
        return "spruce"
class Pine(Tree):
    """Concrete Product: a pine tree."""

    def get_name(self):
        return "pine"
class Beech(Tree):
    """Concrete Product: a beech tree."""

    def get_name(self):
        return "beech"
| 13.375 | 33 | 0.594393 |
class Tree:
def get_name(self):
raise NotImplementedError
class CorkOak(Tree):
def get_name(self):
return "cork oak"
class Olive(Tree):
def get_name(self):
return "olive"
class Cypress(Tree):
def get_name(self):
return "cypress"
class Spruce(Tree):
def get_name(self):
return "spruce"
class Pine(Tree):
def get_name(self):
return "pine"
class Beech(Tree):
def get_name(self):
return "beech"
| true | true |
1c35e4716c9deef36f0e17b14d4627fc149e166c | 3,884 | py | Python | height/curves/female_months.py | VictorArnaud/sdcurve | d5397b0193fb01e94dc93c9fad5e2db195754384 | [
"MIT"
] | null | null | null | height/curves/female_months.py | VictorArnaud/sdcurve | d5397b0193fb01e94dc93c9fad5e2db195754384 | [
"MIT"
] | 8 | 2018-05-17T22:50:18.000Z | 2018-05-19T01:15:26.000Z | height/curves/female_months.py | VWApplications/sdcurve | d5397b0193fb01e94dc93c9fad5e2db195754384 | [
"MIT"
] | 1 | 2018-06-27T00:18:21.000Z | 2018-06-27T00:18:21.000Z | class HeightCurveFemaleMonths(object):
"""
Height-based growth curve for females aged 0 to 36 months
"""
    def __init__(self):
        """
        Growth curve based on the height of female children with Down Syndrome
        constructor.
        """
        # Chart title shown to the user.
        self.title = "Height-based growth curve for females aged 0 to 36 months"
        # Ages in months (x axis).
        self.ages = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                     18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                     33, 34, 35, 36]
        # Height percentile curves (presumably in cm — TODO confirm units),
        # one value per month of age, aligned with self.ages.
        self.percentis_3 = [41.54, 44.24, 46.88, 49.40, 51.74, 53.89, 55.84, 57.60,
                            59.18, 60.60, 61.90, 63.08, 64.18, 65.20, 66.17, 67.09,
                            67.96, 68.80, 69.61, 70.38, 71.13, 71.84, 72.53, 73.20,
                            73.85, 74.49, 75.11, 75.72, 76.32, 76.91, 77.49, 78.08,
                            78.65, 79.23, 79.79, 80.36, 80.92]
        self.percentis_10 = [43.84, 46.51, 49.13, 51.62, 53.94, 56.07, 58.00, 59.73,
                             61.29, 62.69, 63.96, 65.13, 66.21, 67.23, 68.19, 69.10,
                             69.98, 70.82, 71.64, 72.43, 73.19, 73.92, 74.64, 75.33,
                             76.01, 76.68, 77.33, 77.98, 78.62, 79.25, 79.88, 80.51,
                             81.13, 81.76, 82.38, 82.99, 83.60]
        self.percentis_25 = [45.87, 48.57, 51.21, 53.73, 56.06, 58.19, 60.12, 61.85,
                             63.40, 64.79, 66.06, 67.22, 68.29, 69.30, 70.26, 71.18,
                             72.06, 72.91, 73.74, 74.54, 75.31, 76.07, 76.80, 77.52,
                             78.23, 78.92, 79.60, 80.28, 80.94, 81.61, 82.27, 82.93,
                             83.58, 84.24, 84.89, 85.54, 86.19]
        self.percentis_50 = [47.70, 50.46, 53.16, 55.72, 58.10, 60.26, 62.21, 63.95,
                             65.51, 66.91, 68.17, 69.34, 70.41, 71.42, 72.39, 73.31,
                             74.20, 75.06, 75.90, 76.72, 77.51, 78.28, 79.03, 79.77,
                             80.49, 81.21, 81.91, 82.60, 83.29, 83.97, 84.65, 85.33,
                             86.00, 86.68, 87.35, 88.01, 88.68]
        self.percentis_75 = [49.37, 52.22, 54.99, 57.63, 60.06, 62.27, 64.26, 66.04,
                             67.62, 69.04, 70.32, 71.49, 72.58, 73.60, 74.57, 75.51,
                             76.41, 77.28, 78.14, 78.97, 79.77, 80.56, 81.33, 82.08,
                             82.82, 83.54, 84.26, 84.96, 85.65, 86.34, 87.03, 87.71,
                             88.39, 89.07, 89.75, 90.42, 91.09]
        self.percentis_90 = [50.91, 53.85, 56.73, 59.45, 61.96, 64.24, 66.29, 68.12,
                             69.74, 71.18, 72.49, 73.68, 74.79, 75.83, 76.81, 77.76,
                             78.68, 79.57, 80.44, 81.29, 82.11, 82.91, 83.69, 84.45,
                             85.20, 85.93, 86.64, 87.35, 88.04, 88.73, 89.40, 90.08,
                             90.76, 91.43, 92.09, 92.76, 93.42]
        self.percentis_97 = [52.34, 55.39, 58.37, 61.20, 63.80, 66.17, 68.29, 70.18,
                             71.85, 73.34, 74.69, 75.91, 77.05, 78.11, 79.12, 80.09,
                             81.03, 81.94, 82.83, 83.69, 84.53, 85.34, 86.13, 86.89,
                             87.64, 88.37, 89.08, 89.77, 90.45, 91.12, 91.78, 92.44,
                             93.09, 93.75, 94.39, 95.04, 95.69]
    def make(self):
        """
        Get the values to make the chart.

        Returns a dict bundling the title, the age axis and every percentile
        series, in the shape expected by the chart renderer.
        """
        return {
            'ages': self.ages,
            'title': self.title,
            'percentis_3': self.percentis_3,
            'percentis_10': self.percentis_10,
            'percentis_25': self.percentis_25,
            'percentis_50': self.percentis_50,
            'percentis_75': self.percentis_75,
            'percentis_90': self.percentis_90,
            'percentis_97': self.percentis_97
        }
| 51.105263 | 84 | 0.438723 | class HeightCurveFemaleMonths(object):
def __init__(self):
self.title = "Height-based growth curve for females aged 0 to 36 months"
self.ages = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36]
self.percentis_3 = [41.54, 44.24, 46.88, 49.40, 51.74, 53.89, 55.84, 57.60,
59.18, 60.60, 61.90, 63.08, 64.18, 65.20, 66.17, 67.09,
67.96, 68.80, 69.61, 70.38, 71.13, 71.84, 72.53, 73.20,
73.85, 74.49, 75.11, 75.72, 76.32, 76.91, 77.49, 78.08,
78.65, 79.23, 79.79, 80.36, 80.92]
self.percentis_10 = [43.84, 46.51, 49.13, 51.62, 53.94, 56.07, 58.00, 59.73,
61.29, 62.69, 63.96, 65.13, 66.21, 67.23, 68.19, 69.10,
69.98, 70.82, 71.64, 72.43, 73.19, 73.92, 74.64, 75.33,
76.01, 76.68, 77.33, 77.98, 78.62, 79.25, 79.88, 80.51,
81.13, 81.76, 82.38, 82.99, 83.60]
self.percentis_25 = [45.87, 48.57, 51.21, 53.73, 56.06, 58.19, 60.12, 61.85,
63.40, 64.79, 66.06, 67.22, 68.29, 69.30, 70.26, 71.18,
72.06, 72.91, 73.74, 74.54, 75.31, 76.07, 76.80, 77.52,
78.23, 78.92, 79.60, 80.28, 80.94, 81.61, 82.27, 82.93,
83.58, 84.24, 84.89, 85.54, 86.19]
self.percentis_50 = [47.70, 50.46, 53.16, 55.72, 58.10, 60.26, 62.21, 63.95,
65.51, 66.91, 68.17, 69.34, 70.41, 71.42, 72.39, 73.31,
74.20, 75.06, 75.90, 76.72, 77.51, 78.28, 79.03, 79.77,
80.49, 81.21, 81.91, 82.60, 83.29, 83.97, 84.65, 85.33,
86.00, 86.68, 87.35, 88.01, 88.68]
self.percentis_75 = [49.37, 52.22, 54.99, 57.63, 60.06, 62.27, 64.26, 66.04,
67.62, 69.04, 70.32, 71.49, 72.58, 73.60, 74.57, 75.51,
76.41, 77.28, 78.14, 78.97, 79.77, 80.56, 81.33, 82.08,
82.82, 83.54, 84.26, 84.96, 85.65, 86.34, 87.03, 87.71,
88.39, 89.07, 89.75, 90.42, 91.09]
self.percentis_90 = [50.91, 53.85, 56.73, 59.45, 61.96, 64.24, 66.29, 68.12,
69.74, 71.18, 72.49, 73.68, 74.79, 75.83, 76.81, 77.76,
78.68, 79.57, 80.44, 81.29, 82.11, 82.91, 83.69, 84.45,
85.20, 85.93, 86.64, 87.35, 88.04, 88.73, 89.40, 90.08,
90.76, 91.43, 92.09, 92.76, 93.42]
self.percentis_97 = [52.34, 55.39, 58.37, 61.20, 63.80, 66.17, 68.29, 70.18,
71.85, 73.34, 74.69, 75.91, 77.05, 78.11, 79.12, 80.09,
81.03, 81.94, 82.83, 83.69, 84.53, 85.34, 86.13, 86.89,
87.64, 88.37, 89.08, 89.77, 90.45, 91.12, 91.78, 92.44,
93.09, 93.75, 94.39, 95.04, 95.69]
def make(self):
return {
'ages': self.ages,
'title': self.title,
'percentis_3': self.percentis_3,
'percentis_10': self.percentis_10,
'percentis_25': self.percentis_25,
'percentis_50': self.percentis_50,
'percentis_75': self.percentis_75,
'percentis_90': self.percentis_90,
'percentis_97': self.percentis_97
}
| true | true |
1c35e4b1e40e29f048bd365d01a0fe5a6056cdd9 | 3,615 | py | Python | experiments/explorations/experiments/experiment_000104/repetition_000007/experiment_config.py | flowersteam/automated_discovery_of_lenia_patterns | 97cc7cde2120fa95225d1e470e00b8aa8c034e97 | [
"MIT"
] | 10 | 2019-10-05T16:22:11.000Z | 2021-12-30T14:09:42.000Z | experiments/explorations/experiments/experiment_000104/repetition_000007/experiment_config.py | flowersteam/automated_discovery_of_lenia_patterns | 97cc7cde2120fa95225d1e470e00b8aa8c034e97 | [
"MIT"
] | null | null | null | experiments/explorations/experiments/experiment_000104/repetition_000007/experiment_config.py | flowersteam/automated_discovery_of_lenia_patterns | 97cc7cde2120fa95225d1e470e00b8aa8c034e97 | [
"MIT"
] | 2 | 2019-10-14T12:12:38.000Z | 2020-09-16T11:18:26.000Z | import autodisc as ad
def get_system_parameters():
    """Return default Lenia system parameters with a 256x256 grid."""
    params = ad.systems.Lenia.default_system_parameters()
    params.size_y = 256
    params.size_x = 256
    return params
def get_explorer_config():
    """Build the goal-space explorer configuration for this Lenia experiment.

    Defines how each run parameter (CPPN-evolved initial state plus the
    Lenia scalars R, T, b, m, s) is initialised and mutated, which final-state
    statistics form the goal space, and how goals and source policies are
    selected. Repetition seed is 7 with 1000 random bootstrap explorations.
    """
    explorer_config = ad.explorers.GoalSpaceExplorer.default_config()
    explorer_config.seed = 7
    explorer_config.num_of_random_initialization = 1000
    explorer_config.run_parameters = []
    # Parameter 1: init state (2D pattern produced by a CPPN evolved with NEAT)
    parameter = ad.Config()
    parameter.name = 'init_state'
    parameter.type = 'cppn_evolution'
    parameter.init = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
    parameter.init.neat_config_file = 'neat_config.cfg'
    parameter.init.n_generations = 1
    parameter.init.best_genome_of_last_generation = True
    parameter.mutate = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
    parameter.mutate.neat_config_file = 'neat_config.cfg'
    parameter.mutate.n_generations = 2
    parameter.mutate.best_genome_of_last_generation = True
    explorer_config.run_parameters.append(parameter)
    # Parameter 2: R (kernel radius, discrete in [2, 20])
    parameter = ad.Config()
    parameter.name = 'R'
    parameter.type = 'sampling'
    parameter.init = ('discrete', 2, 20)
    parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 2, 'max': 20}
    explorer_config.run_parameters.append(parameter)
    # Parameter 3: T (time resolution, discrete in [1, 20])
    parameter = ad.Config()
    parameter.name = 'T'
    parameter.type = 'sampling'
    parameter.init = ('discrete', 1, 20)
    parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 1, 'max': 20}
    explorer_config.run_parameters.append(parameter)
    # Parameter 4: b (kernel peak vector of 1-3 components, each in [0, 1])
    parameter = ad.Config()
    parameter.name = 'b'
    parameter.type = 'sampling'
    parameter.init = ('function', ad.helper.sampling.sample_vector, (('discrete', 1, 3), (0, 1)))
    parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.05, 'min': 0, 'max': 1}
    explorer_config.run_parameters.append(parameter)
    # Parameter 5: m (growth center, continuous in [0, 1])
    parameter = ad.Config()
    parameter.name = 'm'
    parameter.type = 'sampling'
    parameter.init = ('continuous', 0, 1)
    parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.05, 'min': 0, 'max': 1}
    explorer_config.run_parameters.append(parameter)
    # Parameter 6: s (growth width, continuous in [0.001, 0.3])
    parameter = ad.Config()
    parameter.name = 's'
    parameter.type = 'sampling'
    parameter.init = ('continuous', 0.001, 0.3)
    parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.01, 'min': 0.001, 'max': 0.3}
    explorer_config.run_parameters.append(parameter)
    # which statistics are used as a goal space (final-step values only)
    explorer_config.goal_space_representation.type = 'statisticsrepresentation'
    explorer_config.goal_space_representation.config = ad.representations.static.StatisticRepresentation.default_config()
    explorer_config.goal_space_representation.config.statistics = ['activation_mass[-1]','activation_volume[-1]','activation_mass_asymmetry[-1]','activation_mass_distribution[-1]']
    explorer_config.goal_space_representation.config.distance_function = ad.systems.lenia.LeniaStatistics.calc_goalspace_distance
    # how are goals sampled (uniformly within these per-dimension ranges)
    explorer_config.goal_selection.type = 'random'
    explorer_config.goal_selection.sampling = [(0, 1),(0, 1),(-1, 1),(0, 1)]
    # how are the source policies for a mutation are selected
    explorer_config.source_policy_selection.type = 'optimal'
    return explorer_config
def get_number_of_explorations():
    """Total number of exploration runs to perform in this experiment."""
    return 5000
| 38.457447 | 180 | 0.712033 | import autodisc as ad
def get_system_parameters():
system_parameters = ad.systems.Lenia.default_system_parameters()
system_parameters.size_y = 256
system_parameters.size_x = 256
return system_parameters
def get_explorer_config():
explorer_config = ad.explorers.GoalSpaceExplorer.default_config()
explorer_config.seed = 7
explorer_config.num_of_random_initialization = 1000
explorer_config.run_parameters = []
parameter = ad.Config()
parameter.name = 'init_state'
parameter.type = 'cppn_evolution'
parameter.init = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
parameter.init.neat_config_file = 'neat_config.cfg'
parameter.init.n_generations = 1
parameter.init.best_genome_of_last_generation = True
parameter.mutate = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
parameter.mutate.neat_config_file = 'neat_config.cfg'
parameter.mutate.n_generations = 2
parameter.mutate.best_genome_of_last_generation = True
explorer_config.run_parameters.append(parameter)
parameter = ad.Config()
parameter.name = 'R'
parameter.type = 'sampling'
parameter.init = ('discrete', 2, 20)
parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 2, 'max': 20}
explorer_config.run_parameters.append(parameter)
parameter = ad.Config()
parameter.name = 'T'
parameter.type = 'sampling'
parameter.init = ('discrete', 1, 20)
parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 1, 'max': 20}
explorer_config.run_parameters.append(parameter)
parameter = ad.Config()
parameter.name = 'b'
parameter.type = 'sampling'
parameter.init = ('function', ad.helper.sampling.sample_vector, (('discrete', 1, 3), (0, 1)))
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.05, 'min': 0, 'max': 1}
explorer_config.run_parameters.append(parameter)
parameter = ad.Config()
parameter.name = 'm'
parameter.type = 'sampling'
parameter.init = ('continuous', 0, 1)
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.05, 'min': 0, 'max': 1}
explorer_config.run_parameters.append(parameter)
parameter = ad.Config()
parameter.name = 's'
parameter.type = 'sampling'
parameter.init = ('continuous', 0.001, 0.3)
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.01, 'min': 0.001, 'max': 0.3}
explorer_config.run_parameters.append(parameter)
explorer_config.goal_space_representation.type = 'statisticsrepresentation'
explorer_config.goal_space_representation.config = ad.representations.static.StatisticRepresentation.default_config()
explorer_config.goal_space_representation.config.statistics = ['activation_mass[-1]','activation_volume[-1]','activation_mass_asymmetry[-1]','activation_mass_distribution[-1]']
explorer_config.goal_space_representation.config.distance_function = ad.systems.lenia.LeniaStatistics.calc_goalspace_distance
explorer_config.goal_selection.type = 'random'
explorer_config.goal_selection.sampling = [(0, 1),(0, 1),(-1, 1),(0, 1)]
explorer_config.source_policy_selection.type = 'optimal'
return explorer_config
def get_number_of_explorations():
return 5000
| true | true |
1c35e51a479f03f6c0cb838cd0d553447b63b535 | 1,660 | py | Python | app/ports/tests/test_filters.py | gagarine/macports-webapp | 3f08a430218df89dc6cf7864482d55c156013ce4 | [
"BSD-2-Clause"
] | null | null | null | app/ports/tests/test_filters.py | gagarine/macports-webapp | 3f08a430218df89dc6cf7864482d55c156013ce4 | [
"BSD-2-Clause"
] | 2 | 2021-06-09T19:19:08.000Z | 2021-06-10T20:33:48.000Z | app/ports/tests/test_filters.py | gagarine/macports-webapp | 3f08a430218df89dc6cf7864482d55c156013ce4 | [
"BSD-2-Clause"
] | null | null | null | import os
from django.test import TransactionTestCase, Client
from django.urls import reverse
from ports.models import Port
from MacPorts.config import TEST_PORTINDEX_JSON
class TestDependencies(TransactionTestCase):
    """Integration tests for the port search views.

    Uses TransactionTestCase with reset_sequences so the fixture ports
    loaded from TEST_PORTINDEX_JSON get predictable primary keys on each run.
    """

    reset_sequences = True

    def setUp(self):
        # Fresh HTTP client and fixture data before every test.
        self.client = Client()
        Port.load(TEST_PORTINDEX_JSON)

    def test_search(self):
        """Global search: by name (prefix and exact) and by description."""
        response1 = self.client.get(reverse('ports_search'), data={
            'search_by': 'name',
            'name': 'port',
            'search_text': 'port'
        })
        response2 = self.client.get(reverse('ports_search'), data={
            'search_by': 'description',
            'description': 'categoryA',
            'search_text': 'categoryA'
        })
        response3 = self.client.get(reverse('ports_search'), data={
            'search_by': 'name',
            'name': 'port-A5',
            'search_text': 'port-A5'
        })
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(response1.context['ports'].count(), 8)
        self.assertEqual(response2.context['ports'].count(), 6)
        self.assertEqual(response3.context['ports'].count(), 1)

    def test_search_in_category(self):
        """Search restricted to a single category."""
        response = self.client.get(reverse('search_ports_in_category'), data={
            'name': 'port-A3',
            'categories__name': 'categoryA',
        })
        self.assertEqual(response.context['ports'].count(), 1)

    def test_search_in_maintainer(self):
        """Search restricted to a single maintainer."""
        response = self.client.get(reverse('search_ports_in_maintainer'), data={
            'name': 'port-A',
            'maintainers__name': 'user',
        })
        self.assertEqual(response.context['ports'].count(), 4)
| 29.642857 | 80 | 0.605422 | import os
from django.test import TransactionTestCase, Client
from django.urls import reverse
from ports.models import Port
from MacPorts.config import TEST_PORTINDEX_JSON
class TestDependencies(TransactionTestCase):
reset_sequences = True
def setUp(self):
self.client = Client()
Port.load(TEST_PORTINDEX_JSON)
def test_search(self):
response1 = self.client.get(reverse('ports_search'), data={
'search_by': 'name',
'name': 'port',
'search_text': 'port'
})
response2 = self.client.get(reverse('ports_search'), data={
'search_by': 'description',
'description': 'categoryA',
'search_text': 'categoryA'
})
response3 = self.client.get(reverse('ports_search'), data={
'search_by': 'name',
'name': 'port-A5',
'search_text': 'port-A5'
})
self.assertEquals(response1.context['ports'].count(), 8)
self.assertEquals(response2.context['ports'].count(), 6)
self.assertEquals(response3.context['ports'].count(), 1)
def test_search_in_category(self):
response = self.client.get(reverse('search_ports_in_category'), data={
'name': 'port-A3',
'categories__name': 'categoryA',
})
self.assertEquals(response.context['ports'].count(), 1)
def test_search_in_maintainer(self):
response = self.client.get(reverse('search_ports_in_maintainer'), data={
'name': 'port-A',
'maintainers__name': 'user',
})
self.assertEquals(response.context['ports'].count(), 4)
| true | true |
1c35e5f10ca1d74c93cb10e6516ba3ec8ab810db | 4,633 | py | Python | apps/predict/src/predict.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | 1 | 2020-02-02T13:53:21.000Z | 2020-02-02T13:53:21.000Z | apps/predict/src/predict.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | null | null | null | apps/predict/src/predict.py | kikei/btc-bot-ai | cb118fa1809ebef472a2025be697c9050e948009 | [
"Apache-2.0"
] | null | null | null | import datetime
# Numpy
import numpy as np
# Matplotlib
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from Plotter import Plotter
from dsp import crosszero
from learningUtils import validated, to2d, zscore, loadModel
from utils import readConfig, getLogger, reportTrend, loadnpy, StopWatch
logger = getLogger()
logger.debug('Start prediction.')
# Measure total run time of the prediction step.
timer = StopWatch()
timer.start()
config = readConfig('predict.ini')
INPUT_SIZE = config['predict'].getint('fitting.inputsize')
SAMPLES_PREDICT = config['train'].getint('samples.predict')
def load(exchanger, unit, ty):
  # Load the cached numpy series for (exchanger, unit, ty); NaNs become 0.
  return loadnpy(config, exchanger, unit, ty, nan=0.)
# Hourly bitFlyer ask-price series: average/max/min, the 'BB+2'/'BB-2'
# bands (presumably Bollinger +/-2 sigma -- named so upstream), and further
# derived indicator lines (Conv/Base/Prc1/Prc2).
Xbh1 = load('bitflyer', 'hourly', 'askAverage')
Xbh2 = load('bitflyer', 'hourly', 'askMax')
Xbh3 = load('bitflyer', 'hourly', 'askMin')
Xbhb1 = load('bitflyer', 'hourly', 'askAverageBB+2')
Xbhb2 = load('bitflyer', 'hourly', 'askAverageBB-2')
Xbhi1 = load('bitflyer', 'hourly', 'askAverageConv')
Xbhi2 = load('bitflyer', 'hourly', 'askAverageBase')
Xbhi3 = load('bitflyer', 'hourly', 'askAveragePrc1')
Xbhi4 = load('bitflyer', 'hourly', 'askAveragePrc2')
# Minutely ask averages from two exchanges (features 9 and 10 below).
Xbm1 = loadnpy(config, 'bitflyer', 'minutely', 'askAverage')
Xqm1 = loadnpy(config, 'quoine', 'minutely', 'askAverage')
# Target series: hourly close-trend labels (used for the comparison plot).
ybh1 = load('bitflyer', 'hourly', 'askCloseTrend')
ybh1 = validated(ybh1)
# Assemble a (dataSize, sampleSize, featureCount) tensor of sliding windows.
sampleSize = INPUT_SIZE
featureCount = 11
availableSize = len(Xbhi4)
dataSize = availableSize - sampleSize + 1
Xbh0 = np.zeros((dataSize, sampleSize, featureCount))
Xbh0[:,:,0] = to2d(Xbh1, sampleSize, available=availableSize)
Xbh0[:,:,1] = to2d(Xbh2, sampleSize, available=availableSize)
Xbh0[:,:,2] = to2d(Xbh3, sampleSize, available=availableSize)
Xbh0[:,:,3] = to2d(Xbhb1, sampleSize, available=availableSize)
Xbh0[:,:,4] = to2d(Xbhb2, sampleSize, available=availableSize)
Xbh0[:,:,5] = to2d(Xbhi1, sampleSize, available=availableSize)
Xbh0[:,:,6] = to2d(Xbhi2, sampleSize, available=availableSize)
Xbh0[:,:,7] = to2d(Xbhi3, sampleSize, available=availableSize)
Xbh0[:,:,8] = to2d(Xbhi4, sampleSize, available=availableSize)
# Set up the minutely features; stride=60 takes one minutely window per hour.
availableSizeM = (dataSize - 1) * 60 + sampleSize
d = datetime.datetime.now()
minutesToday = d.hour * 60 + d.minute
# The newest row uses the latest raw minutes directly; earlier rows drop
# today's partial minutes before windowing.
Xbh0[-1:,:,9] = Xbm1[-sampleSize:]
Xbh0[:-1,:,9] = to2d(Xbm1[:-minutesToday], sampleSize,
                     available=availableSizeM, stride=60)
Xbh0[-1:,:,10] = Xqm1[-sampleSize:]
Xbh0[:-1,:,10] = to2d(Xqm1[:-minutesToday], sampleSize,
                      available=availableSizeM, stride=60)
# Flatten each (sampleSize, featureCount) window into one flat row,
# feature-major: columns [j*sampleSize, (j+1)*sampleSize) hold feature j.
dataSize = Xbh0.shape[0]
Xbh = np.zeros((dataSize, Xbh0.shape[1] * Xbh0.shape[2]))
for i in range(0, dataSize):
  for j in range(0, featureCount):
    Xbh[i,j*sampleSize:(j+1)*sampleSize] = Xbh0[i,:,j]
# Align the target labels with the assembled windows.
ybh0 = ybh1[len(ybh1)-availableSize+sampleSize-1:]
# Restore the trained trend model.
yModel = loadModel(config, 'trend')
# Predict the trend score for every window (features are z-scored first).
logger.debug('Predicting current trend, Xbh.shape={x}...'.format(x=Xbh.shape))
ybhPred = yModel.predict(zscore(Xbh))[:,0]
def smoothPredicted(y, n, z=None):
  """Smooth a prediction series with a causal weighted kernel.

  Builds a length-2n kernel whose second half holds the weights
  z(0), ..., z(n-1) (default: linearly increasing i/n), normalizes the
  kernel to sum to 1, and convolves it with y (mode='same', so the output
  has the same length as y).

  :param y: 1-D array-like of predictions
  :param n: half-length of the smoothing kernel
  :param z: optional weight function i -> weight; defaults to i/n
  :raises ValueError: if the weights sum to zero (e.g. n == 1 with the
      default z), which previously produced a silent all-NaN result.
  """
  if z is None:
    z = lambda i: i / n
  f = np.zeros(n * 2)
  f[n:] = [z(i) for i in range(n)]
  total = np.sum(f)
  if total == 0:
    raise ValueError('smoothing kernel weights sum to zero')
  f = f / total
  return np.convolve(y, f, mode='same')
# Plot: panel 0 price with crossing markers, panel 1 expected vs predicted
# trend, panel 2 absolute prediction error.
p = Plotter(plt, subplots=(3, 1), linewidth=0.4)
Xbh1_ = Xbh1[len(Xbh1)-availableSize+sampleSize-1:]
# Smooth the raw predictions, then mark crossings of (avg - 0.5);
# crosszero's -1 values are plotted as 'short' markers, +1 as 'long'.
ybhAvr = smoothPredicted(ybhPred, 11)
ybhZero = crosszero(ybhAvr - 0.5, thres=5e-3)
xlim = (Xbh1_.shape[0] - 2000, Xbh1_.shape[0] - 0)
xPlot = np.arange(0, len(Xbh1_), 1)
p.plot(xPlot, Xbh1_, n=0, label='ask avr.')
for k, label in [(np.argwhere(ybhZero == -1.), 'short'),
                 (np.argwhere(ybhZero == +1.), 'long')]:
  p.scatter(k, Xbh1_[k], n=0, marker='x', linewidth=0.4, label=label)
p.limit(Xbh1_, xlim, n=0)
p.plot(xPlot, ybh0, n=1, label='exp.')
p.plot(xPlot, ybhPred, n=1, label='pred.')
p.plot(xPlot, ybhAvr, n=1, label='avr.')
p.hlines(0.5, 0, len(Xbh1_), n=1, linewidth=0.4)
p.vlines(len(Xbh1_) - SAMPLES_PREDICT, 0, 1, n=1, linewidth=0.4)
p.limit(ybh0, xlim, n=1)
p.plot(xPlot, np.abs(ybh0 - ybhPred), n=2, label='delta')
p.savefig('../figures/predicted.svg')
# Log the last 72 hourly predictions; the newest one is logged at WARN level.
SHOW_LAST_PREDICTS = 24 * 3
for i in range(SHOW_LAST_PREDICTS, 0, -1):
  if i == 1:
    logger.warn('Current predicts are trend={trend:0.2f}.'
                .format(trend=ybhPred[-i]))
  else:
    logger.info('Predicts[{i:2.0f}] are trend={trend:0.2f}.'
                .format(i=i, trend=ybhPred[-i]))
# Finished: report elapsed time, then register the newest trend value.
seconds = timer.stop()
logger.debug('End prediction, elapsed={s:.2f}s'.format(s=seconds))
logger.debug('Start registering.')
yTrend = ybhPred[-1].item()
logger.debug('Registering trend={trend:.3f}.'.format(trend=yTrend))
reportTrend(config, yTrend, logger)
logger.debug('End registering.')
| 33.330935 | 78 | 0.681416 | import datetime
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from Plotter import Plotter
from dsp import crosszero
from learningUtils import validated, to2d, zscore, loadModel
from utils import readConfig, getLogger, reportTrend, loadnpy, StopWatch
logger = getLogger()
logger.debug('Start prediction.')
timer = StopWatch()
timer.start()
config = readConfig('predict.ini')
INPUT_SIZE = config['predict'].getint('fitting.inputsize')
SAMPLES_PREDICT = config['train'].getint('samples.predict')
def load(exchanger, unit, ty):
return loadnpy(config, exchanger, unit, ty, nan=0.)
Xbh1 = load('bitflyer', 'hourly', 'askAverage')
Xbh2 = load('bitflyer', 'hourly', 'askMax')
Xbh3 = load('bitflyer', 'hourly', 'askMin')
Xbhb1 = load('bitflyer', 'hourly', 'askAverageBB+2')
Xbhb2 = load('bitflyer', 'hourly', 'askAverageBB-2')
Xbhi1 = load('bitflyer', 'hourly', 'askAverageConv')
Xbhi2 = load('bitflyer', 'hourly', 'askAverageBase')
Xbhi3 = load('bitflyer', 'hourly', 'askAveragePrc1')
Xbhi4 = load('bitflyer', 'hourly', 'askAveragePrc2')
Xbm1 = loadnpy(config, 'bitflyer', 'minutely', 'askAverage')
Xqm1 = loadnpy(config, 'quoine', 'minutely', 'askAverage')
ybh1 = load('bitflyer', 'hourly', 'askCloseTrend')
ybh1 = validated(ybh1)
sampleSize = INPUT_SIZE
featureCount = 11
availableSize = len(Xbhi4)
dataSize = availableSize - sampleSize + 1
Xbh0 = np.zeros((dataSize, sampleSize, featureCount))
Xbh0[:,:,0] = to2d(Xbh1, sampleSize, available=availableSize)
Xbh0[:,:,1] = to2d(Xbh2, sampleSize, available=availableSize)
Xbh0[:,:,2] = to2d(Xbh3, sampleSize, available=availableSize)
Xbh0[:,:,3] = to2d(Xbhb1, sampleSize, available=availableSize)
Xbh0[:,:,4] = to2d(Xbhb2, sampleSize, available=availableSize)
Xbh0[:,:,5] = to2d(Xbhi1, sampleSize, available=availableSize)
Xbh0[:,:,6] = to2d(Xbhi2, sampleSize, available=availableSize)
Xbh0[:,:,7] = to2d(Xbhi3, sampleSize, available=availableSize)
Xbh0[:,:,8] = to2d(Xbhi4, sampleSize, available=availableSize)
availableSizeM = (dataSize - 1) * 60 + sampleSize
d = datetime.datetime.now()
minutesToday = d.hour * 60 + d.minute
Xbh0[-1:,:,9] = Xbm1[-sampleSize:]
Xbh0[:-1,:,9] = to2d(Xbm1[:-minutesToday], sampleSize,
available=availableSizeM, stride=60)
Xbh0[-1:,:,10] = Xqm1[-sampleSize:]
Xbh0[:-1,:,10] = to2d(Xqm1[:-minutesToday], sampleSize,
available=availableSizeM, stride=60)
dataSize = Xbh0.shape[0]
Xbh = np.zeros((dataSize, Xbh0.shape[1] * Xbh0.shape[2]))
for i in range(0, dataSize):
for j in range(0, featureCount):
Xbh[i,j*sampleSize:(j+1)*sampleSize] = Xbh0[i,:,j]
ybh0 = ybh1[len(ybh1)-availableSize+sampleSize-1:]
yModel = loadModel(config, 'trend')
logger.debug('Predicting current trend, Xbh.shape={x}...'.format(x=Xbh.shape))
ybhPred = yModel.predict(zscore(Xbh))[:,0]
def smoothPredicted(y, n, z=None):
if z is None:
z = lambda i:i/n
f = np.zeros(n * 2)
for i in range(0, n):
f[n+i] = z(i)
f = f / np.sum(f)
y = np.convolve(y, f, mode='same')
return y
p = Plotter(plt, subplots=(3, 1), linewidth=0.4)
Xbh1_ = Xbh1[len(Xbh1)-availableSize+sampleSize-1:]
ybhAvr = smoothPredicted(ybhPred, 11)
ybhZero = crosszero(ybhAvr - 0.5, thres=5e-3)
xlim = (Xbh1_.shape[0] - 2000, Xbh1_.shape[0] - 0)
xPlot = np.arange(0, len(Xbh1_), 1)
p.plot(xPlot, Xbh1_, n=0, label='ask avr.')
for k, label in [(np.argwhere(ybhZero == -1.), 'short'),
(np.argwhere(ybhZero == +1.), 'long')]:
p.scatter(k, Xbh1_[k], n=0, marker='x', linewidth=0.4, label=label)
p.limit(Xbh1_, xlim, n=0)
p.plot(xPlot, ybh0, n=1, label='exp.')
p.plot(xPlot, ybhPred, n=1, label='pred.')
p.plot(xPlot, ybhAvr, n=1, label='avr.')
p.hlines(0.5, 0, len(Xbh1_), n=1, linewidth=0.4)
p.vlines(len(Xbh1_) - SAMPLES_PREDICT, 0, 1, n=1, linewidth=0.4)
p.limit(ybh0, xlim, n=1)
p.plot(xPlot, np.abs(ybh0 - ybhPred), n=2, label='delta')
p.savefig('../figures/predicted.svg')
SHOW_LAST_PREDICTS = 24 * 3
for i in range(SHOW_LAST_PREDICTS, 0, -1):
if i == 1:
logger.warn('Current predicts are trend={trend:0.2f}.'
.format(trend=ybhPred[-i]))
else:
logger.info('Predicts[{i:2.0f}] are trend={trend:0.2f}.'
.format(i=i, trend=ybhPred[-i]))
seconds = timer.stop()
logger.debug('End prediction, elapsed={s:.2f}s'.format(s=seconds))
logger.debug('Start registering.')
yTrend = ybhPred[-1].item()
logger.debug('Registering trend={trend:.3f}.'.format(trend=yTrend))
reportTrend(config, yTrend, logger)
logger.debug('End registering.')
| true | true |
1c35e6cd724cbfdfe8ac69884daef8626c316eeb | 507 | py | Python | dash/migrations/0001_initial.py | sayederfanarefin/django-test-for-dashboard | 8df78db07086413b577d9767c6db5baa69580544 | [
"MIT"
] | 1 | 2022-03-06T11:30:48.000Z | 2022-03-06T11:30:48.000Z | dash/migrations/0001_initial.py | sayederfanarefin/django-test-for-dashboard | 8df78db07086413b577d9767c6db5baa69580544 | [
"MIT"
] | 3 | 2020-02-11T23:38:26.000Z | 2021-06-10T21:07:16.000Z | dash/migrations/0001_initial.py | sayederfanarefin/django-test-for-dashboard | 8df78db07086413b577d9767c6db5baa69580544 | [
"MIT"
] | 1 | 2019-11-05T06:47:58.000Z | 2019-11-05T06:47:58.000Z | # Generated by Django 2.1.5 on 2019-01-19 07:57
from django.db import migrations, models
# Auto-generated by Django's makemigrations; edit with care.
class Migration(migrations.Migration):
    # First migration for this app (no prior migration state).
    initial = True
    dependencies = [
    ]
    operations = [
        # Document: an uploaded file stored under documents/YYYY/MM/DD.
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('docfile', models.FileField(upload_to='documents/%Y/%m/%d')),
            ],
        ),
    ]
| 23.045455 | 114 | 0.575937 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('docfile', models.FileField(upload_to='documents/%Y/%m/%d')),
],
),
]
| true | true |
1c35e6ffda0c3e70cd8f9bacbc2969c4051d5b9d | 10,863 | py | Python | Python3/Tornado/apps/pg/PG_Collection/src/collectors/btc/btc_transfer_utils.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 24 | 2018-11-01T03:36:43.000Z | 2022-03-28T08:20:30.000Z | Python3/Tornado/apps/pg/PG_Collection/src/collectors/btc/btc_transfer_utils.py | songning4/QBlockChainNotes | d65ede073f5a20f728f41cc6850409693820cdb1 | [
"MIT"
] | 57 | 2019-12-04T08:26:47.000Z | 2022-03-08T07:35:15.000Z | Python3/Tornado/apps/pg/PG_Collection/src/collectors/btc/btc_transfer_utils.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 11 | 2019-01-04T08:41:57.000Z | 2022-03-16T03:51:36.000Z | #!coding:utf8
#author:yqq
#date:2020/7/10 0010 15:34
#description: BTC 转账工具类
from typing import Dict, Tuple
import json
from bitcoin import SelectParams
from bitcoin.core import CMutableTxIn, COutPoint, lx, CScript, Hash160, CMutableTxOut, CMutableTransaction, b2x, COIN
from bitcoin.core.script import OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG, SignatureHash, SIGHASH_ALL
from bitcoin.core.scripteval import SCRIPT_VERIFY_P2SH, VerifyScript
from bitcoin.wallet import CBitcoinSecret, CBitcoinAddress
# from src.consumers.btc.btc_proxy import BTCProxy
from decimal import Decimal
# from src.lib.addr_gen.btc_addr_gen import PrivKeyToWIFCompress
from src.collectors.btc.btc_proxy import BTCProxy
from src.lib.addr_gen.btc_addr_gen import PrivKeyToWIFCompress
from src.lib.log import get_default_logger
from src.lib.pg_utils import round_down, decimal_default
from collections import OrderedDict
MIN_AVAILABLE_UTXO_VALUE_IN_SATOSHI = 10000
class BTCTransferUitl(BTCProxy):
    """Utility for selecting UTXOs, building, signing and broadcasting
    BTC transactions via a bitcoind RPC proxy.

    Only P2PKH source addresses are supported.
    """

    def __init__(self, host: str, port: int, net_type: str = 'mainnet'):
        assert net_type in ['testnet', 'mainnet', 'regtest'], f'invalid net_type {net_type}'
        self.net_type = net_type
        self.logger = get_default_logger()
        super().__init__(host=host, port=port)

    def search_utxo(self, addrs: list, total_amount: Decimal,
                    min_utxo_value: int = MIN_AVAILABLE_UTXO_VALUE_IN_SATOSHI) -> Tuple[bool, Dict, int]:
        """Collect UTXOs from addrs until total_amount (in BTC) is covered.

        :param addrs: candidate source addresses, scanned in order
        :param total_amount: target amount in BTC (outputs + fee)
        :param min_utxo_value: UTXOs below this satoshi value are skipped as dust
        :return: (is_enough, {addr: [utxo, ...]}, collected_satoshi)
        """
        ret_utxos_map = dict()
        total_btc = Decimal(0)  # renamed from `sum`: do not shadow the builtin
        sum_satoshi = 0
        is_enough = False
        for addr in addrs:
            if is_enough: break
            utxos = self.get_utxo(address=addr, include_mem=True)
            # Sort by amount ascending, then bring confirmed UTXOs to the
            # front (both sorts are stable, so amount order is kept per group).
            utxos.sort(key=lambda item: item['value'], reverse=False)
            utxos.sort(key=lambda item: item['status']['confirmed'], reverse=True)
            for utxo in utxos:
                value_in_satoshi = utxo['value']
                if value_in_satoshi < min_utxo_value: continue  # skip dust
                total_btc += round_down(Decimal(value_in_satoshi) / Decimal(10 ** 8))
                sum_satoshi += value_in_satoshi
                if addr not in ret_utxos_map: ret_utxos_map[addr] = []
                ret_utxos_map[addr].append(utxo)
                if total_btc >= total_amount:
                    is_enough = True
                    break
        return is_enough, ret_utxos_map, sum_satoshi

    def transfer(self, src_addrs_key_map: OrderedDict, dst_addrs_amount_map: dict,
                 txfee: Decimal, auto_calc_pay_back: bool, pay_back_index: int = 0xfffffff,
                 ensure_one_txout: bool = False) -> str:
        """Build, sign and broadcast a transaction.

        :param src_addrs_key_map: {'addr1': 'privkey1', ...}; private keys are
            raw hex (64 chars) or base58 WIF (52 chars) strings
        :param dst_addrs_amount_map: {'addr1': Decimal('0.1234'), ...}
        :param txfee: miner fee in BTC
        :param auto_calc_pay_back: automatically compute and append a change output
        :param pay_back_index: index of the change address within src_addrs_key_map
        :param ensure_one_txout: require exactly one output (e.g. a sweep or a
            withdrawal that needs no change)
        :return: txid of the broadcast transaction
        """
        # Only P2PKH source addresses are supported.
        assert isinstance(src_addrs_key_map, OrderedDict), 'src_addrs_key_map is not OrderedDict'
        assert isinstance(dst_addrs_amount_map, dict), 'dst_addrs_amount_map is not dict'
        assert Decimal('0.00001') <= txfee <= Decimal('0.001'), 'invalid txfee, please check txfee'
        assert len(src_addrs_key_map) >= 1, 'src_addrs_key_map length must >= 1'
        assert not (True == auto_calc_pay_back == ensure_one_txout), \
            'True == auto_calc_pay_back == ensure_one_txout , must be mutex '
        if ensure_one_txout:
            assert (len(dst_addrs_amount_map) == 1), 'dst_addrs_amount_map length must equal 1'
        elif not auto_calc_pay_back:
            # BUGFIX: the message used to claim '>= 2' while the check is '>= 1'.
            assert (len(dst_addrs_amount_map) >= 1), 'dst_addrs_amount_map length must >= 1'
        if auto_calc_pay_back and pay_back_index >= len(src_addrs_key_map):
            raise Exception('pay_back_index is to large')

        self.logger.info(f'dst_addrs_amount_map is { json.dumps(dst_addrs_amount_map, indent=4, default=decimal_default) }')
        total_amount = sum(dst_addrs_amount_map.values()) + txfee
        self.logger.info(f'total_amount is {total_amount}')

        source_addrs = list(src_addrs_key_map.keys())  # WARNING: never log private keys!
        is_enough, founded_utxos, sum_satoshi = self.search_utxo(addrs=source_addrs, total_amount=total_amount)
        if not is_enough:
            msg = 'balance is not enough'
            self.logger.error(msg)
            raise Exception(msg)
        self.logger.info(f'founded_utxos is { json.dumps(founded_utxos, indent=4, default=decimal_default) }')

        # Select network parameters (module-global state in python-bitcoinlib).
        SelectParams(self.net_type)

        # Build the inputs.
        txins = []
        utxo_owner_map = dict()
        for addr, utxos in founded_utxos.items():
            assert addr in src_addrs_key_map, 'addr is not in src_addrs_key_map'
            for utxo in utxos:
                txin = CMutableTxIn(prevout=COutPoint(hash=lx(utxo['txid']), n=utxo['vout']))
                txins.append(txin)
                # Input order is preserved, so the index maps input -> owning address.
                utxo_owner_map[len(txins) - 1] = addr

        # Build the outputs.
        txouts = []
        for to_addr, amount in dst_addrs_amount_map.items():
            out = CMutableTxOut(nValue=amount * COIN, scriptPubKey=CBitcoinAddress(to_addr).to_scriptPubKey())
            txouts.append(out)

        # Append a change output if requested and the change is not dust.
        if auto_calc_pay_back:
            sum_in_satoshi = 0
            for addr, utxos in founded_utxos.items():
                for utxo in utxos:
                    sum_in_satoshi += utxo['value']
            pay_back_in_satoshi = int(sum_in_satoshi - int(total_amount * COIN))
            if pay_back_in_satoshi >= MIN_AVAILABLE_UTXO_VALUE_IN_SATOSHI:
                pay_back_addr = list(src_addrs_key_map.keys())[pay_back_index]
                pay_back_out = CMutableTxOut(nValue=pay_back_in_satoshi,
                                             scriptPubKey=CBitcoinAddress(pay_back_addr).to_scriptPubKey())
                txouts.append(pay_back_out)

        muttx = CMutableTransaction(vin=txins, vout=txouts)

        # Sign every input with the key of the address that owns its UTXO.
        for n in range(len(txins)):
            owner_addr = utxo_owner_map[n]
            privkey = src_addrs_key_map[owner_addr]
            if len(privkey) == 64:    # raw hex private key
                wif_key = PrivKeyToWIFCompress(privkey, self.net_type != 'mainnet')
                seckey = CBitcoinSecret(s=wif_key)
            elif len(privkey) == 52:  # base58 WIF private key
                seckey = CBitcoinSecret(s=privkey)
            else:
                raise Exception("invalid privkey")
            txin_script_pubkey = CScript([OP_DUP, OP_HASH160, Hash160(seckey.pub), OP_EQUALVERIFY, OP_CHECKSIG])
            sig_hash = SignatureHash(txin_script_pubkey, muttx, n, SIGHASH_ALL)
            sig = seckey.sign(sig_hash) + bytes([SIGHASH_ALL])
            muttx.vin[n].scriptSig = CScript([sig, seckey.pub])
            # TODO: VerifyScript raises on failure; surface a clearer error to callers.
            VerifyScript(muttx.vin[n].scriptSig, txin_script_pubkey, muttx, n, (SCRIPT_VERIFY_P2SH,))

        raw_tx_hex = b2x(muttx.serialize())
        self.logger.info(f'raw_tx_hex is: {raw_tx_hex}')

        # Broadcast. First make sure the bitcoind RPC service is still alive.
        assert self.ping() == True, 'bitcoind rpc is gone'
        txid = self.send_raw_tx(raw_tx_hex=raw_tx_hex)
        self.logger.info(f'send_raw_tx txid is: {txid}')
        return txid
def foo1():
    """Demo: spend from a single regtest source address with automatic change.

    Requires a reachable bitcoind/esplora backend; intended for manual runs only.
    (Removed the commented-out alternate key-map literal that was dead code.)
    """
    btcutil = BTCTransferUitl(host='192.168.10.199', port=3002, net_type='regtest')
    # Single source address; key supplied as raw hex.
    src_addr_key_map = OrderedDict()
    src_addr_key_map['moAt6v6gpfJhSBYSmS2AzanW9565kakujW'] = '8baadf3faf9b7f8df9089d550abd75ef33ec7d02469f8ff4169f1b31f0b60b98'
    dst_addr_amount_map = {
        'n4EUHxfnu1jvPRbqm9G7VTheH8WVYStUdm': Decimal('100.0666'),
        'mmNJuiQK4U4VEUcR3WCjmFD9UCEYHDw9jt': Decimal('0.1234'),
        'n2iwTm5cT7PCYQ4ymoFD5kycHMoV2Ab8TB': Decimal('0.99999999'),
        '2N11UaUuvA8dUVTPhCkUqP7yVtVsPQXv6Q1': Decimal('0.876543211')
    }
    txfee = Decimal('0.0001')
    txid = btcutil.transfer(src_addrs_key_map=src_addr_key_map,
                            dst_addrs_amount_map=dst_addr_amount_map,
                            txfee=txfee,
                            auto_calc_pay_back=True,
                            pay_back_index=0,  # change goes back to the first source address
                            ensure_one_txout=False)
    # e.g. 75988ed243ae7d99c4d5eae632449418e36a9105cfdd5c46e6a1cd453b30b8ba
    print(txid)
def foo2():
    """Demo: spend from two regtest source addresses with automatic change.

    Requires a reachable bitcoind/esplora backend; intended for manual runs only.
    """
    btcutil = BTCTransferUitl(host='192.168.10.199', port=3002, net_type='regtest')
    # OrderedDict so pay_back_index deterministically selects the change
    # address.  (BUGFIX: removed the plain-dict literal that used to be
    # assigned here first -- it was dead code, immediately overwritten.)
    src_addr_key_map = OrderedDict()
    src_addr_key_map['moAt6v6gpfJhSBYSmS2AzanW9565kakujW'] = '8baadf3faf9b7f8df9089d550abd75ef33ec7d02469f8ff4169f1b31f0b60b98'
    src_addr_key_map['mjGRnCSyan333FdQVKonTFTmNqESaHUJmt'] = 'cVNHD7FCKEpm3yafwNjusAjz1oqm9e2nHpQJzhmHyCMZLbckCNbg'
    dst_addr_amount_map = {
        'n4EUHxfnu1jvPRbqm9G7VTheH8WVYStUdm': Decimal('100.0666'),
        'mmNJuiQK4U4VEUcR3WCjmFD9UCEYHDw9jt': Decimal('0.1234'),
        'n2iwTm5cT7PCYQ4ymoFD5kycHMoV2Ab8TB': Decimal('0.99999999'),
    }
    txfee = Decimal('0.0001')
    txid = btcutil.transfer(src_addrs_key_map=src_addr_key_map,
                            dst_addrs_amount_map=dst_addr_amount_map,
                            txfee=txfee,
                            auto_calc_pay_back=True,
                            pay_back_index=0,  # change goes back to the first source address
                            ensure_one_txout=False)
    # e.g. 75988ed243ae7d99c4d5eae632449418e36a9105cfdd5c46e6a1cd453b30b8ba
    print(txid)
def main():
    """Entry point for the manual demo: run the two-source transfer.

    foo1() is the single-source variant; swap the call below to try it.
    """
    foo2()
if __name__ == '__main__':
main() | 40.233333 | 128 | 0.640891 |
from typing import Dict
import json
from bitcoin import SelectParams
from bitcoin.core import CMutableTxIn, COutPoint, lx, CScript, Hash160, CMutableTxOut, CMutableTransaction, b2x, COIN
from bitcoin.core.script import OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG, SignatureHash, SIGHASH_ALL
from bitcoin.core.scripteval import SCRIPT_VERIFY_P2SH, VerifyScript
from bitcoin.wallet import CBitcoinSecret, CBitcoinAddress
from decimal import Decimal
from src.collectors.btc.btc_proxy import BTCProxy
from src.lib.addr_gen.btc_addr_gen import PrivKeyToWIFCompress
from src.lib.log import get_default_logger
from src.lib.pg_utils import round_down, decimal_default
from collections import OrderedDict
MIN_AVAILABLE_UTXO_VALUE_IN_SATOSHI = 10000
class BTCTransferUitl(BTCProxy):
def __init__(self, host: str, port: int, net_type: str = 'mainnet'):
assert net_type in ['testnet', 'mainnet', 'regtest'], f'invalid net_type {net_type}'
self.net_type = net_type
self.logger = get_default_logger()
super().__init__(host=host, port=port)
pass
def search_utxo(self, addrs: list, total_amount: Decimal,
min_utxo_value:int = MIN_AVAILABLE_UTXO_VALUE_IN_SATOSHI ) -> (bool, Dict, int):
ret_utxos_map = dict()
sum = Decimal(0)
sum_satoshi = 0
is_enough = False
for addr in addrs :
if is_enough: break
utxos = self.get_utxo(address=addr, include_mem=True)
utxos.sort(key=lambda item: item['value'], reverse=False)
utxos.sort(key=lambda item: item['status']['confirmed'], reverse=True)
for utxo in utxos:
value_in_satoshi = utxo['value']
if value_in_satoshi < min_utxo_value: continue
sum += round_down(Decimal(value_in_satoshi) / Decimal(10 ** 8))
sum_satoshi += value_in_satoshi
if addr not in ret_utxos_map: ret_utxos_map[addr] = []
ret_utxos_map[addr].append(utxo)
if sum >= total_amount:
is_enough = True
break
return is_enough, ret_utxos_map, sum_satoshi
def transfer(self, src_addrs_key_map: OrderedDict, dst_addrs_amount_map: dict ,
txfee: Decimal , auto_calc_pay_back: bool, pay_back_index: int = 0xfffffff,
ensure_one_txout: bool = False) -> str:
assert isinstance(src_addrs_key_map, OrderedDict) , 'src_addrs_key_map is not OrderedDict'
assert isinstance(dst_addrs_amount_map, dict), 'dst_addrs_amount_map is not dict'
assert Decimal('0.00001') <= txfee <= Decimal('0.001'), 'invalid txfee, please check txfee'
assert len(src_addrs_key_map) >= 1, 'src_addrs_key_map length must >= 1'
assert not (True == auto_calc_pay_back == ensure_one_txout) , \
'True == auto_calc_pay_back == ensure_one_txout , must be mutex '
if ensure_one_txout:
assert (len(dst_addrs_amount_map) == 1 ), 'dst_addrs_amount_map length must equal 1'
elif not auto_calc_pay_back:
assert (len(dst_addrs_amount_map) >= 1 ), 'dst_addrs_amount_map length must >= 2'
if auto_calc_pay_back and pay_back_index >= len(src_addrs_key_map):
raise Exception('pay_back_index is to large')
self.logger.info(f'dst_addrs_amount_map is { json.dumps(dst_addrs_amount_map, indent=4, default=decimal_default) }')
total_amount = sum(dst_addrs_amount_map.values()) + txfee
self.logger.info(f'total_amount is {total_amount}')
source_addrs = list(src_addrs_key_map.keys())
is_enough, founded_utxos, sum_satoshi = self.search_utxo(addrs=source_addrs, total_amount=total_amount)
if not is_enough:
msg = 'balance is not enough'
self.logger.error(msg)
raise Exception(msg)
self.logger.info(f'founded_utxos is { json.dumps(founded_utxos, indent=4, default=decimal_default) }')
SelectParams(self.net_type)
txins = []
utxo_owner_map = dict()
for addr , utxos in founded_utxos.items():
assert addr in src_addrs_key_map , 'addr is not in src_addrs_key_map'
for utxo in utxos:
txin = CMutableTxIn(prevout=COutPoint(hash=lx(utxo['txid']), n=utxo['vout']))
txins.append(txin)
utxo_owner_map[len(txins) - 1] = addr
txouts = []
for to_addr , amount in dst_addrs_amount_map.items():
out = CMutableTxOut(nValue=amount * COIN, scriptPubKey=CBitcoinAddress(to_addr).to_scriptPubKey())
txouts.append(out)
if auto_calc_pay_back:
sum_in_satoshi = 0
for addr, utxos in founded_utxos.items():
for utxo in utxos:
sum_in_satoshi += utxo['value']
pay_back_in_satoshi = int(sum_in_satoshi - int(total_amount * COIN) )
if pay_back_in_satoshi >= MIN_AVAILABLE_UTXO_VALUE_IN_SATOSHI:
pay_back_addr = list(src_addrs_key_map.keys())[pay_back_index]
pay_back_out = CMutableTxOut(nValue=pay_back_in_satoshi,
scriptPubKey=CBitcoinAddress(pay_back_addr).to_scriptPubKey())
txouts.append(pay_back_out)
muttx = CMutableTransaction(vin=txins, vout=txouts)
for n in range(len(txins)):
owner_addr = utxo_owner_map[n]
privkey = src_addrs_key_map[owner_addr]
if len(privkey) == 64:
wif_key = PrivKeyToWIFCompress(privkey, self.net_type != 'mainnet')
seckey = CBitcoinSecret(s = wif_key)
elif len(privkey) == 52:
seckey = CBitcoinSecret(s = privkey)
else:
raise Exception("invalid privkey")
txin_script_pubkey = CScript([OP_DUP, OP_HASH160, Hash160(seckey.pub), OP_EQUALVERIFY, OP_CHECKSIG])
sig_hash = SignatureHash(txin_script_pubkey, muttx, n, SIGHASH_ALL)
sig = seckey.sign(sig_hash) + bytes([SIGHASH_ALL])
muttx.vin[n].scriptSig = CScript([sig, seckey.pub])
VerifyScript(muttx.vin[n].scriptSig, txin_script_pubkey, muttx, n, (SCRIPT_VERIFY_P2SH,))
pass
raw_tx_hex = b2x(muttx.serialize())
self.logger.info(f'raw_tx_hex is: {raw_tx_hex}')
assert self.ping() == True, 'bitcoind rpc is gone'
txid = self.send_raw_tx(raw_tx_hex=raw_tx_hex)
self.logger.info(f'send_raw_tx txid is: {txid}')
return txid
def foo1():
btcutil = BTCTransferUitl(host='192.168.10.199', port=3002, net_type='regtest')
src_addr_key_map = OrderedDict()
src_addr_key_map['moAt6v6gpfJhSBYSmS2AzanW9565kakujW'] = '8baadf3faf9b7f8df9089d550abd75ef33ec7d02469f8ff4169f1b31f0b60b98'
dst_addr_amount_map = {
'n4EUHxfnu1jvPRbqm9G7VTheH8WVYStUdm' : Decimal('100.0666'),
'mmNJuiQK4U4VEUcR3WCjmFD9UCEYHDw9jt' : Decimal('0.1234'),
'n2iwTm5cT7PCYQ4ymoFD5kycHMoV2Ab8TB' : Decimal('0.99999999'),
'2N11UaUuvA8dUVTPhCkUqP7yVtVsPQXv6Q1': Decimal('0.876543211')
}
txfee = Decimal('0.0001')
txid = btcutil.transfer(src_addrs_key_map=src_addr_key_map,
dst_addrs_amount_map=dst_addr_amount_map,
txfee=txfee,
auto_calc_pay_back=True,
pay_back_index=0,
ensure_one_txout=False)
print(txid)
pass
def foo2():
btcutil = BTCTransferUitl(host='192.168.10.199', port=3002, net_type='regtest')
src_addr_key_map = {
'moAt6v6gpfJhSBYSmS2AzanW9565kakujW': '8baadf3faf9b7f8df9089d550abd75ef33ec7d02469f8ff4169f1b31f0b60b98',
'mjGRnCSyan333FdQVKonTFTmNqESaHUJmt': 'cVNHD7FCKEpm3yafwNjusAjz1oqm9e2nHpQJzhmHyCMZLbckCNbg',
}
src_addr_key_map = OrderedDict()
src_addr_key_map['moAt6v6gpfJhSBYSmS2AzanW9565kakujW'] = '8baadf3faf9b7f8df9089d550abd75ef33ec7d02469f8ff4169f1b31f0b60b98'
src_addr_key_map['mjGRnCSyan333FdQVKonTFTmNqESaHUJmt'] = 'cVNHD7FCKEpm3yafwNjusAjz1oqm9e2nHpQJzhmHyCMZLbckCNbg'
dst_addr_amount_map = {
'n4EUHxfnu1jvPRbqm9G7VTheH8WVYStUdm': Decimal('100.0666'),
'mmNJuiQK4U4VEUcR3WCjmFD9UCEYHDw9jt': Decimal('0.1234'),
'n2iwTm5cT7PCYQ4ymoFD5kycHMoV2Ab8TB': Decimal('0.99999999'),
}
txfee = Decimal('0.0001')
txid = btcutil.transfer(src_addrs_key_map=src_addr_key_map,
dst_addrs_amount_map=dst_addr_amount_map,
txfee=txfee,
auto_calc_pay_back=True,
pay_back_index=0,
ensure_one_txout=False)
print(txid)
pass
def main():
foo2()
pass
if __name__ == '__main__':
main() | true | true |
1c35e8a4968babee14c8b6baf7a4bf610c8351f4 | 1,573 | py | Python | submissions/functional/denas/Android_malware/Scripts/stability.py | SeekingDream/fse20 | c8a9199f002f947161cc7acdab5d025ab27a6598 | [
"Unlicense"
] | null | null | null | submissions/functional/denas/Android_malware/Scripts/stability.py | SeekingDream/fse20 | c8a9199f002f947161cc7acdab5d025ab27a6598 | [
"Unlicense"
] | null | null | null | submissions/functional/denas/Android_malware/Scripts/stability.py | SeekingDream/fse20 | c8a9199f002f947161cc7acdab5d025ab27a6598 | [
"Unlicense"
] | null | null | null | from Android_malware.Scripts.TreeLearning import *
from Android_malware.Scripts.Denasrule import *
def transferRuleSet(RuleSet):
    """Canonicalize each rule into an underscore-joined string of sorted codes.

    Every (feature, sign) pair is encoded as feature*2 + sign; the codes are
    sorted so that rules differing only in pair order map to the same string.
    """
    canonical = set()
    for rule in RuleSet:
        codes = sorted(pair[0] * 2 + pair[1] for pair in rule)
        canonical.add(''.join(str(code) + '_' for code in codes))
    return canonical
def calculate_stability(testnum, max_depth = 10, rule_num = 100):
    # Agreement between two independently extracted decision-tree rule sets,
    # scored as 2 * |A & B| / |A | B|.
    # NOTE(review): `rule_num` is currently unused -- confirm whether it was
    # meant to cap the extracted rule set size.
    x, _ = ReadData(year=NowYear, IsTrain=True)
    RuleSet_1 = generateDTreeRuleSet(x, testnum, maxdepth = max_depth)
    RuleSet_1 = transferRuleSet(RuleSet_1)
    RuleSet_2 = generateDTreeRuleSet(x, testnum, maxdepth= max_depth)
    RuleSet_2 = transferRuleSet(RuleSet_2)
    return len(RuleSet_1 & RuleSet_2) / (len(RuleSet_1 | RuleSet_2)) * 2
def baseline_stability():
    """Print the DTExtract stability score for several sample sizes."""
    for testnum in (1000, 5000, 10000):
        print('DTExtract %d:' % testnum, calculate_stability(testnum))
def DenasValue():
    # Stability of the Denas rule extractor: plain Jaccard |A & B| / |A | B|
    # over two independent extractions.
    # NOTE(review): calculate_stability() scores 2 * |A & B| / |A | B| instead;
    # confirm whether the two metrics are intentionally different.
    model = loadModel(2019)
    puppetModel = getPuppetModel("../model/" + str(2019) + "/MLP_model.h5")
    R_1 = gerenateDenasRule(puppetModel, model, 5, maxlength=10)
    R_1 = transferRuleSet(R_1)
    R_2 = gerenateDenasRule(puppetModel, model, 5, maxlength=10)
    R_2 = transferRuleSet(R_2)
    return len(R_1 & R_2) / (len(R_1 | R_2))
if __name__ == '__main__':
baseline_stability()
print('Denas:', DenasValue()) | 31.46 | 75 | 0.668786 | from Android_malware.Scripts.TreeLearning import *
from Android_malware.Scripts.Denasrule import *
def transferRuleSet(RuleSet):
    """Canonicalise each rule into a sorted '<code>_' string; return the set."""
    NewRuleSet = set()
    for rule in RuleSet:
        str_rule = []
        for r in rule:
            # Encode a (feature, value) pair as a single integer code.
            str_rule.append(r[0] * 2 + r[1])
        # Sort so pair order inside a rule does not affect the string form.
        str_rule = np.sort(str_rule)
        new_s = ''
        for r in str_rule:
            new_s += str(r) + '_'
        NewRuleSet.add(new_s)
    return NewRuleSet
def calculate_stability(testnum, max_depth = 10, rule_num = 100):
    """Overlap of two DTExtract rule sets built from identical settings.

    NOTE(review): returns 2x Jaccard similarity; confirm the ``* 2`` is
    intended.  ``rule_num`` is accepted but unused.
    """
    x, _ = ReadData(year=NowYear, IsTrain=True)
    RuleSet_1 = generateDTreeRuleSet(x, testnum, maxdepth = max_depth)
    RuleSet_1 = transferRuleSet(RuleSet_1)
    RuleSet_2 = generateDTreeRuleSet(x, testnum, maxdepth= max_depth)
    RuleSet_2 = transferRuleSet(RuleSet_2)
    return len(RuleSet_1 & RuleSet_2) / (len(RuleSet_1 | RuleSet_2)) * 2
def baseline_stability():
    """Print DTExtract stability for sample sizes 1000, 5000 and 10000."""
    testnum = 1000
    print('DTExtract 1000:', calculate_stability(testnum))
    testnum = 5000
    print('DTExtract 5000:', calculate_stability(testnum))
    testnum = 10000
    print('DTExtract 10000:', calculate_stability(testnum))
def DenasValue():
    """Jaccard overlap of two independent Denas rule extraction runs."""
    model = loadModel(2019)
    puppetModel = getPuppetModel("../model/" + str(2019) + "/MLP_model.h5")
    # Extract rules twice with the same parameters and compare the sets.
    R_1 = gerenateDenasRule(puppetModel, model, 5, maxlength=10)
    R_1 = transferRuleSet(R_1)
    R_2 = gerenateDenasRule(puppetModel, model, 5, maxlength=10)
    R_2 = transferRuleSet(R_2)
    return len(R_1 & R_2) / (len(R_1 | R_2))
if __name__ == '__main__':
baseline_stability()
print('Denas:', DenasValue()) | true | true |
1c35e931e160991aed5a9c408f4337157fa81653 | 249 | py | Python | src/pm/ompd/mtv_setup.py | raffenet/mpich-CVS | 2d33e2742e8c00db4f56a373fea051cc6c0ee0d0 | [
"mpich2"
] | null | null | null | src/pm/ompd/mtv_setup.py | raffenet/mpich-CVS | 2d33e2742e8c00db4f56a373fea051cc6c0ee0d0 | [
"mpich2"
] | null | null | null | src/pm/ompd/mtv_setup.py | raffenet/mpich-CVS | 2d33e2742e8c00db4f56a373fea051cc6c0ee0d0 | [
"mpich2"
] | null | null | null | #!/usr/bin/env python
#
# (C) 2001 by Argonne National Laboratory.
# See COPYRIGHT in top-level directory.
#
from distutils.core import setup, Extension
# Declare the C extension module "mtv" (built from mtv.c) and register it.
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrating
# this script to setuptools is recommended.
mtv = Extension("mtv",["mtv.c"])
setup(name="mtv", version="1.0", ext_modules=[mtv])
| 20.75 | 51 | 0.678715 |
from distutils.core import setup, Extension
mtv = Extension("mtv",["mtv.c"])
setup(name="mtv", version="1.0", ext_modules=[mtv])
| true | true |
1c35eb3559160c1667dc5809cf0b63a92d7f0385 | 686 | py | Python | app/stopwatch/apps.py | zigellsn/JWConfStage | 684060562a971b2dc33fe44b7f223babd4094786 | [
"Apache-2.0"
] | null | null | null | app/stopwatch/apps.py | zigellsn/JWConfStage | 684060562a971b2dc33fe44b7f223babd4094786 | [
"Apache-2.0"
] | 1 | 2019-02-20T21:15:08.000Z | 2019-02-20T21:15:08.000Z | app/stopwatch/apps.py | zigellsn/JWConfStage | 684060562a971b2dc33fe44b7f223babd4094786 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2022 Simon Zigelli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.apps import AppConfig
class StopwatchConfig(AppConfig):
    """Django application configuration for the ``stopwatch`` app."""
    name = 'stopwatch'
| 34.3 | 75 | 0.752187 |
from django.apps import AppConfig
class StopwatchConfig(AppConfig):
name = 'stopwatch'
| true | true |
1c35eb64f327f5e006e5a2718bbe7c5573ad0386 | 18,292 | py | Python | venv/Lib/site-packages/IPython/core/inputtransformer.py | BoxicaLion/BasicMathFormulas | 4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2 | [
"MIT"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | venv/Lib/site-packages/IPython/core/inputtransformer.py | BoxicaLion/BasicMathFormulas | 4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2 | [
"MIT"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | venv/Lib/site-packages/IPython/core/inputtransformer.py | BoxicaLion/BasicMathFormulas | 4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2 | [
"MIT"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | """DEPRECATED: Input transformer classes to support IPython special syntax.
This module was deprecated in IPython 7.0, in favour of inputtransformer2.
This includes the machinery to recognise and transform ``%magic`` commands,
``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
"""
import abc
import functools
import re
import tokenize
from tokenize import generate_tokens, untokenize, TokenError
from io import StringIO
from IPython.core.splitinput import LineInfo
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.
ESC_SHELL = '!' # Send line to underlying system shell
ESC_SH_CAP = '!!' # Send line to system shell and capture output
ESC_HELP = '?' # Find information about object
ESC_HELP2 = '??' # Find extra-detailed information about object
ESC_MAGIC = '%' # Call magic function
ESC_MAGIC2 = '%%' # Call cell-magic function
ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';' # Quote all args as a single string, call
ESC_PAREN = '/' # Call first argument with rest of line as arguments
ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
class InputTransformer(metaclass=abc.ABCMeta):
    """Abstract base class for line-based input transformers."""

    @abc.abstractmethod
    def push(self, line):
        """Consume one line of input.

        Returns the transformed input, or None when the transformer needs
        more lines before it can produce output.  Subclasses must override.
        Implementations may raise ``SyntaxError`` for invalid input; no
        other exceptions may be raised.
        """
        pass

    @abc.abstractmethod
    def reset(self):
        """Flush and return any accumulated (transformed) input, clearing
        all internal state.  Subclasses must override.
        """
        pass

    @classmethod
    def wrap(cls, func):
        """Decorator support: return a factory that instantiates this class
        around the decorated object ``func`` (plus any keyword arguments).
        """
        @functools.wraps(func)
        def factory(**kwargs):
            return cls(func, **kwargs)
        return factory
class StatelessInputTransformer(InputTransformer):
    """Wrapper for a stateless input transformer implemented as a function."""

    def __init__(self, func):
        self.func = func

    def __repr__(self):
        return f"StatelessInputTransformer(func={self.func!r})"

    def push(self, line):
        """Apply the wrapped function to one line and return the result."""
        return self.func(line)

    def reset(self):
        """No-op - exists for compatibility."""
        return None
class CoroutineInputTransformer(InputTransformer):
    """Wrapper for an input transformer implemented as a coroutine."""

    def __init__(self, coro, **kwargs):
        # Instantiate and prime the coroutine so it is ready for send().
        self.coro = coro(**kwargs)
        next(self.coro)

    def __repr__(self):
        return f"CoroutineInputTransformer(coro={self.coro!r})"

    def push(self, line):
        """Send one line into the coroutine.

        Returns the transformed input, or None if the coroutine is waiting
        for more input.
        """
        return self.coro.send(line)

    def reset(self):
        """Flush the coroutine by sending None; returns any pending text and
        resets its internal state.
        """
        return self.coro.send(None)
class TokenInputTransformer(InputTransformer):
    """Wrapper for a token-based input transformer.

    func should accept a list of tokens (5-tuples, see tokenize docs), and
    return an iterable which can be passed to tokenize.untokenize().
    """
    def __init__(self, func):
        self.func = func
        self.buf = []  # pending input lines, each including its trailing '\n'
        self.reset_tokenizer()

    def reset_tokenizer(self):
        # Rebuild the tokenizer over the current buffer; generate_tokens pulls
        # lines lazily via the iterator's __next__.
        it = iter(self.buf)
        self.tokenizer = generate_tokens(it.__next__)

    def push(self, line):
        self.buf.append(line + '\n')
        # A buffer of only whitespace can never tokenize to a statement; flush it.
        if all(l.isspace() for l in self.buf):
            return self.reset()

        tokens = []
        stop_at_NL = False
        try:
            for intok in self.tokenizer:
                tokens.append(intok)
                t = intok[0]
                if t == tokenize.NEWLINE or (stop_at_NL and t == tokenize.NL):
                    # Stop before we try to pull a line we don't have yet
                    break
                elif t == tokenize.ERRORTOKEN:
                    # After a tokenize error, treat the next NL as the end too.
                    stop_at_NL = True
        except TokenError:
            # Multi-line statement - stop and try again with the next line
            self.reset_tokenizer()
            return None

        return self.output(tokens)

    def output(self, tokens):
        # Transform the complete token stream and emit it as source text.
        self.buf.clear()
        self.reset_tokenizer()
        return untokenize(self.func(tokens)).rstrip('\n')

    def reset(self):
        # Return the raw buffered text untransformed (None if empty).
        l = ''.join(self.buf)
        self.buf.clear()
        self.reset_tokenizer()
        if l:
            return l.rstrip('\n')
class assemble_python_lines(TokenInputTransformer):
    """Accumulate lines until they tokenize as a complete Python statement,
    then emit the buffered text unchanged (no token transformation)."""
    def __init__(self):
        # func=None: output() below bypasses the transform entirely.
        super(assemble_python_lines, self).__init__(None)

    def output(self, tokens):
        # Return the raw buffered lines via reset() instead of untokenizing.
        return self.reset()
@CoroutineInputTransformer.wrap
def assemble_logical_lines():
    r"""Join lines following explicit line continuations (\)."""
    line = ''
    while True:
        line = (yield line)
        if not line or line.isspace():
            continue

        parts = []
        while line is not None:
            if line.endswith('\\') and (not has_comment(line)):
                # Trailing backslash outside a comment: buffer it without the
                # backslash and wait for the continuation line.
                parts.append(line[:-1])
                line = (yield None) # Get another line
            else:
                parts.append(line)
                break

        # Output
        line = ''.join(parts)
# Utilities
def _make_help_call(target, esc, lspace, next_input=None):
    """Prepares a pinfo(2)/psearch call from a target name and the escape
    (i.e. ? or ??)"""
    if esc == '??':
        method = 'pinfo2'
    elif '*' in target:
        # Wildcards in the target mean a name search rather than object info.
        method = 'psearch'
    else:
        method = 'pinfo'
    arg = " ".join([method, target])
    # Split back into magic name + argument string for run_line_magic.
    t_magic_name, _, t_magic_arg_s = arg.partition(' ')
    t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
    if next_input is None:
        return '%sget_ipython().run_line_magic(%r, %r)' % (lspace, t_magic_name, t_magic_arg_s)
    return '%sget_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
        (lspace, next_input, t_magic_name, t_magic_arg_s)
# These define the transformations for the different escape characters.
def _tr_system(line_info):
    "Translate lines escaped with: !"
    # ! -> get_ipython().system(...): run the rest of the line in the shell.
    cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
    return '%sget_ipython().system(%r)' % (line_info.pre, cmd)

def _tr_system2(line_info):
    "Translate lines escaped with: !!"
    # !! -> getoutput(...): run in the shell and capture the output.
    cmd = line_info.line.lstrip()[2:]
    return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)

def _tr_help(line_info):
    "Translate lines escaped with: ?/??"
    # A naked help line should just fire the intro help screen
    if not line_info.line[1:]:
        return 'get_ipython().show_usage()'

    return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)

def _tr_magic(line_info):
    "Translate lines escaped with: %"
    tpl = '%sget_ipython().run_line_magic(%r, %r)'
    # Cell magics (%%) are handled by the cellmagic coroutine, not here.
    if line_info.line.startswith(ESC_MAGIC2):
        return line_info.line
    cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
    #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
    t_magic_name, _, t_magic_arg_s = cmd.partition(' ')
    t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
    return tpl % (line_info.pre, t_magic_name, t_magic_arg_s)

def _tr_quote(line_info):
    "Translate lines escaped with: ,"
    # , -> call ifun with each whitespace-separated argument quoted.
    return '%s%s("%s")' % (line_info.pre, line_info.ifun,
                         '", "'.join(line_info.the_rest.split()) )

def _tr_quote2(line_info):
    "Translate lines escaped with: ;"
    # ; -> call ifun with the whole rest of the line as one string.
    return '%s%s("%s")' % (line_info.pre, line_info.ifun,
                         line_info.the_rest)

def _tr_paren(line_info):
    "Translate lines escaped with: /"
    # / -> call ifun with the rest of the line as comma-separated arguments.
    return '%s%s(%s)' % (line_info.pre, line_info.ifun,
                         ", ".join(line_info.the_rest.split()))

# Dispatch table: escape string -> translator function.
tr = { ESC_SHELL : _tr_system,
       ESC_SH_CAP : _tr_system2,
       ESC_HELP : _tr_help,
       ESC_HELP2 : _tr_help,
       ESC_MAGIC : _tr_magic,
       ESC_QUOTE : _tr_quote,
       ESC_QUOTE2 : _tr_quote2,
       ESC_PAREN : _tr_paren }
@StatelessInputTransformer.wrap
def escaped_commands(line):
    """Transform escaped commands - %magic, !system, ?help + various autocalls.
    """
    if not line or line.isspace():
        return line
    info = LineInfo(line)
    # Dispatch on the escape character; unknown escapes pass through untouched.
    handler = tr.get(info.esc)
    if handler is None:
        return line
    return handler(info)
_initial_space_re = re.compile(r'\s*')
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??
""",
re.VERBOSE)
# Extra pseudotokens for multiline strings and data structures
_MULTILINE_STRING = object()
_MULTILINE_STRUCTURE = object()
def _line_tokens(line):
    """Helper for has_comment and ends_in_comment_or_string.

    Returns the set of token types found on *line*; on a TokenError the
    appropriate sentinel (_MULTILINE_STRING or _MULTILINE_STRUCTURE) is
    added instead of real token types for the unterminated part.
    """
    readline = StringIO(line).readline
    toktypes = set()
    try:
        for t in generate_tokens(readline):
            toktypes.add(t[0])
    except TokenError as e:
        # There are only two cases where a TokenError is raised.
        if 'multi-line string' in e.args[0]:
            toktypes.add(_MULTILINE_STRING)
        else:
            toktypes.add(_MULTILINE_STRUCTURE)
    return toktypes
def has_comment(src):
    """Return True if the single input line *src* contains (or is) a comment.

    Tokenizes the line, so ``#`` characters inside string literals are not
    mistaken for comments.

    Parameters
    ----------
    src : string
        A single line input string.

    Returns
    -------
    comment : bool
        True if source has a comment.
    """
    return tokenize.COMMENT in _line_tokens(src)
def ends_in_comment_or_string(src):
    """Return True if *src* ends in a comment or inside a multiline string.

    Parameters
    ----------
    src : string
        A single line input string.

    Returns
    -------
    comment : bool
        True if source ends in a comment or multiline string.
    """
    kinds = _line_tokens(src)
    return tokenize.COMMENT in kinds or _MULTILINE_STRING in kinds
@StatelessInputTransformer.wrap
def help_end(line):
    """Translate lines with ?/?? at the end"""
    match = _help_end_re.search(line)
    if match is None or ends_in_comment_or_string(line):
        return line
    target = match.group(1)
    esc = match.group(3)
    lspace = _initial_space_re.match(line).group(0)

    next_input = None
    if line.strip() != match.group(0):
        # Mid-command: requeue the remainder on the next prompt for the user.
        next_input = line.rstrip('?')
    return _make_help_call(target, esc, lspace, next_input)
@CoroutineInputTransformer.wrap
def cellmagic(end_on_blank_line=False):
    """Captures & transforms cell magics.

    After a cell magic is started, this stores up any lines it gets until it is
    reset (sent None).  If ``end_on_blank_line`` is true, a blank line also
    terminates the cell body.
    """
    tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
    cellmagic_help_re = re.compile(r'%%\w+\?')
    line = ''
    while True:
        line = (yield line)
        # consume leading empty lines
        while not line:
            line = (yield line)

        if not line.startswith(ESC_MAGIC2):
            # This isn't a cell magic, idle waiting for reset then start over
            while line is not None:
                line = (yield line)
            continue

        if cellmagic_help_re.match(line):
            # This case will be handled by help_end
            continue

        # First line holds "%%name args"; buffer the body until reset.
        first = line
        body = []
        line = (yield None)
        while (line is not None) and \
                                ((line.strip() != '') or not end_on_blank_line):
            body.append(line)
            line = (yield None)

        # Output
        magic_name, _, first = first.partition(' ')
        magic_name = magic_name.lstrip(ESC_MAGIC2)
        line = tpl % (magic_name, first, u'\n'.join(body))
def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
"""Remove matching input prompts from a block of input.
Parameters
----------
prompt_re : regular expression
A regular expression matching any input prompt (including continuation)
initial_re : regular expression, optional
A regular expression matching only the initial prompt, but not continuation.
If no initial expression is given, prompt_re will be used everywhere.
Used mainly for plain Python prompts, where the continuation prompt
``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
If initial_re and prompt_re differ,
only initial_re will be tested against the first line.
If any prompt is found on the first two lines,
prompts will be stripped from the rest of the block.
"""
if initial_re is None:
initial_re = prompt_re
line = ''
while True:
line = (yield line)
# First line of cell
if line is None:
continue
out, n1 = initial_re.subn('', line, count=1)
if turnoff_re and not n1:
if turnoff_re.match(line):
# We're in e.g. a cell magic; disable this transformer for
# the rest of the cell.
while line is not None:
line = (yield line)
continue
line = (yield out)
if line is None:
continue
# check for any prompt on the second line of the cell,
# because people often copy from just after the first prompt,
# so we might not see it in the first line.
out, n2 = prompt_re.subn('', line, count=1)
line = (yield out)
if n1 or n2:
# Found a prompt in the first two lines - check for it in
# the rest of the cell as well.
while line is not None:
line = (yield prompt_re.sub('', line, count=1))
else:
# Prompts not in input - wait for reset
while line is not None:
line = (yield line)
@CoroutineInputTransformer.wrap
def classic_prompt():
    """Strip the >>>/... prompts of the Python interactive shell."""
    # FIXME: non-capturing version (?:...) usable?
    prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
    # Only '>>>' counts on the first line: a leading '...' is valid Python
    # (Ellipsis) in Python 3 and must not be stripped there.
    initial_re = re.compile(r'^>>>( |$)')
    # Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
    turnoff_re = re.compile(r'^[%!]')
    return _strip_prompts(prompt_re, initial_re, turnoff_re)
@CoroutineInputTransformer.wrap
def ipy_prompt():
    """Strip IPython's In [1]:/...: prompts."""
    # FIXME: non-capturing version (?:...) usable?
    prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
    # Disable prompt stripping inside cell magics
    turnoff_re = re.compile(r'^%%')
    return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
@CoroutineInputTransformer.wrap
def leading_indent():
    """Remove leading indentation.

    If the first line starts with a spaces or tabs, the same whitespace will be
    removed from each following line until it is reset.
    """
    space_re = re.compile(r'^[ \t]+')
    line = ''
    while True:
        line = (yield line)

        if line is None:
            continue

        m = space_re.match(line)
        if m:
            # Strip exactly this whitespace prefix from every subsequent line.
            space = m.group(0)
            while line is not None:
                if line.startswith(space):
                    line = line[len(space):]
                line = (yield line)
        else:
            # No leading spaces - wait for reset
            while line is not None:
                line = (yield line)
_assign_pat = \
r'''(?P<lhs>(\s*)
([\w\.]+) # Initial identifier
(\s*,\s*
\*?[\w\.]+)* # Further identifiers for unpacking
\s*?,? # Trailing comma
)
\s*=\s*
'''
assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_system_template = '%s = get_ipython().getoutput(%r)'
@StatelessInputTransformer.wrap
def assign_from_system(line):
    """Transform assignment from system commands (e.g. files = !ls)"""
    match = assign_system_re.match(line)
    if match is None:
        return line

    return assign_system_template % match.group('lhs', 'cmd')
assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
@StatelessInputTransformer.wrap
def assign_from_magic(line):
    """Transform assignment from magic commands (e.g. a = %who_ls)"""
    match = assign_magic_re.match(line)
    if match is None:
        return line
    # Split "name args" apart for get_ipython().run_line_magic(name, args).
    lhs, cmd = match.group('lhs', 'cmd')
    magic_name, _, magic_args = cmd.partition(' ')
    magic_name = magic_name.lstrip(ESC_MAGIC)
    return assign_magic_template % (lhs, magic_name, magic_args)
| 34.063315 | 95 | 0.603597 | import abc
import functools
import re
import tokenize
from tokenize import generate_tokens, untokenize, TokenError
from io import StringIO
from IPython.core.splitinput import LineInfo
ESC_SHELL = '!'
ESC_SH_CAP = '!!'
ESC_HELP = '?'
ESC_HELP2 = '??'
ESC_MAGIC = '%'
ESC_MAGIC2 = '%%'
ESC_QUOTE = ','
ESC_QUOTE2 = ';'
ESC_PAREN = '/'
ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
class InputTransformer(metaclass=abc.ABCMeta):
    """Abstract base class for line-based input transformers."""
    @abc.abstractmethod
    def push(self, line):
        # Feed one line; return transformed text, or None if more input is
        # needed before output can be produced.  Must be overridden.
        pass
    @abc.abstractmethod
    def reset(self):
        # Flush any buffered input and clear internal state.  Must be overridden.
        pass
    @classmethod
    def wrap(cls, func):
        # Decorator helper: returns a factory building cls(func, **kwargs).
        @functools.wraps(func)
        def transformer_factory(**kwargs):
            return cls(func, **kwargs)
        return transformer_factory
class StatelessInputTransformer(InputTransformer):
    """Wrapper for a stateless input transformer implemented as a function."""
    def __init__(self, func):
        self.func = func
    def __repr__(self):
        return "StatelessInputTransformer(func={0!r})".format(self.func)
    def push(self, line):
        # Apply the wrapped function to one line and return the result.
        return self.func(line)
    def reset(self):
        # No-op: this transformer holds no state.
        pass
class CoroutineInputTransformer(InputTransformer):
def __init__(self, coro, **kwargs):
self.coro = coro(**kwargs)
next(self.coro)
def __repr__(self):
return "CoroutineInputTransformer(coro={0!r})".format(self.coro)
def push(self, line):
return self.coro.send(line)
def reset(self):
return self.coro.send(None)
class TokenInputTransformer(InputTransformer):
def __init__(self, func):
self.func = func
self.buf = []
self.reset_tokenizer()
def reset_tokenizer(self):
it = iter(self.buf)
self.tokenizer = generate_tokens(it.__next__)
def push(self, line):
self.buf.append(line + '\n')
if all(l.isspace() for l in self.buf):
return self.reset()
tokens = []
stop_at_NL = False
try:
for intok in self.tokenizer:
tokens.append(intok)
t = intok[0]
if t == tokenize.NEWLINE or (stop_at_NL and t == tokenize.NL):
break
elif t == tokenize.ERRORTOKEN:
stop_at_NL = True
except TokenError:
# Multi-line statement - stop and try again with the next line
self.reset_tokenizer()
return None
return self.output(tokens)
def output(self, tokens):
self.buf.clear()
self.reset_tokenizer()
return untokenize(self.func(tokens)).rstrip('\n')
def reset(self):
l = ''.join(self.buf)
self.buf.clear()
self.reset_tokenizer()
if l:
return l.rstrip('\n')
class assemble_python_lines(TokenInputTransformer):
def __init__(self):
super(assemble_python_lines, self).__init__(None)
def output(self, tokens):
return self.reset()
@CoroutineInputTransformer.wrap
def assemble_logical_lines():
line = ''
while True:
line = (yield line)
if not line or line.isspace():
continue
parts = []
while line is not None:
if line.endswith('\\') and (not has_comment(line)):
parts.append(line[:-1])
line = (yield None) # Get another line
else:
parts.append(line)
break
# Output
line = ''.join(parts)
# Utilities
def _make_help_call(target, esc, lspace, next_input=None):
method = 'pinfo2' if esc == '??' \
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = arg.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
if next_input is None:
return '%sget_ipython().run_line_magic(%r, %r)' % (lspace, t_magic_name, t_magic_arg_s)
else:
return '%sget_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
(lspace, next_input, t_magic_name, t_magic_arg_s)
# These define the transformations for the different escape characters.
def _tr_system(line_info):
cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
def _tr_system2(line_info):
cmd = line_info.line.lstrip()[2:]
return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
def _tr_help(line_info):
# A naked help line should just fire the intro help screen
if not line_info.line[1:]:
return 'get_ipython().show_usage()'
return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
def _tr_magic(line_info):
tpl = '%sget_ipython().run_line_magic(%r, %r)'
if line_info.line.startswith(ESC_MAGIC2):
return line_info.line
cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = cmd.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
return tpl % (line_info.pre, t_magic_name, t_magic_arg_s)
def _tr_quote(line_info):
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
'", "'.join(line_info.the_rest.split()) )
def _tr_quote2(line_info):
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
line_info.the_rest)
def _tr_paren(line_info):
return '%s%s(%s)' % (line_info.pre, line_info.ifun,
", ".join(line_info.the_rest.split()))
tr = { ESC_SHELL : _tr_system,
ESC_SH_CAP : _tr_system2,
ESC_HELP : _tr_help,
ESC_HELP2 : _tr_help,
ESC_MAGIC : _tr_magic,
ESC_QUOTE : _tr_quote,
ESC_QUOTE2 : _tr_quote2,
ESC_PAREN : _tr_paren }
@StatelessInputTransformer.wrap
def escaped_commands(line):
if not line or line.isspace():
return line
lineinf = LineInfo(line)
if lineinf.esc not in tr:
return line
return tr[lineinf.esc](lineinf)
_initial_space_re = re.compile(r'\s*')
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??
""",
re.VERBOSE)
# Extra pseudotokens for multiline strings and data structures
_MULTILINE_STRING = object()
_MULTILINE_STRUCTURE = object()
def _line_tokens(line):
readline = StringIO(line).readline
toktypes = set()
try:
for t in generate_tokens(readline):
toktypes.add(t[0])
except TokenError as e:
# There are only two cases where a TokenError is raised.
if 'multi-line string' in e.args[0]:
toktypes.add(_MULTILINE_STRING)
else:
toktypes.add(_MULTILINE_STRUCTURE)
return toktypes
def has_comment(src):
return (tokenize.COMMENT in _line_tokens(src))
def ends_in_comment_or_string(src):
toktypes = _line_tokens(src)
return (tokenize.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
@StatelessInputTransformer.wrap
def help_end(line):
m = _help_end_re.search(line)
if m is None or ends_in_comment_or_string(line):
return line
target = m.group(1)
esc = m.group(3)
lspace = _initial_space_re.match(line).group(0)
# If we're mid-command, put it back on the next prompt for the user.
next_input = line.rstrip('?') if line.strip() != m.group(0) else None
return _make_help_call(target, esc, lspace, next_input)
@CoroutineInputTransformer.wrap
def cellmagic(end_on_blank_line=False):
tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
cellmagic_help_re = re.compile(r'%%\w+\?')
line = ''
while True:
line = (yield line)
while not line:
line = (yield line)
if not line.startswith(ESC_MAGIC2):
while line is not None:
line = (yield line)
continue
if cellmagic_help_re.match(line):
# This case will be handled by help_end
continue
first = line
body = []
line = (yield None)
while (line is not None) and \
((line.strip() != '') or not end_on_blank_line):
body.append(line)
line = (yield None)
# Output
magic_name, _, first = first.partition(' ')
magic_name = magic_name.lstrip(ESC_MAGIC2)
line = tpl % (magic_name, first, u'\n'.join(body))
def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
if initial_re is None:
initial_re = prompt_re
line = ''
while True:
line = (yield line)
# First line of cell
if line is None:
continue
out, n1 = initial_re.subn('', line, count=1)
if turnoff_re and not n1:
if turnoff_re.match(line):
# We're in e.g. a cell magic; disable this transformer for
while line is not None:
line = (yield line)
continue
line = (yield out)
if line is None:
continue
out, n2 = prompt_re.subn('', line, count=1)
line = (yield out)
if n1 or n2:
while line is not None:
line = (yield prompt_re.sub('', line, count=1))
else:
while line is not None:
line = (yield line)
@CoroutineInputTransformer.wrap
def classic_prompt():
prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
initial_re = re.compile(r'^>>>( |$)')
turnoff_re = re.compile(r'^[%!]')
return _strip_prompts(prompt_re, initial_re, turnoff_re)
@CoroutineInputTransformer.wrap
def ipy_prompt():
# FIXME: non-capturing version (?:...) usable?
prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
# Disable prompt stripping inside cell magics
turnoff_re = re.compile(r'^%%')
return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
@CoroutineInputTransformer.wrap
def leading_indent():
space_re = re.compile(r'^[ \t]+')
line = ''
while True:
line = (yield line)
if line is None:
continue
m = space_re.match(line)
if m:
space = m.group(0)
while line is not None:
if line.startswith(space):
line = line[len(space):]
line = (yield line)
else:
# No leading spaces - wait for reset
while line is not None:
line = (yield line)
_assign_pat = \
r'''(?P<lhs>(\s*)
([\w\.]+) # Initial identifier
(\s*,\s*
\*?[\w\.]+)* # Further identifiers for unpacking
\s*?,? # Trailing comma
)
\s*=\s*
'''
assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_system_template = '%s = get_ipython().getoutput(%r)'
@StatelessInputTransformer.wrap
def assign_from_system(line):
m = assign_system_re.match(line)
if m is None:
return line
return assign_system_template % m.group('lhs', 'cmd')
assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
@StatelessInputTransformer.wrap
def assign_from_magic(line):
m = assign_magic_re.match(line)
if m is None:
return line
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
m_lhs, m_cmd = m.group('lhs', 'cmd')
t_magic_name, _, t_magic_arg_s = m_cmd.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
return assign_magic_template % (m_lhs, t_magic_name, t_magic_arg_s)
| true | true |
1c35eb725e067bd6026041677434b57cce4af8f5 | 6,152 | py | Python | fem/gui/vtk_widget/plotting_toolbar/_plotting_dock.py | mjredmond/FEMApp | dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8 | [
"MIT"
] | 1 | 2019-08-03T21:40:26.000Z | 2019-08-03T21:40:26.000Z | fem/gui/vtk_widget/plotting_toolbar/_plotting_dock.py | mjredmond/FEMApp | dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8 | [
"MIT"
] | null | null | null | fem/gui/vtk_widget/plotting_toolbar/_plotting_dock.py | mjredmond/FEMApp | dd8cc53acf80d0a1bb83ce9c89bcfd51e85c6be8 | [
"MIT"
] | null | null | null | from __future__ import print_function, absolute_import
from qtpy import QtWidgets
from ._plotting_ui import Ui_DockWidget
from ..vtk_graphics import VTKGraphics
from ..vtk_graphics.pipelines.picked import PickedSelection
vtk_graphics = VTKGraphics.instance()
class PlainTextEditDelegate(object):
    """Keeps the global picked selection in sync with a QPlainTextEdit.

    The widget's enter/leave (hover) events are rerouted through this
    delegate; on either event the editor's text is pushed into the picked
    selection with its change notifications blocked, and the VTK view is
    refreshed.
    """

    def __init__(self, parent, dock_widget):
        self.parent = parent
        """:type: QtGui.QPlainTextEdit"""

        self.dock_widget = dock_widget
        """:type: PlottingDock"""

        self.delegate = QtWidgets.QPlainTextEdit()

        # Route the widget's hover events through this delegate.
        self.parent.enterEvent = self.enter_event
        self.parent.leaveEvent = self.leave_event

    def _sync_selection(self):
        # Shared body for enter/leave: push the editor text into the picked
        # selection without emitting data_changed, then refresh the view.
        picked_selection = vtk_graphics.picked_selection
        picked_selection.data_changed.block()
        picked_selection.set_from_str(str(self.parent.toPlainText()))
        picked_selection.data_changed.unblock()

        vtk_graphics.visible_filter.Modified()

        # sometimes an exception occurs with closing the app, because the
        # interactor has been set to None already
        try:
            vtk_graphics.render()
        except AttributeError:
            pass

    def enter_event(self, *args):
        # Previously duplicated leave_event verbatim; both now share
        # _sync_selection.
        self._sync_selection()

    def leave_event(self, *args):
        self._sync_selection()
class PlottingDock(QtWidgets.QDockWidget):
    """Dock widget for plotting/erasing FEM entities and toggling the
    visibility of element categories in the VTK view.

    Owns a :class:`PickedSelection` that mirrors the interactive pick state
    and a :class:`PlainTextEditDelegate` that keeps the text box in sync
    with that selection.
    """

    # List-item caption -> (method hiding the category, method showing it)
    # on vtk_graphics.visible_types.  Replaces the original if/elif ladder.
    _VISIBILITY_ACTIONS = {
        "All": ("remove_all", "add_all"),
        "FEM": ("remove_all_fem", "add_all_fem"),
        "Grid": ("remove_all_grid_fem", "add_all_grid_fem"),
        "Point Elements": ("remove_all_point_fem", "add_all_point_fem"),
        "Line Elements": ("remove_all_line_fem", "add_all_line_fem"),
        "Tri Elements": ("remove_all_tri_fem", "add_all_tri_fem"),
        "Quad Elements": ("remove_all_quad_fem", "add_all_quad_fem"),
        "Shell Elements": ("remove_all_shell_fem", "add_all_shell_fem"),
        "MPC's": ("remove_all_mpc_fem", "add_all_mpc_fem"),
    }

    def __init__(self, main_window):
        super(PlottingDock, self).__init__(main_window)
        self.main_window = main_window
        self.ui = Ui_DockWidget()
        self.ui.setupUi(self)
        self.ui.plot_button.clicked.connect(self._plot)
        self.ui.erase_button.clicked.connect(self._erase)
        self.ui.done_button.clicked.connect(self._done)
        self.ui.plainTextEdit.textChanged.connect(self._text_changed)
        self.ui.plainTextEdit.setReadOnly(False)
        self.ui.listWidget.itemChanged.connect(self._item_changed)
        # Delegate syncs hover events on the text edit with the picked selection.
        self.delegate = PlainTextEditDelegate(self.ui.plainTextEdit, self)
        self.setWindowTitle('Hide/Show FEM')
        self._selection = PickedSelection()
        self._selection.data_changed.connect(self._picked_data_changed)

    def show_and_register(self):
        """Show the dock and register its selection with the picking manager."""
        self.show()
        # Imported locally -- presumably to avoid a circular import; confirm
        # before hoisting to module level.
        from ..vtk_graphics.picking import PickingManager
        picking_manager = PickingManager.instance()
        picking_manager.register_selection(self._selection)
        self.main_window.show_dock(self)

    def build(self, vtk_config):
        """No-op; retained for interface compatibility with callers."""
        pass

    def _plot(self):
        """Plot the FEM entities listed in the text box."""
        vtk_graphics.plot_fem(str(self.ui.plainTextEdit.toPlainText()))

    def _erase(self):
        """Erase the FEM entities listed in the text box."""
        vtk_graphics.erase_fem(str(self.ui.plainTextEdit.toPlainText()))

    def _done(self):
        self.hide()

    def closeEvent(self, QCloseEvent):
        super(PlottingDock, self).closeEvent(QCloseEvent)

    def _picked_data_changed(self, *args):
        """Mirror the picked selection into the text box without re-triggering
        the textChanged handler."""
        self.ui.plainTextEdit.blockSignals(True)
        self.ui.plainTextEdit.setPlainText(self._selection.to_str())
        self.ui.plainTextEdit.blockSignals(False)

    def _text_changed(self, *args, **kwargs):
        # NOTE(review): debug trace kept to preserve observable behaviour.
        print(args, kwargs)

    def _item_changed(self, item):
        """Hide (unchecked, state 0) or show (checked) the element category
        named by *item*, then refresh the view.

        Unknown captions fall through to the refresh only, matching the
        behaviour of the original if/elif chain.
        """
        actions = self._VISIBILITY_ACTIONS.get(str(item.text()))
        if actions is not None:
            hide_method, show_method = actions
            chosen = hide_method if item.checkState() == 0 else show_method
            getattr(vtk_graphics.visible_types, chosen)()
        vtk_graphics.visible_filter.Modified()
        vtk_graphics.render()

    def enterEvent(self, *args, **kwargs):
        # NOTE(review): debug trace kept to preserve observable behaviour.
        print('enter')

    def leaveEvent(self, *args, **kwargs):
        print('leave')
| 33.075269 | 113 | 0.653283 | from __future__ import print_function, absolute_import
from qtpy import QtWidgets
from ._plotting_ui import Ui_DockWidget
from ..vtk_graphics import VTKGraphics
from ..vtk_graphics.pipelines.picked import PickedSelection
vtk_graphics = VTKGraphics.instance()
class PlainTextEditDelegate(object):
    """Hooks a QPlainTextEdit so that hovering the mouse in or out of it
    pushes the widget's text into the shared picked selection and refreshes
    the VTK view.

    Fix: the original enter_event/leave_event bodies were byte-identical
    duplicates; both now delegate to a single private helper.
    """

    def __init__(self, parent, dock_widget):
        self.parent = parent
        self.dock_widget = dock_widget
        self.delegate = QtWidgets.QPlainTextEdit()
        # Monkey-patch the widget's hover handlers; both trigger the same sync.
        self.parent.enterEvent = self.enter_event
        self.parent.leaveEvent = self.leave_event

    def _sync_selection(self):
        """Push the editor text into the picked selection and re-render.

        Change notifications are blocked so this programmatic update does not
        re-enter the data_changed handlers.
        """
        picked_selection = vtk_graphics.picked_selection
        picked_selection.data_changed.block()
        picked_selection.set_from_str(str(self.parent.toPlainText()))
        picked_selection.data_changed.unblock()
        vtk_graphics.visible_filter.Modified()
        # On application shutdown the interactor may already be None; rendering
        # then raises AttributeError, which is safe to ignore.
        try:
            vtk_graphics.render()
        except AttributeError:
            pass

    def enter_event(self, *args):
        self._sync_selection()

    def leave_event(self, *args):
        self._sync_selection()
class PlottingDock(QtWidgets.QDockWidget):
    """Dock widget that plots/erases FEM entities and toggles per-category
    visibility in the VTK view."""

    # Maps a checkable list item's caption to the (hide, show) method names
    # on vtk_graphics.visible_types.  Replaces the long if/elif chain.
    _VISIBILITY_ACTIONS = {
        "All": ("remove_all", "add_all"),
        "FEM": ("remove_all_fem", "add_all_fem"),
        "Grid": ("remove_all_grid_fem", "add_all_grid_fem"),
        "Point Elements": ("remove_all_point_fem", "add_all_point_fem"),
        "Line Elements": ("remove_all_line_fem", "add_all_line_fem"),
        "Tri Elements": ("remove_all_tri_fem", "add_all_tri_fem"),
        "Quad Elements": ("remove_all_quad_fem", "add_all_quad_fem"),
        "Shell Elements": ("remove_all_shell_fem", "add_all_shell_fem"),
        "MPC's": ("remove_all_mpc_fem", "add_all_mpc_fem"),
    }

    def __init__(self, main_window):
        super(PlottingDock, self).__init__(main_window)
        self.main_window = main_window
        self.ui = Ui_DockWidget()
        self.ui.setupUi(self)
        self.ui.plot_button.clicked.connect(self._plot)
        self.ui.erase_button.clicked.connect(self._erase)
        self.ui.done_button.clicked.connect(self._done)
        self.ui.plainTextEdit.textChanged.connect(self._text_changed)
        self.ui.plainTextEdit.setReadOnly(False)
        self.ui.listWidget.itemChanged.connect(self._item_changed)
        # Keeps the text edit in sync with the interactive picked selection.
        self.delegate = PlainTextEditDelegate(self.ui.plainTextEdit, self)
        self.setWindowTitle('Hide/Show FEM')
        self._selection = PickedSelection()
        self._selection.data_changed.connect(self._picked_data_changed)

    def show_and_register(self):
        """Show the dock and register its selection with the picking manager."""
        self.show()
        # Local import -- presumably avoids a circular import; verify before moving.
        from ..vtk_graphics.picking import PickingManager
        picking_manager = PickingManager.instance()
        picking_manager.register_selection(self._selection)
        self.main_window.show_dock(self)

    def build(self, vtk_config):
        """No-op; retained for interface compatibility with callers."""
        pass

    def _plot(self):
        """Plot the FEM entities named in the text box."""
        vtk_graphics.plot_fem(str(self.ui.plainTextEdit.toPlainText()))

    def _erase(self):
        """Erase the FEM entities named in the text box."""
        vtk_graphics.erase_fem(str(self.ui.plainTextEdit.toPlainText()))

    def _done(self):
        self.hide()

    def closeEvent(self, QCloseEvent):
        super(PlottingDock, self).closeEvent(QCloseEvent)

    def _picked_data_changed(self, *args):
        """Mirror the picked selection into the text box; signals blocked so
        the programmatic update does not re-trigger _text_changed."""
        self.ui.plainTextEdit.blockSignals(True)
        self.ui.plainTextEdit.setPlainText(self._selection.to_str())
        self.ui.plainTextEdit.blockSignals(False)

    def _text_changed(self, *args, **kwargs):
        # NOTE(review): debug trace kept to preserve observable behaviour.
        print(args, kwargs)

    def _item_changed(self, item):
        """Hide (state 0) or show the category named by *item*, then refresh.

        Unknown captions only refresh, matching the original chain.
        """
        actions = self._VISIBILITY_ACTIONS.get(str(item.text()))
        if actions is not None:
            hide_method, show_method = actions
            chosen = hide_method if item.checkState() == 0 else show_method
            getattr(vtk_graphics.visible_types, chosen)()
        vtk_graphics.visible_filter.Modified()
        vtk_graphics.render()

    def enterEvent(self, *args, **kwargs):
        # NOTE(review): debug trace kept to preserve observable behaviour.
        print('enter')

    def leaveEvent(self, *args, **kwargs):
        print('leave')
| true | true |
1c35ebb904f3d19508064d2d7eea1e2890071729 | 7,721 | py | Python | evaluate.py | AarthiKasirajan/transformer-kgc | 4c8f34a6df9f8e6cfc0717052085eb926bfbefa2 | [
"Apache-2.0"
] | 4 | 2022-03-22T04:08:19.000Z | 2022-03-30T06:44:26.000Z | evaluate.py | apoorvumang/transformer-kgc | b12e3161054e67c2f6aa5b6d4878ac2e0eb2f4f5 | [
"Apache-2.0"
] | 1 | 2022-03-30T16:29:37.000Z | 2022-03-31T12:13:43.000Z | evaluate.py | apoorvumang/kgt5 | d660dce63133f0fd3fa4d909c92b27dad6395153 | [
"Apache-2.0"
] | 2 | 2021-11-01T11:52:59.000Z | 2022-01-09T18:43:55.000Z | import argparse
import math
from tqdm import tqdm
from dataset import T5_Dataset
from torch.utils.data import DataLoader
from utils_accelerate import *
import numpy as np
from typing import Dict
from collections import defaultdict
class Evaluator:
    """Rank-based evaluator for link prediction.

    For every query in *dataset*, scores all candidate entities with the
    seq2seq model (summed token log-likelihood of each tokenized entity
    string) and prints MR, MRR and Hits@{1,10}, both raw and filtered
    against the known true answers from the train/valid/test splits.
    """

    def __init__(self, dataset: T5_Dataset, model, args):
        self.device = args.device
        self.dataset = dataset
        self.model = model.to(self.device)
        self.num_workers = args.num_workers
        self.batch_size = args.batch_size
        # Number of (query, entity) pairs scored per forward pass.
        self.chunk_size = args.chunk_size
        self.data_loader = DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            collate_fn=dataset._collate_eval_2,
        )
        # Known true answers per input string, used for filtered ranking.
        self.filter_dicts = dict()
        self.filter_dicts["train"] = self.create_filter_dict("train")
        self.filter_dicts["valid"] = self.create_filter_dict("valid")
        self.filter_dicts["test"] = self.create_filter_dict("test")

    def create_filter_dict(self, split: str) -> Dict[str, list]:
        """Map each input string of *split* to the ids of all its true answers.

        Fix: the original annotation claimed ``Dict[str, int]``, but the
        values are lists of entity ids.
        """
        data = self.dataset.split(split)
        filter_dict = defaultdict(list)
        for query_str, answer_str in zip(data["inputs"], data["outputs"]):
            filter_dict[query_str].append(self.dataset.entity_string_to_id[answer_str])
        return filter_dict

    @torch.no_grad()
    def eval(self):
        """Score every entity for each example and print ranking metrics."""
        self.model.eval()
        loader = tqdm(self.data_loader, total=len(self.data_loader), unit="batch")
        num_entities = len(self.dataset.entity_strings)
        ranks = {
            "unfiltered": list(),
            "filtered": list(),
        }
        for batch in loader:
            input_ids, attention_mask, label_strings, input_strings = batch
            input_ids = input_ids.to(self.device)
            attention_mask = attention_mask.to(self.device)
            # Fix: use the actual batch size -- the final batch of the loader
            # may be smaller than self.batch_size, and the original
            # repeat([self.batch_size, 1]) produced mismatched lengths there.
            actual_batch_size = input_ids.shape[0]
            input_ids_repeated = torch.repeat_interleave(input_ids, num_entities, dim=0)
            attention_mask_repeated = torch.repeat_interleave(attention_mask, num_entities, dim=0)
            tokenized_entities = self.dataset.tokenized_entities.input_ids.to(self.device)
            # todo: for filtering we need to use only the filtered entities per triple here
            all_entities_repeated = tokenized_entities.repeat([actual_batch_size, 1])
            summed_logit_chunks = []
            # Process chunk by chunk to bound memory usage.
            for chunk_number in range(math.ceil(len(input_ids_repeated) / self.chunk_size)):
                chunk_start = self.chunk_size * chunk_number
                chunk_end = min(self.chunk_size * (chunk_number + 1), len(input_ids_repeated))
                current_chunk_size = chunk_end - chunk_start
                outputs_chunk = self.model(
                    input_ids=input_ids_repeated[chunk_start:chunk_end],
                    attention_mask=attention_mask_repeated[chunk_start:chunk_end],
                    labels=all_entities_repeated[chunk_start:chunk_end],
                )
                soft_logits_chunk = torch.log_softmax(outputs_chunk.logits, dim=2)
                coordinates = all_entities_repeated[chunk_start:chunk_end].view(current_chunk_size, -1, 1)
                # Zero out padding positions so they do not contribute.
                # Fix: squeeze only the last dim; a bare squeeze() also dropped
                # the batch dim when the chunk contained a single row.
                padded_mask = (coordinates == 0).squeeze(-1)
                soft_logits_chunk[padded_mask] = 0
                needed_soft_logits_chunk = torch.gather(
                    soft_logits_chunk, 2, coordinates
                ).view(current_chunk_size, -1)
                summed_logit_chunks.append(torch.sum(needed_soft_logits_chunk, dim=1))
            summed_logits = torch.cat(summed_logit_chunks)
            for summed_logits_per_triple, input_string, label in zip(
                summed_logits.split(num_entities), input_strings, label_strings
            ):
                arg_sorted = torch.argsort(summed_logits_per_triple, descending=True)
                entity_id = self.dataset.entity_string_to_id[label]
                # 0-based position of the true entity in the sorted scores.
                rank = (arg_sorted == entity_id).nonzero(as_tuple=True)[0].item()
                ranks["unfiltered"].append(rank)
                # Filtered setting: push all other known true answers to -inf,
                # restoring the target's own score afterwards.
                true_score = summed_logits_per_triple[entity_id].clone()
                for filter_dict in self.filter_dicts.values():
                    summed_logits_per_triple[filter_dict[input_string]] = -float("inf")
                summed_logits_per_triple[entity_id] = true_score
                arg_sorted = torch.argsort(summed_logits_per_triple, descending=True)
                rank = (arg_sorted == entity_id).nonzero(as_tuple=True)[0].item()
                ranks["filtered"].append(rank)
        # Shift to 1-based ranks so the best possible rank is 1, not 0.
        for setting, list_of_ranks in ranks.items():
            ranks[setting] = np.array(list_of_ranks, dtype=np.float32) + 1
        print("MR", ranks["unfiltered"].mean())
        print("MR-filtered", ranks["filtered"].mean())
        print("MRR", np.power(ranks["unfiltered"], -1).mean())
        print("MRR-filtered", np.power(ranks["filtered"], -1).mean())
        print("Hits@1", (ranks["unfiltered"] == 1).sum() / len(self.dataset))
        print("Hits@1-filtered", (ranks["filtered"] == 1).sum() / len(self.dataset))
        print("Hits@10", (ranks["unfiltered"] <= 10).sum() / len(self.dataset))
        print("Hits@10-filtered", (ranks["filtered"] <= 10).sum() / len(self.dataset))
def main():
    """CLI entry point: load a checkpoint and run ranking evaluation."""
    parser = argparse.ArgumentParser()
    # (flag, type, default); --checkpoint keeps its implicit default of None.
    for flag, kind, default in [
        ("--prefix", str, "temp"),
        ("--checkpoint", str, None),
        ("--dataset", str, "codex-m"),
        ("--batch_size", int, 1),
        ("--chunk_size", int, 50),
        ("--num_beams", int, 1),
        ("--num_predictions", int, 1),
        ("--length_penalty", float, 0.6),
        ("--num_workers", int, 0),
        ("--device", str, "cuda"),
        ("--split", str, "test"),
    ]:
        parser.add_argument(flag, type=kind, default=default)
    opts = parser.parse_args()
    eval_dataset = T5_Dataset(opts.split, dataset_name=opts.dataset)
    checkpoint_location = "models/{}/{}.pt".format(opts.prefix, opts.checkpoint)
    print("Using %s" % checkpoint_location)
    net = load_accelerator_model(checkpoint_location, only_model=True)
    Evaluator(dataset=eval_dataset, model=net, args=opts).eval()


if __name__ == "__main__":
    main()
| 46.233533 | 107 | 0.58969 | import argparse
import math
from tqdm import tqdm
from dataset import T5_Dataset
from torch.utils.data import DataLoader
from utils_accelerate import *
import numpy as np
from typing import Dict
from collections import defaultdict
class Evaluator:
    """Rank-based evaluator for link prediction.

    Scores every candidate entity for each query with the seq2seq model
    (summed token log-likelihood) and prints MR, MRR and Hits@{1,10},
    raw and filtered against known true answers.
    """

    def __init__(self, dataset: T5_Dataset, model, args):
        self.device = args.device
        self.dataset = dataset
        self.model = model.to(self.device)
        self.num_workers = args.num_workers
        self.batch_size = args.batch_size
        # Number of (query, entity) pairs scored per forward pass.
        self.chunk_size = args.chunk_size
        self.data_loader = DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
            collate_fn=dataset._collate_eval_2,
        )
        # Known true answers per input string, for filtered ranking.
        self.filter_dicts = dict()
        self.filter_dicts["train"] = self.create_filter_dict("train")
        self.filter_dicts["valid"] = self.create_filter_dict("valid")
        self.filter_dicts["test"] = self.create_filter_dict("test")

    def create_filter_dict(self, split: str) -> Dict[str, list]:
        """Map each input string of *split* to the ids of all its true answers.

        Fix: the original annotation claimed ``Dict[str, int]``, but the
        values are lists of entity ids.
        """
        data = self.dataset.split(split)
        filter_dict = defaultdict(list)
        for query_str, answer_str in zip(data["inputs"], data["outputs"]):
            filter_dict[query_str].append(self.dataset.entity_string_to_id[answer_str])
        return filter_dict

    @torch.no_grad()
    def eval(self):
        """Score every entity for each example and print ranking metrics.

        Fix: restores the first metric line, which had been truncated to the
        syntactically invalid ``s["unfiltered"].mean())``.
        """
        self.model.eval()
        loader = tqdm(self.data_loader, total=len(self.data_loader), unit="batch")
        num_entities = len(self.dataset.entity_strings)
        ranks = {
            "unfiltered": list(),
            "filtered": list(),
        }
        for batch in loader:
            input_ids, attention_mask, label_strings, input_strings = batch
            input_ids = input_ids.to(self.device)
            attention_mask = attention_mask.to(self.device)
            # Fix: use the actual batch size -- the final batch may be smaller
            # than self.batch_size, and repeat([self.batch_size, 1]) produced
            # mismatched tensor lengths there.
            actual_batch_size = input_ids.shape[0]
            input_ids_repeated = torch.repeat_interleave(input_ids, num_entities, dim=0)
            attention_mask_repeated = torch.repeat_interleave(attention_mask, num_entities, dim=0)
            tokenized_entities = self.dataset.tokenized_entities.input_ids.to(self.device)
            all_entities_repeated = tokenized_entities.repeat([actual_batch_size, 1])
            summed_logit_chunks = []
            # Process chunk by chunk to bound memory usage.
            for chunk_number in range(math.ceil(len(input_ids_repeated) / self.chunk_size)):
                chunk_start = self.chunk_size * chunk_number
                chunk_end = min(self.chunk_size * (chunk_number + 1), len(input_ids_repeated))
                current_chunk_size = chunk_end - chunk_start
                outputs_chunk = self.model(
                    input_ids=input_ids_repeated[chunk_start:chunk_end],
                    attention_mask=attention_mask_repeated[chunk_start:chunk_end],
                    labels=all_entities_repeated[chunk_start:chunk_end],
                )
                soft_logits_chunk = torch.log_softmax(outputs_chunk.logits, dim=2)
                coordinates = all_entities_repeated[chunk_start:chunk_end].view(current_chunk_size, -1, 1)
                # Zero out padding positions so they do not contribute.
                # Fix: squeeze only the last dim; a bare squeeze() also dropped
                # the batch dim when the chunk contained a single row.
                padded_mask = (coordinates == 0).squeeze(-1)
                soft_logits_chunk[padded_mask] = 0
                needed_soft_logits_chunk = torch.gather(
                    soft_logits_chunk, 2, coordinates
                ).view(current_chunk_size, -1)
                summed_logit_chunks.append(torch.sum(needed_soft_logits_chunk, dim=1))
            summed_logits = torch.cat(summed_logit_chunks)
            for summed_logits_per_triple, input_string, label in zip(
                summed_logits.split(num_entities), input_strings, label_strings
            ):
                arg_sorted = torch.argsort(summed_logits_per_triple, descending=True)
                entity_id = self.dataset.entity_string_to_id[label]
                rank = (arg_sorted == entity_id).nonzero(as_tuple=True)[0].item()
                ranks["unfiltered"].append(rank)
                # Filtered setting: push all other known true answers to -inf,
                # restoring the target's own score afterwards.
                true_score = summed_logits_per_triple[entity_id].clone()
                for filter_dict in self.filter_dicts.values():
                    summed_logits_per_triple[filter_dict[input_string]] = -float("inf")
                summed_logits_per_triple[entity_id] = true_score
                arg_sorted = torch.argsort(summed_logits_per_triple, descending=True)
                rank = (arg_sorted == entity_id).nonzero(as_tuple=True)[0].item()
                ranks["filtered"].append(rank)
        # Shift to 1-based ranks so the best possible rank is 1, not 0.
        for setting, list_of_ranks in ranks.items():
            ranks[setting] = np.array(list_of_ranks, dtype=np.float32) + 1
        print("MR", ranks["unfiltered"].mean())
        print("MR-filtered", ranks["filtered"].mean())
        print("MRR", np.power(ranks["unfiltered"], -1).mean())
        print("MRR-filtered", np.power(ranks["filtered"], -1).mean())
        print("Hits@1", (ranks["unfiltered"] == 1).sum() / len(self.dataset))
        print("Hits@1-filtered", (ranks["filtered"] == 1).sum() / len(self.dataset))
        print("Hits@10", (ranks["unfiltered"] <= 10).sum() / len(self.dataset))
        print("Hits@10-filtered", (ranks["filtered"] <= 10).sum() / len(self.dataset))
def main():
    """Parse CLI flags, load the requested checkpoint, and evaluate."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--prefix", type=str, default="temp")
    parser.add_argument("--checkpoint", type=str)
    parser.add_argument("--dataset", type=str, default="codex-m")
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--chunk_size", type=int, default=50)
    parser.add_argument("--num_beams", type=int, default=1)
    parser.add_argument("--num_predictions", type=int, default=1)
    parser.add_argument("--length_penalty", type=float, default=0.6)
    parser.add_argument("--num_workers", type=int, default=0)
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--split", type=str, default="test")
    opts = parser.parse_args()

    eval_dataset = T5_Dataset(opts.split, dataset_name=opts.dataset)
    checkpoint_location = "models/{}/{}.pt".format(opts.prefix, opts.checkpoint)
    print("Using %s" % checkpoint_location)
    net = load_accelerator_model(checkpoint_location, only_model=True)
    runner = Evaluator(dataset=eval_dataset, model=net, args=opts)
    runner.eval()


if __name__ == "__main__":
    main()
| true | true |
1c35ec20f4502cb2ae323ef70a0d365e7cd2e969 | 2,458 | py | Python | test/TestMas.py | ARKlab/Artesian.SDK-Python | 79b54ad00526f5a75c400422fd1c0c8532b67436 | [
"MIT"
] | 2 | 2022-02-21T17:03:04.000Z | 2022-02-24T17:14:02.000Z | test/TestMas.py | ARKlab/Artesian.SDK-Python | 79b54ad00526f5a75c400422fd1c0c8532b67436 | [
"MIT"
] | 2 | 2020-02-06T10:03:35.000Z | 2022-03-01T09:39:54.000Z | test/TestMas.py | ARKlab/Artesian.SDK-Python | 79b54ad00526f5a75c400422fd1c0c8532b67436 | [
"MIT"
] | 1 | 2019-08-01T06:20:58.000Z | 2019-08-01T06:20:58.000Z | from Artesian import *
import helpers
import unittest
# Placeholder credentials: requests never reach a real service because the
# tests run behind the helpers.TrackRequests interceptor.
cfg = ArtesianConfig("baseaddr","apikey")
qs = QueryService(cfg)
class TestMas(unittest.TestCase):
    """Market-assessment query builder tests.

    Each test builds a query with a different filler strategy and checks the
    serialised query-string parameters captured by helpers.TrackRequests.
    """

    @helpers.TrackRequests
    def test_Null_Fill(self, requests):
        """withFillNull() must serialise as fillerK=Null."""
        url = qs.createMarketAssessment() \
            .forMarketData([100000001]) \
            .forProducts(["M+1", "M+2"]) \
            .inAbsoluteDateRange("2018-01-01","2018-01-02") \
            .withFillNull() \
            .execute()
        self.assertEqual(requests.getQs()["fillerK"],"Null")

    @helpers.TrackRequests
    def test_No_Fill(self, requests):
        """withFillNone() must serialise as fillerK=NoFill."""
        url = qs.createMarketAssessment() \
            .forMarketData([100000001]) \
            .forProducts(["M+1", "M+2"]) \
            .inAbsoluteDateRange("2018-01-01","2018-01-02") \
            .withFillNone() \
            .execute()
        self.assertEqual(requests.getQs()["fillerK"],"NoFill")

    @helpers.TrackRequests
    def test_Latest_Fill(self, requests):
        """withFillLatestValue(period) must emit the kind and period params."""
        url = qs.createMarketAssessment() \
            .forMarketData([100000001]) \
            .forProducts(["M+1", "M+2"]) \
            .inAbsoluteDateRange("2018-01-01","2018-01-02") \
            .withFillLatestValue("P5D") \
            .execute()
        query = requests.getQs()
        self.assertEqual(query["fillerK"],"LatestValidValue")
        self.assertEqual(query["fillerP"],"P5D")

    @helpers.TrackRequests
    def test_Custom_Value_Fill(self, requests):
        """withFillCustomValue(...) must emit one fillerDV* param per field.

        NOTE(review): 'volueGiven' looks like a typo for 'volumeGiven', but it
        must match the SDK keyword argument -- confirm against the Artesian
        SDK before renaming.
        """
        url = qs.createMarketAssessment() \
            .forMarketData([100000001]) \
            .forProducts(["M+1", "M+2"]) \
            .inAbsoluteDateRange("2018-01-01","2018-01-02") \
            .withFillCustomValue(
                settlement = 1,
                open = 2,
                close = 3,
                high = 4,
                low = 5,
                volumePaid = 6,
                volueGiven = 7,
                volume = 8
            ) \
            .execute()
        query = requests.getQs()
        self.assertEqual(query["fillerK"],"CustomValue")
        self.assertEqual(query["fillerDVs"],"1")
        self.assertEqual(query["fillerDVo"],"2")
        self.assertEqual(query["fillerDVc"],"3")
        self.assertEqual(query["fillerDVh"],"4")
        self.assertEqual(query["fillerDVl"],"5")
        self.assertEqual(query["fillerDVvp"],"6")
        self.assertEqual(query["fillerDVvg"],"7")
self.assertEqual(query["fillerDVvt"],"8") | 34.138889 | 62 | 0.546786 | from Artesian import *
import helpers
import unittest
cfg = ArtesianConfig("baseaddr","apikey")
qs = QueryService(cfg)
class TestMas(unittest.TestCase):
@helpers.TrackRequests
def test_Null_Fill(self, requests):
url = qs.createMarketAssessment() \
.forMarketData([100000001]) \
.forProducts(["M+1", "M+2"]) \
.inAbsoluteDateRange("2018-01-01","2018-01-02") \
.withFillNull() \
.execute()
self.assertEqual(requests.getQs()["fillerK"],"Null")
@helpers.TrackRequests
def test_No_Fill(self, requests):
url = qs.createMarketAssessment() \
.forMarketData([100000001]) \
.forProducts(["M+1", "M+2"]) \
.inAbsoluteDateRange("2018-01-01","2018-01-02") \
.withFillNone() \
.execute()
self.assertEqual(requests.getQs()["fillerK"],"NoFill")
@helpers.TrackRequests
def test_Latest_Fill(self, requests):
url = qs.createMarketAssessment() \
.forMarketData([100000001]) \
.forProducts(["M+1", "M+2"]) \
.inAbsoluteDateRange("2018-01-01","2018-01-02") \
.withFillLatestValue("P5D") \
.execute()
query = requests.getQs()
self.assertEqual(query["fillerK"],"LatestValidValue")
self.assertEqual(query["fillerP"],"P5D")
@helpers.TrackRequests
def test_Custom_Value_Fill(self, requests):
url = qs.createMarketAssessment() \
.forMarketData([100000001]) \
.forProducts(["M+1", "M+2"]) \
.inAbsoluteDateRange("2018-01-01","2018-01-02") \
.withFillCustomValue(
settlement = 1,
open = 2,
close = 3,
high = 4,
low = 5,
volumePaid = 6,
volueGiven = 7,
volume = 8
) \
.execute()
query = requests.getQs()
self.assertEqual(query["fillerK"],"CustomValue")
self.assertEqual(query["fillerDVs"],"1")
self.assertEqual(query["fillerDVo"],"2")
self.assertEqual(query["fillerDVc"],"3")
self.assertEqual(query["fillerDVh"],"4")
self.assertEqual(query["fillerDVl"],"5")
self.assertEqual(query["fillerDVvp"],"6")
self.assertEqual(query["fillerDVvg"],"7")
self.assertEqual(query["fillerDVvt"],"8") | true | true |
1c35ec795e6542ac4eda2009f43a9a126cd7ef64 | 574 | py | Python | src/main/resources/pytz/zoneinfo/Africa/Nouakchott.py | TheEin/swagger-maven-plugin | cf93dce2d5c8d3534f4cf8c612b11e2d2313871b | [
"Apache-2.0"
] | 65 | 2015-11-14T13:46:01.000Z | 2021-08-14T05:54:04.000Z | lib/pytz/zoneinfo/Africa/Nouakchott.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 13 | 2016-03-31T20:00:17.000Z | 2021-08-20T14:52:31.000Z | lib/pytz/zoneinfo/Africa/Nouakchott.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 20 | 2015-03-18T08:41:37.000Z | 2020-12-18T02:58:30.000Z | '''tzinfo timezone information for Africa/Nouakchott.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Nouakchott(DstTzInfo):
    '''Africa/Nouakchott timezone definition. See datetime.tzinfo for details'''

    # IANA zone name.
    zone = 'Africa/Nouakchott'

    # Auto-generated data: the UTC instants at which the offset changes.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1912,1,1,1,3,48),
d(1934,2,26,0,0,0),
d(1960,11,28,1,0,0),
    ]

    # (utcoffset seconds, dst seconds, tzname) matching each transition above.
    _transition_info = [
i(-3840,0,'LMT'),
i(0,0,'GMT'),
i(-3600,0,'WAT'),
i(0,0,'GMT'),
    ]

# pytz convention: export a singleton instance, shadowing the class.
Nouakchott = Nouakchott()
| 21.259259 | 80 | 0.672474 | from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Nouakchott(DstTzInfo):
    '''Africa/Nouakchott timezone definition. See datetime.tzinfo for details'''

    # IANA zone name.
    zone = 'Africa/Nouakchott'

    # Auto-generated data: UTC instants at which the offset changes.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1912,1,1,1,3,48),
d(1934,2,26,0,0,0),
d(1960,11,28,1,0,0),
    ]

    # (utcoffset seconds, dst seconds, tzname) per transition above.
    _transition_info = [
i(-3840,0,'LMT'),
i(0,0,'GMT'),
i(-3600,0,'WAT'),
i(0,0,'GMT'),
    ]

# pytz convention: export a singleton instance, shadowing the class.
Nouakchott = Nouakchott()
| true | true |
1c35ecea6b12db0ebb681025bb6882814f532111 | 7,105 | py | Python | InnerEye/ML/configs/segmentation/HeadAndNeckBase.py | shivp950/InnerEye-DeepLearning | 014c74e34f990d9a7b2b7bf7f6f632f29be33559 | [
"MIT"
] | 2 | 2021-02-19T03:16:19.000Z | 2022-02-08T01:40:09.000Z | InnerEye/ML/configs/segmentation/HeadAndNeckBase.py | shivp950/InnerEye-DeepLearning | 014c74e34f990d9a7b2b7bf7f6f632f29be33559 | [
"MIT"
] | null | null | null | InnerEye/ML/configs/segmentation/HeadAndNeckBase.py | shivp950/InnerEye-DeepLearning | 014c74e34f990d9a7b2b7bf7f6f632f29be33559 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import random
from typing import Any
import pandas as pd
from InnerEye.ML.config import MixtureLossComponent, PhotometricNormalizationMethod, SegmentationLoss, \
SegmentationModelBase, SliceExclusionRule, SummedProbabilityRule, equally_weighted_classes
from InnerEye.ML.deep_learning_config import OptimizerType
from InnerEye.ML.utils.model_metadata_util import generate_random_colours_list
from InnerEye.ML.utils.split_dataset import DatasetSplits
# List of structures to segment. The order is important, because different values of num_structures
# in the constructor will select different prefixes of the list.
STRUCTURE_LIST = ["external", "parotid_l", "parotid_r", "smg_l", "smg_r", "spinal_cord", "brainstem",
                  "globe_l", "globe_r", "mandible", "spc_muscle", "mpc_muscle", "cochlea_l", "cochlea_r",
                  "lens_l", "lens_r", "optic_chiasm", "optic_nerve_l", "optic_nerve_r", "pituitary_gland",
                  "lacrimal_gland_l", "lacrimal_gland_r"]
# Fixed seed so the per-structure display colours are reproducible across runs.
RANDOM_COLOUR_GENERATOR = random.Random(0)
COLOURS = generate_random_colours_list(RANDOM_COLOUR_GENERATOR, len(STRUCTURE_LIST))
# Hole-filling is enabled for every structure.
FILL_HOLES = [True] * len(STRUCTURE_LIST)
# This configuration needs to be supplied with a value for azure_dataset_id that refers to your
# dataset. You may also supply a value for num_structures, feature_channels or any other feature. For example,
# with the appropriate dataset, this would build the model whose results are reported in the InnerEye team's
# paper:
#
# class HeadAndNeckPaper(HeadAndNeckBase):
#
# def __init__(self):
# super().__init__(
# azure_dataset_id="foo_bar_baz",
# num_structures=10)
class HeadAndNeckBase(SegmentationModelBase):
    """
    Head and Neck radiotherapy image segmentation model.
    """

    def __init__(self, num_structures: int = 0, **kwargs: Any) -> None:
        """
        :param num_structures: number of structures from STRUCTURE_LIST to predict (default: all structures)
        :param kwargs: other args from subclass
        """
        # Number of training epochs
        num_epochs = 120
        # Number of structures to predict; if positive but less than the length of STRUCTURE_LIST, the relevant prefix
        # of STRUCTURE_LIST will be predicted.
        if num_structures <= 0 or num_structures > len(STRUCTURE_LIST):
            num_structures = len(STRUCTURE_LIST)
        ground_truth_ids = STRUCTURE_LIST[:num_structures]
        colours = COLOURS[:num_structures]
        fill_holes = FILL_HOLES[:num_structures]
        # "zz_" prefix presumably forces these names to sort last in reports -- confirm.
        ground_truth_ids_display_names = [f"zz_{x}" for x in ground_truth_ids]
        # The amount of GPU memory required increases with both the number of structures and the
        # number of feature channels. The following is a sensible default to avoid out-of-memory,
        # but you can override is by passing in another (singleton list) value for feature_channels
        # from a subclass.
        num_feature_channels = 32 if num_structures <= 20 else 26
        # Background class weight: lower when many foreground classes share the budget.
        bg_weight = 0.02 if len(ground_truth_ids) > 1 else 0.25
        # In case of vertical overlap between brainstem and spinal_cord, we separate them
        # by converting brainstem voxels to cord, as the latter is clinically more sensitive.
        # We do the same to separate SPC and MPC; in this case, the direction of change is unimportant,
        # so we choose SPC-to-MPC arbitrarily.
        slice_exclusion_rules = []
        summed_probability_rules = []
        if "brainstem" in ground_truth_ids and "spinal_cord" in ground_truth_ids:
            slice_exclusion_rules.append(SliceExclusionRule("brainstem", "spinal_cord", False))
            if "external" in ground_truth_ids:
                summed_probability_rules.append(SummedProbabilityRule("spinal_cord", "brainstem", "external"))
        if "spc_muscle" in ground_truth_ids and "mpc_muscle" in ground_truth_ids:
            slice_exclusion_rules.append(SliceExclusionRule("spc_muscle", "mpc_muscle", False))
            if "external" in ground_truth_ids:
                summed_probability_rules.append(SummedProbabilityRule("mpc_muscle", "spc_muscle", "external"))
        if "optic_chiasm" in ground_truth_ids and "pituitary_gland" in ground_truth_ids:
            slice_exclusion_rules.append(SliceExclusionRule("optic_chiasm", "pituitary_gland", True))
            if "external" in ground_truth_ids:
                summed_probability_rules.append(SummedProbabilityRule("optic_chiasm", "pituitary_gland", "external"))
        super().__init__(
            should_validate=False,  # we'll validate after kwargs are added
            num_epochs=num_epochs,
            save_start_epoch=num_epochs,
            save_step_epochs=num_epochs,
            architecture="UNet3D",
            kernel_size=3,
            train_batch_size=4,
            inference_batch_size=1,
            feature_channels=[num_feature_channels],
            crop_size=(96, 288, 288),
            test_crop_size=(144, 512, 512),
            inference_stride_size=(72, 256, 256),
            image_channels=["ct"],
            norm_method=PhotometricNormalizationMethod.CtWindow,
            level=50,
            window=600,
            start_epoch=0,
            l_rate=1e-3,
            min_l_rate=1e-5,
            l_rate_polynomial_gamma=0.9,
            optimizer_type=OptimizerType.Adam,
            opt_eps=1e-4,
            adam_betas=(0.9, 0.999),
            momentum=0.9,
            test_diff_epochs=1,
            test_step_epochs=1,
            use_mixed_precision=True,
            use_model_parallel=True,
            monitoring_interval_seconds=0,
            num_dataload_workers=4,
            loss_type=SegmentationLoss.Mixture,
            mixture_loss_components=[MixtureLossComponent(0.5, SegmentationLoss.Focal, 0.2),
                                     MixtureLossComponent(0.5, SegmentationLoss.SoftDice, 0.1)],
            ground_truth_ids=ground_truth_ids,
            ground_truth_ids_display_names=ground_truth_ids_display_names,
            largest_connected_component_foreground_classes=ground_truth_ids,
            colours=colours,
            fill_holes=fill_holes,
            class_weights=equally_weighted_classes(ground_truth_ids, background_weight=bg_weight),
            slice_exclusion_rules=slice_exclusion_rules,
            summed_probability_rules=summed_probability_rules,
        )
        self.add_and_validate(kwargs)

    def get_model_train_test_dataset_splits(self, dataset_df: pd.DataFrame) -> DatasetSplits:
        """Split the dataset 80/5/15 into train/val/test with a fixed seed."""
        return DatasetSplits.from_proportions(dataset_df, proportion_train=0.8, proportion_val=0.05,
                                              proportion_test=0.15,
                                              random_seed=0)
| 53.421053 | 118 | 0.666151 |
import random
from typing import Any
import pandas as pd
from InnerEye.ML.config import MixtureLossComponent, PhotometricNormalizationMethod, SegmentationLoss, \
SegmentationModelBase, SliceExclusionRule, SummedProbabilityRule, equally_weighted_classes
from InnerEye.ML.deep_learning_config import OptimizerType
from InnerEye.ML.utils.model_metadata_util import generate_random_colours_list
from InnerEye.ML.utils.split_dataset import DatasetSplits
STRUCTURE_LIST = ["external", "parotid_l", "parotid_r", "smg_l", "smg_r", "spinal_cord", "brainstem",
"globe_l", "globe_r", "mandible", "spc_muscle", "mpc_muscle", "cochlea_l", "cochlea_r",
"lens_l", "lens_r", "optic_chiasm", "optic_nerve_l", "optic_nerve_r", "pituitary_gland",
"lacrimal_gland_l", "lacrimal_gland_r"]
RANDOM_COLOUR_GENERATOR = random.Random(0)
COLOURS = generate_random_colours_list(RANDOM_COLOUR_GENERATOR, len(STRUCTURE_LIST))
FILL_HOLES = [True] * len(STRUCTURE_LIST)
# paper:
#
# class HeadAndNeckPaper(HeadAndNeckBase):
#
# def __init__(self):
# super().__init__(
# azure_dataset_id="foo_bar_baz",
# num_structures=10)
class HeadAndNeckBase(SegmentationModelBase):
    """Base configuration for head-and-neck CT segmentation models.

    Subclasses choose how many of the structures in STRUCTURE_LIST to
    predict, and may override any of the defaults configured here by
    passing extra keyword arguments.
    """

    def __init__(self, num_structures: int = 0, **kwargs: Any) -> None:
        """Create the configuration.

        :param num_structures: number of structures to predict, taken as a
            prefix of STRUCTURE_LIST; values <= 0 or larger than the list
            mean "predict all structures".
        :param kwargs: additional settings forwarded to
            SegmentationModelBase and validated via add_and_validate().
        """
        # Number of training epochs
        num_epochs = 120
        # Number of structures to predict; if positive but less than the length of STRUCTURE_LIST, the relevant prefix
        # of STRUCTURE_LIST will be predicted.
        if num_structures <= 0 or num_structures > len(STRUCTURE_LIST):
            num_structures = len(STRUCTURE_LIST)
        ground_truth_ids = STRUCTURE_LIST[:num_structures]
        colours = COLOURS[:num_structures]
        fill_holes = FILL_HOLES[:num_structures]
        # Display names carry a "zz_" prefix (presumably for report ordering
        # in downstream tooling — TODO confirm against report generation).
        ground_truth_ids_display_names = [f"zz_{x}" for x in ground_truth_ids]
        # The amount of GPU memory required increases with both the number of structures and the
        # number of feature channels. The following is a sensible default to avoid out-of-memory,
        # but you can override is by passing in another (singleton list) value for feature_channels
        # from a subclass.
        num_feature_channels = 32 if num_structures <= 20 else 26
        # With several foreground classes the background gets a small weight;
        # with a single class it gets a larger share.
        bg_weight = 0.02 if len(ground_truth_ids) > 1 else 0.25
        # In case of vertical overlap between brainstem and spinal_cord, we separate them
        # by converting brainstem voxels to cord, as the latter is clinically more sensitive.
        # We do the same to separate SPC and MPC; in this case, the direction of change is unimportant,
        # so we choose SPC-to-MPC arbitrarily.
        slice_exclusion_rules = []
        summed_probability_rules = []
        if "brainstem" in ground_truth_ids and "spinal_cord" in ground_truth_ids:
            slice_exclusion_rules.append(SliceExclusionRule("brainstem", "spinal_cord", False))
            if "external" in ground_truth_ids:
                summed_probability_rules.append(SummedProbabilityRule("spinal_cord", "brainstem", "external"))
        if "spc_muscle" in ground_truth_ids and "mpc_muscle" in ground_truth_ids:
            slice_exclusion_rules.append(SliceExclusionRule("spc_muscle", "mpc_muscle", False))
            if "external" in ground_truth_ids:
                summed_probability_rules.append(SummedProbabilityRule("mpc_muscle", "spc_muscle", "external"))
        if "optic_chiasm" in ground_truth_ids and "pituitary_gland" in ground_truth_ids:
            slice_exclusion_rules.append(SliceExclusionRule("optic_chiasm", "pituitary_gland", True))
            if "external" in ground_truth_ids:
                summed_probability_rules.append(SummedProbabilityRule("optic_chiasm", "pituitary_gland", "external"))
        super().__init__(
            should_validate=False,  # we'll validate after kwargs are added
            num_epochs=num_epochs,
            save_start_epoch=num_epochs,
            save_step_epochs=num_epochs,
            architecture="UNet3D",
            kernel_size=3,
            train_batch_size=4,
            inference_batch_size=1,
            feature_channels=[num_feature_channels],
            crop_size=(96, 288, 288),
            test_crop_size=(144, 512, 512),
            inference_stride_size=(72, 256, 256),
            image_channels=["ct"],
            norm_method=PhotometricNormalizationMethod.CtWindow,
            level=50,
            window=600,
            start_epoch=0,
            l_rate=1e-3,
            min_l_rate=1e-5,
            l_rate_polynomial_gamma=0.9,
            optimizer_type=OptimizerType.Adam,
            opt_eps=1e-4,
            adam_betas=(0.9, 0.999),
            momentum=0.9,
            test_diff_epochs=1,
            test_step_epochs=1,
            use_mixed_precision=True,
            use_model_parallel=True,
            monitoring_interval_seconds=0,
            num_dataload_workers=4,
            loss_type=SegmentationLoss.Mixture,
            mixture_loss_components=[MixtureLossComponent(0.5, SegmentationLoss.Focal, 0.2),
                                     MixtureLossComponent(0.5, SegmentationLoss.SoftDice, 0.1)],
            ground_truth_ids=ground_truth_ids,
            ground_truth_ids_display_names=ground_truth_ids_display_names,
            largest_connected_component_foreground_classes=ground_truth_ids,
            colours=colours,
            fill_holes=fill_holes,
            class_weights=equally_weighted_classes(ground_truth_ids, background_weight=bg_weight),
            slice_exclusion_rules=slice_exclusion_rules,
            summed_probability_rules=summed_probability_rules,
        )
        # Apply caller overrides on top of the defaults, then validate.
        self.add_and_validate(kwargs)

    def get_model_train_test_dataset_splits(self, dataset_df: pd.DataFrame) -> DatasetSplits:
        """Split the dataset 80% train / 5% val / 15% test with a fixed seed."""
        return DatasetSplits.from_proportions(dataset_df, proportion_train=0.8, proportion_val=0.05,
                                              proportion_test=0.15,
                                              random_seed=0)
| true | true |
1c35edc25fb9b1291aa6f22e1e1725263c6bf24f | 5,809 | py | Python | Python/DataStructures/LinkedList.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | 3 | 2021-03-22T14:13:56.000Z | 2022-03-01T03:06:22.000Z | Python/DataStructures/LinkedList.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | null | null | null | Python/DataStructures/LinkedList.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | null | null | null | #Linked List python implementation
# Node class
# Node class
class Node:
    """A single element of a singly linked list."""

    def __init__(self, value, nextNode):
        # Payload stored in this node.
        self.value = value
        # Reference to the following node, or None at the end of the list.
        self.next = nextNode


# Linked List class
class LinkedList:
    """A singly linked list.

    Conventions preserved from the original implementation:
      * getFirst() returns the first Node object (not its value), while
        getLast() returns the last node's *value*.
      * addAfter()/addBefore() append at the end when the key is absent.

    Bug fixes relative to the original:
      * remove() now removes a matching head node.
      * addAfter()/addBefore() on an empty list no longer insert twice.
      * get() raises IndexError for every out-of-range index (previously an
        AttributeError escaped when the index exceeded the length by > 1).
    """

    def __init__(self):
        # 'root' references the first node; None means the list is empty.
        self.root = None

    def isEmpty(self):
        """Return True when the list contains no nodes."""
        return self.root is None

    def contains(self, value):
        """Return True if any node stores 'value'."""
        tmp = self.root
        while tmp is not None:
            if tmp.value == value:
                return True
            tmp = tmp.next
        return False

    def get(self, index):
        """Return the value stored at position 'index' (0-based).

        Raises IndexError for any out-of-range index.
        """
        tmp = self.root
        for _ in range(index):
            if tmp is None:
                break
            tmp = tmp.next
        if tmp is None:
            raise IndexError('index out of range on LinkedList->get method')
        return tmp.value

    def getFirst(self):
        """Return the first Node object (not its value), or None when empty."""
        return self.root

    def getLast(self):
        """Return the value of the last node, or None when the list is empty."""
        if self.isEmpty():
            return None
        tmp = self.root
        while tmp.next is not None:
            tmp = tmp.next
        return tmp.value

    def add(self, value):
        """Append 'value' at the end of the list."""
        if self.isEmpty():
            self.root = Node(value, None)
            return
        tmp = self.root
        while tmp.next is not None:
            tmp = tmp.next
        tmp.next = Node(value, None)

    def addFirst(self, value):
        """Prepend 'value' at the head of the list."""
        self.root = Node(value, self.root)

    def addAfter(self, key, toAdd):
        """Insert 'toAdd' right after the first node whose value equals 'key'.

        If the list is empty, 'toAdd' becomes the only node.  If 'key' is
        not found (or matches the last node), 'toAdd' is appended at the end.
        """
        if self.isEmpty():
            self.root = Node(toAdd, None)
            # Bug fix: without this return the value used to be added twice.
            return
        tmp = self.root
        while tmp.next is not None:
            if tmp.value == key:
                tmp.next = Node(toAdd, tmp.next)
                return
            tmp = tmp.next
        # Key matched the last node, or no node matched: append at the end.
        tmp.next = Node(toAdd, None)

    def addBefore(self, key, toAdd):
        """Insert 'toAdd' right before the first node whose value equals 'key'.

        If the list is empty, 'toAdd' becomes the only node.  If 'key' is
        not found, 'toAdd' is appended at the end.
        """
        if self.isEmpty():
            self.root = Node(toAdd, None)
            # Bug fix: without this return the value used to be added twice.
            return
        if self.root.value == key:
            self.addFirst(toAdd)
            return
        tmpPrev = self.root
        tmp = self.root.next
        while tmp is not None:
            if tmp.value == key:
                break
            tmpPrev = tmp
            tmp = tmp.next
        # Insert before the matched node, or append when tmp ran off the end.
        tmpPrev.next = Node(toAdd, tmp)

    def remove(self, value):
        """Remove the first node whose value equals 'value', if any."""
        if self.isEmpty():
            return
        # Bug fix: the original never removed a matching head node.
        if self.root.value == value:
            self.root = self.root.next
            return
        tmp = self.root
        while tmp.next is not None:
            if tmp.next.value == value:
                # Unlink the matched node; gc reclaims it.
                tmp.next = tmp.next.next
                return
            tmp = tmp.next

    def removeFirst(self):
        """Remove the first node (no-op on an empty list)."""
        if self.isEmpty():
            return
        self.root = self.root.next

    def removeLast(self):
        """Remove the last node (no-op on an empty list)."""
        if self.isEmpty():
            return
        if self.root.next is None:
            self.root = None
            return
        tmp = self.root
        # Stop at the second-to-last node so its 'next' can be cleared.
        while tmp.next.next is not None:
            tmp = tmp.next
        tmp.next = None

    def length(self):
        """Return the number of nodes in the list."""
        count = 0
        tmp = self.root
        while tmp is not None:
            count += 1
            tmp = tmp.next
        return count

    def indexOf(self, value):
        """Return the 0-based index of 'value', or -1 if it is absent."""
        count = 0
        tmp = self.root
        while tmp is not None:
            if tmp.value == value:
                return count
            count += 1
            tmp = tmp.next
        return -1

    def clear(self):
        """Remove every node from the list."""
        self.root = None

    def __str__(self):
        """Format the list as '[v1, v2, ...]'."""
        parts = []
        tmp = self.root
        while tmp is not None:
            parts.append(str(tmp.value))
            tmp = tmp.next
        return "[" + ", ".join(parts) + "]"
| 29.48731 | 80 | 0.517645 |
# Node class
class Node:
    """A single element of a singly linked list."""

    def __init__(self, value, nextNode):
        # Payload stored in this node.
        self.value = value
        # Reference to the following node, or None at the end of the list.
        self.next = nextNode


# Linked List class
class LinkedList:
    """A singly linked list.

    Conventions preserved from the original implementation:
      * getFirst() returns the first Node object (not its value), while
        getLast() returns the last node's *value*.
      * addAfter()/addBefore() append at the end when the key is absent.

    Bug fixes relative to the original:
      * remove() now removes a matching head node.
      * addAfter()/addBefore() on an empty list no longer insert twice.
      * get() raises IndexError for every out-of-range index (previously an
        AttributeError escaped when the index exceeded the length by > 1).
    """

    def __init__(self):
        # 'root' references the first node; None means the list is empty.
        self.root = None

    def isEmpty(self):
        """Return True when the list contains no nodes."""
        return self.root is None

    def contains(self, value):
        """Return True if any node stores 'value'."""
        tmp = self.root
        while tmp is not None:
            if tmp.value == value:
                return True
            tmp = tmp.next
        return False

    def get(self, index):
        """Return the value stored at position 'index' (0-based).

        Raises IndexError for any out-of-range index.
        """
        tmp = self.root
        for _ in range(index):
            if tmp is None:
                break
            tmp = tmp.next
        if tmp is None:
            raise IndexError('index out of range on LinkedList->get method')
        return tmp.value

    def getFirst(self):
        """Return the first Node object (not its value), or None when empty."""
        return self.root

    def getLast(self):
        """Return the value of the last node, or None when the list is empty."""
        if self.isEmpty():
            return None
        tmp = self.root
        while tmp.next is not None:
            tmp = tmp.next
        return tmp.value

    def add(self, value):
        """Append 'value' at the end of the list."""
        if self.isEmpty():
            self.root = Node(value, None)
            return
        tmp = self.root
        while tmp.next is not None:
            tmp = tmp.next
        tmp.next = Node(value, None)

    def addFirst(self, value):
        """Prepend 'value' at the head of the list."""
        self.root = Node(value, self.root)

    def addAfter(self, key, toAdd):
        """Insert 'toAdd' right after the first node whose value equals 'key'.

        If the list is empty, 'toAdd' becomes the only node.  If 'key' is
        not found (or matches the last node), 'toAdd' is appended at the end.
        """
        if self.isEmpty():
            self.root = Node(toAdd, None)
            # Bug fix: without this return the value used to be added twice.
            return
        tmp = self.root
        while tmp.next is not None:
            if tmp.value == key:
                tmp.next = Node(toAdd, tmp.next)
                return
            tmp = tmp.next
        # Key matched the last node, or no node matched: append at the end.
        tmp.next = Node(toAdd, None)

    def addBefore(self, key, toAdd):
        """Insert 'toAdd' right before the first node whose value equals 'key'.

        If the list is empty, 'toAdd' becomes the only node.  If 'key' is
        not found, 'toAdd' is appended at the end.
        """
        if self.isEmpty():
            self.root = Node(toAdd, None)
            # Bug fix: without this return the value used to be added twice.
            return
        if self.root.value == key:
            self.addFirst(toAdd)
            return
        tmpPrev = self.root
        tmp = self.root.next
        while tmp is not None:
            if tmp.value == key:
                break
            tmpPrev = tmp
            tmp = tmp.next
        # Insert before the matched node, or append when tmp ran off the end.
        tmpPrev.next = Node(toAdd, tmp)

    def remove(self, value):
        """Remove the first node whose value equals 'value', if any."""
        if self.isEmpty():
            return
        # Bug fix: the original never removed a matching head node.
        if self.root.value == value:
            self.root = self.root.next
            return
        tmp = self.root
        while tmp.next is not None:
            if tmp.next.value == value:
                # Unlink the matched node; gc reclaims it.
                tmp.next = tmp.next.next
                return
            tmp = tmp.next

    def removeFirst(self):
        """Remove the first node (no-op on an empty list)."""
        if self.isEmpty():
            return
        self.root = self.root.next

    def removeLast(self):
        """Remove the last node (no-op on an empty list)."""
        if self.isEmpty():
            return
        if self.root.next is None:
            self.root = None
            return
        tmp = self.root
        # Stop at the second-to-last node so its 'next' can be cleared.
        while tmp.next.next is not None:
            tmp = tmp.next
        tmp.next = None

    def length(self):
        """Return the number of nodes in the list."""
        count = 0
        tmp = self.root
        while tmp is not None:
            count += 1
            tmp = tmp.next
        return count

    def indexOf(self, value):
        """Return the 0-based index of 'value', or -1 if it is absent."""
        count = 0
        tmp = self.root
        while tmp is not None:
            if tmp.value == value:
                return count
            count += 1
            tmp = tmp.next
        return -1

    def clear(self):
        """Remove every node from the list."""
        self.root = None

    def __str__(self):
        """Format the list as '[v1, v2, ...]'."""
        parts = []
        tmp = self.root
        while tmp is not None:
            parts.append(str(tmp.value))
            tmp = tmp.next
        return "[" + ", ".join(parts) + "]"
| true | true |
1c35f04ddadf5cc4f1d5ffbf4e660b5a6d5f1f94 | 4,400 | py | Python | solvers_examples.py | anguyen216/mTSP-work | 47d3b59c83569e9e03c92c9b5140a549d4742bce | [
"MIT"
] | null | null | null | solvers_examples.py | anguyen216/mTSP-work | 47d3b59c83569e9e03c92c9b5140a549d4742bce | [
"MIT"
] | null | null | null | solvers_examples.py | anguyen216/mTSP-work | 47d3b59c83569e9e03c92c9b5140a549d4742bce | [
"MIT"
] | null | null | null | # includes examples of how to run solvers
# Author: Anh Nguyen
import numpy as np
import matplotlib.pyplot as plt
from utils import longLatDistKm, plotPath, plotMultiplePaths
from tsp_solvers.dp_solver import DP_TSP
from tsp_solvers.ILP_solver import ILP_TSP
from tsp_solvers.opt2_solver import OPT2_TSP
from cities import WAYPOINTS, NAMES_DICT
from mtsp_solvers.basic_solver import BASIC_MTSP
import time
def main():
    """Demo: solve a 2-vehicle mTSP over 15 randomly chosen waypoints.

    Prints the combined route length and saves a plot of both routes to
    ./plots/.  The size/accumulator lists below are leftovers of the
    solver benchmarking experiments and are intentionally kept, although
    unused by this demo.
    """
    num_cities = len(WAYPOINTS)
    opt2_runtime = []
    opt2_costs = []
    opt2_sizes = list(range(3, num_cities, 3))
    ilp_costs = []
    ilp_sizes = list(range(3, 26, 3))

    # Sample 15 distinct waypoint indices and pick a random depot among them.
    chosen = np.random.choice(40, size=15, replace=False)
    selected_waypoints = [WAYPOINTS[i] for i in chosen]
    depot = np.random.randint(0, len(selected_waypoints))

    # Two vehicles, each with a very large distance budget.
    solver = BASIC_MTSP(selected_waypoints, longLatDistKm)
    vehicle_count = 2
    colors = ['steelblue', 'orange']
    limits = [100000] * vehicle_count

    solution = solver.solve(selected_waypoints[depot], vehicle_count, limits)
    if solution:
        paths, cost = solution
        print("Total distance traveled by all vehicles:", cost, "km")
        pname = "./plots/sample_mtsp_result.png"
        plotMultiplePaths(paths, colors, save_plot=True, plot_name=pname)
if __name__ == "__main__":
main()
| 36.666667 | 73 | 0.635682 |
import numpy as np
import matplotlib.pyplot as plt
from utils import longLatDistKm, plotPath, plotMultiplePaths
from tsp_solvers.dp_solver import DP_TSP
from tsp_solvers.ILP_solver import ILP_TSP
from tsp_solvers.opt2_solver import OPT2_TSP
from cities import WAYPOINTS, NAMES_DICT
from mtsp_solvers.basic_solver import BASIC_MTSP
import time
def main():
    """Solve a sample 2-vehicle mTSP over 15 random waypoints and plot it."""
    # Sizes/accumulators for the solver benchmarking experiments; unused here.
    N = len(WAYPOINTS)
    opt2_runtime = []
    opt2_costs = []
    opt2_sizes = [i for i in range(3, N, 3)]
    ilp_costs = []
    ilp_sizes = [i for i in range(3, 26, 3)]
    # Sample 15 distinct waypoints and a random starting city among them.
    indices = np.random.choice(40, size=15, replace=False)
    waypoints = [WAYPOINTS[idx] for idx in indices]
    start = np.random.randint(0, len(waypoints))
    # Two vehicles, each with a very large distance budget.
    bmtsp = BASIC_MTSP(waypoints, longLatDistKm)
    num_v = 2
    colors = ['steelblue', 'orange']
    v_limits = [100000] * num_v
    sol = bmtsp.solve(waypoints[start], num_v, v_limits)
    if sol:
        paths, cost = sol
        print("Total distance traveled by all vehicles:", cost, "km")
        pname = "./plots/sample_mtsp_result.png"
        plotMultiplePaths(paths, colors, save_plot=True, plot_name=pname)
if __name__ == "__main__":
main()
| true | true |
1c35f11db06b0d641005dee813bcf7462e94f8d8 | 5,022 | py | Python | members/migrations/0001_initial.py | Herrgrim0/Site | 13ec1a58dece7ceedddc4fe92064aee661c90331 | [
"MIT"
] | 4 | 2017-05-01T10:06:36.000Z | 2019-03-26T20:41:50.000Z | members/migrations/0001_initial.py | Herrgrim0/Site | 13ec1a58dece7ceedddc4fe92064aee661c90331 | [
"MIT"
] | 8 | 2016-09-12T18:42:52.000Z | 2017-12-25T14:08:22.000Z | members/migrations/0001_initial.py | Herrgrim0/Site | 13ec1a58dece7ceedddc4fe92064aee661c90331 | [
"MIT"
] | 7 | 2016-08-31T14:44:28.000Z | 2019-10-31T11:33:06.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-11 11:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema migration for the 'members' app.

    Creates AcademicYear, ComiteMembership, ComitePoste, CustomPermission,
    CustomPermissionsManager, Member and SurName, then wires the
    ComiteMembership foreign keys.  Generated by 'makemigrations';
    do not edit by hand.
    """

    initial = True

    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AcademicYear',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.DateField(verbose_name='Date de début')),
                ('stop', models.DateField(verbose_name='Date de fin')),
                ('slug', models.CharField(max_length=4)),
                ('active', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'Année académique',
                'ordering': ['-start'],
            },
        ),
        migrations.CreateModel(
            name='ComiteMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='ComitePoste',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('slug', models.CharField(max_length=5)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('is_bureau', models.BooleanField(default=False)),
                ('is_bapteme', models.BooleanField(default=False)),
                ('weight', models.IntegerField()),
            ],
            options={
                'verbose_name': 'Poste dans le comité',
                'ordering': ['-weight'],
            },
        ),
        migrations.CreateModel(
            name='CustomPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
            options={
                'verbose_name': 'Permission',
            },
        ),
        migrations.CreateModel(
            name='CustomPermissionsManager',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('expiration_date', models.DateTimeField(blank=True, null=True)),
                ('groups', models.ManyToManyField(blank=True, to='auth.Group')),
                ('permission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.CustomPermission')),
                ('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Permissions Manager',
            },
        ),
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(blank=True, null=True, upload_to='images/members', verbose_name='avatar')),
                ('wiki', models.URLField(blank=True, null=True)),
                ('birthdate', models.DateField(blank=True, null=True, verbose_name='date de naissance')),
                ('extra_info', models.TextField(blank=True, default='')),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SurName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=250)),
                ('is_prefered', models.BooleanField(default=False)),
                ('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.Member')),
            ],
        ),
        migrations.AddField(
            model_name='comitemembership',
            name='member',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='members.Member'),
        ),
        migrations.AddField(
            model_name='comitemembership',
            name='poste',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.ComitePoste'),
        ),
        migrations.AddField(
            model_name='comitemembership',
            name='year',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.AcademicYear', verbose_name='année'),
        ),
    ]
| 43.669565 | 132 | 0.570291 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema migration for the 'members' app.

    Creates AcademicYear, ComiteMembership, ComitePoste, CustomPermission,
    CustomPermissionsManager, Member and SurName, then wires the
    ComiteMembership foreign keys.  Generated by 'makemigrations';
    do not edit by hand.
    """

    initial = True

    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AcademicYear',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.DateField(verbose_name='Date de début')),
                ('stop', models.DateField(verbose_name='Date de fin')),
                ('slug', models.CharField(max_length=4)),
                ('active', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'Année académique',
                'ordering': ['-start'],
            },
        ),
        migrations.CreateModel(
            name='ComiteMembership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='ComitePoste',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('slug', models.CharField(max_length=5)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('is_bureau', models.BooleanField(default=False)),
                ('is_bapteme', models.BooleanField(default=False)),
                ('weight', models.IntegerField()),
            ],
            options={
                'verbose_name': 'Poste dans le comité',
                'ordering': ['-weight'],
            },
        ),
        migrations.CreateModel(
            name='CustomPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
            options={
                'verbose_name': 'Permission',
            },
        ),
        migrations.CreateModel(
            name='CustomPermissionsManager',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('expiration_date', models.DateTimeField(blank=True, null=True)),
                ('groups', models.ManyToManyField(blank=True, to='auth.Group')),
                ('permission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.CustomPermission')),
                ('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Permissions Manager',
            },
        ),
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(blank=True, null=True, upload_to='images/members', verbose_name='avatar')),
                ('wiki', models.URLField(blank=True, null=True)),
                ('birthdate', models.DateField(blank=True, null=True, verbose_name='date de naissance')),
                ('extra_info', models.TextField(blank=True, default='')),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SurName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.CharField(max_length=250)),
                ('is_prefered', models.BooleanField(default=False)),
                ('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.Member')),
            ],
        ),
        migrations.AddField(
            model_name='comitemembership',
            name='member',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='members.Member'),
        ),
        migrations.AddField(
            model_name='comitemembership',
            name='poste',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.ComitePoste'),
        ),
        migrations.AddField(
            model_name='comitemembership',
            name='year',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members.AcademicYear', verbose_name='année'),
        ),
    ]
| true | true |
1c35f30d3a32e33f2f27177392bb3f592d7d96c9 | 13,016 | py | Python | rpython/jit/backend/model.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | [
"Apache-2.0",
"OpenSSL"
] | 34 | 2015-07-09T04:53:27.000Z | 2021-07-19T05:22:27.000Z | idea2/pypyjs-3/deps/pypy/rpython/jit/backend/model.py | igormcoelho/neo-boa | c141b503183cab287744cd19be5dfd86d9bc8daf | [
"MIT"
] | 6 | 2015-05-30T17:20:45.000Z | 2017-06-12T14:29:23.000Z | idea2/pypyjs-3/deps/pypy/rpython/jit/backend/model.py | igormcoelho/neo-boa | c141b503183cab287744cd19be5dfd86d9bc8daf | [
"MIT"
] | 11 | 2015-09-07T14:26:08.000Z | 2020-04-10T07:20:41.000Z | import weakref
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rtyper.lltypesystem import lltype, llmemory
class CPUTotalTracker(object):
    """Running totals of loops/bridges compiled and freed by a CPU backend."""
    # Counters incremented by the backend as loops/bridges are compiled/freed.
    total_compiled_loops = 0
    total_compiled_bridges = 0
    total_freed_loops = 0
    total_freed_bridges = 0

    # for heaptracker
    # _all_size_descrs_with_vtable = None
    _vtable_to_descr_dict = None
class AbstractCPU(object):
supports_floats = False
supports_longlong = False
# ^^^ This is only useful on 32-bit platforms. If True,
# longlongs are supported by the JIT, but stored as doubles.
# Boxes and Consts are BoxFloats and ConstFloats.
supports_singlefloats = False
propagate_exception_descr = None
    def __init__(self):
        # Per-CPU statistics of compiled/freed loops and bridges.
        self.tracker = CPUTotalTracker()
    def _freeze_(self):
        # RPython translation hook; returning True marks the instance as a
        # frozen prebuilt constant (per RPython convention).
        return True
def setup_once(self):
"""Called once by the front-end when the program starts."""
pass
def finish_once(self):
"""Called once by the front-end when the program stops."""
pass
def get_all_loop_runs(self):
""" Function that will return number of times all the loops were run.
Requires earlier setting of set_debug(True), otherwise you won't
get the information.
Returns an instance of LOOP_RUN_CONTAINER from rlib.jit_hooks
"""
raise NotImplementedError
def set_debug(self, value):
""" Enable or disable debugging info. Does nothing by default. Returns
the previous setting.
"""
return False
def compile_loop(self, inputargs, operations, looptoken, jd_id=0,
unique_id=0, log=True, name='', logger=None):
"""Assemble the given loop.
Should create and attach a fresh CompiledLoopToken to
looptoken.compiled_loop_token and stick extra attributes
on it to point to the compiled loop in assembler.
Returns either None or an instance of rpython.rlib.jit.AsmInfo.
"""
raise NotImplementedError
def compile_bridge(self, faildescr, inputargs, operations,
original_loop_token, log=True, logger=None):
"""Assemble the bridge.
The FailDescr is the descr of the original guard that failed.
Returns either None or an instance of rpython.rlib.jit.AsmInfo.
"""
raise NotImplementedError
def dump_loop_token(self, looptoken):
"""Print a disassembled version of looptoken to stdout"""
raise NotImplementedError
    def execute_token(self, looptoken, *args):
        """NOT_RPYTHON (for tests only)
        Execute the generated code referenced by the looptoken.
        When done, this returns a 'dead JIT frame' object that can
        be inspected with the get_latest_xxx() methods.
        """
        # Build an execute function specialized to the lltype of each
        # argument, then invoke it with the looptoken and the arguments.
        argtypes = [lltype.typeOf(x) for x in args]
        execute = self.make_execute_token(*argtypes)
        return execute(looptoken, *args)
def make_execute_token(self, *argtypes):
"""Must make and return an execute_token() function that will be
called with the given argtypes.
"""
raise NotImplementedError
def get_latest_descr(self, deadframe):
"""Returns the Descr for the last operation executed by the frame."""
raise NotImplementedError
def get_int_value(self, deadframe, index):
"""Returns the value for the index'th argument to the
last executed operation (from 'fail_args' if it was a guard,
or from 'args' if it was a FINISH). Returns an int."""
raise NotImplementedError
def get_float_value(self, deadframe, index):
"""Returns the value for the index'th argument to the
last executed operation (from 'fail_args' if it was a guard,
or from 'args' if it was a FINISH). Returns a FLOATSTORAGE."""
raise NotImplementedError
def get_ref_value(self, deadframe, index):
"""Returns the value for the index'th argument to the
last executed operation (from 'fail_args' if it was a guard,
or from 'args' if it was a FINISH). Returns a GCREF."""
raise NotImplementedError
def grab_exc_value(self, deadframe):
"""Return the exception set by the latest execute_token(),
when it exits due to a failure of a GUARD_EXCEPTION or
GUARD_NO_EXCEPTION. (Returns a GCREF)""" # XXX remove me
raise NotImplementedError
def set_savedata_ref(self, deadframe, data):
"""For the front-end: store a GCREF on the deadframe object."""
raise NotImplementedError
def get_savedata_ref(self, deadframe):
"""For the front-end: get the GCREF saved with set_savedata_ref()."""
raise NotImplementedError
def force(self, force_token):
"""Take a 'force token' as produced by the FORCE_TOKEN operation,
and 'kill' the corresponding JIT frame, which should be somewhere
in the stack right now. Returns it as a dead frame object. When
we later return to the JIT frame, the next operation executed must
be a GUARD_NOT_FORCED, which will fail."""
raise NotImplementedError
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
"""Redirect oldlooptoken to newlooptoken. More precisely, it is
enough to redirect all CALL_ASSEMBLERs already compiled that call
oldlooptoken so that from now own they will call newlooptoken."""
raise NotImplementedError
def invalidate_loop(self, looptoken):
"""Activate all GUARD_NOT_INVALIDATED in the loop and its attached
bridges. Before this call, all GUARD_NOT_INVALIDATED do nothing;
after this call, they all fail. Note that afterwards, if one such
guard fails often enough, it has a bridge attached to it; it is
possible then to re-call invalidate_loop() on the same looptoken,
which must invalidate all newer GUARD_NOT_INVALIDATED, but not the
old one that already has a bridge attached to it."""
raise NotImplementedError
def free_loop_and_bridges(self, compiled_loop_token):
"""This method is called to free resources (machine code,
references to resume guards, etc.) allocated by the compilation
of a loop and all bridges attached to it. After this call, the
frontend cannot use this compiled loop any more; in fact, it
guarantees that at the point of the call to free_code_group(),
none of the corresponding assembler is currently running.
"""
pass
def sizeof(self, S):
raise NotImplementedError
def fielddescrof(self, S, fieldname):
"""Return the Descr corresponding to field 'fieldname' on the
structure 'S'. It is important that this function (at least)
caches the results."""
raise NotImplementedError
def interiorfielddescrof(self, A, fieldname):
raise NotImplementedError
def arraydescrof(self, A):
raise NotImplementedError
def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
# FUNC is the original function type, but ARGS is a list of types
# with Voids removed
raise NotImplementedError
def typedescrof(self, TYPE):
raise NotImplementedError
def unpack_arraydescr_size(self, arraydescr):
"""
Return basesize, itemsize, is_signed
"""
raise NotImplementedError
@staticmethod
def cast_int_to_ptr(x, TYPE):
x = llmemory.cast_int_to_adr(x)
return llmemory.cast_adr_to_ptr(x, TYPE)
# ---------- the backend-dependent operations ----------
# lltype specific operations
# --------------------------
def bh_getarrayitem_gc_i(self, array, index, arraydescr):
raise NotImplementedError
def bh_getarrayitem_gc_r(self, array, index, arraydescr):
raise NotImplementedError
def bh_getarrayitem_gc_f(self, array, index, arraydescr):
raise NotImplementedError
def bh_getfield_gc_i(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_gc_r(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_gc_f(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_raw_i(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_raw_r(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_raw_f(self, struct, fielddescr):
raise NotImplementedError
def bh_new(self, sizedescr):
raise NotImplementedError
def bh_new_with_vtable(self, vtable, sizedescr):
raise NotImplementedError
def bh_new_array(self, length, arraydescr):
raise NotImplementedError
def bh_newstr(self, length):
raise NotImplementedError
def bh_newunicode(self, length):
raise NotImplementedError
def bh_new_raw_buffer(self, size):
raise NotImplementedError
def bh_arraylen_gc(self, array, arraydescr):
raise NotImplementedError
def bh_classof(self, struct):
raise NotImplementedError
def bh_setarrayitem_gc_i(self, array, index, newvalue, arraydescr):
raise NotImplementedError
def bh_setarrayitem_gc_r(self, array, index, newvalue, arraydescr):
raise NotImplementedError
def bh_setarrayitem_gc_f(self, array, index, newvalue, arraydescr):
raise NotImplementedError
def bh_setfield_gc_i(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_gc_r(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_gc_f(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_raw_i(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_raw_f(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_call_i(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_call_r(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_call_f(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_call_v(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_strlen(self, string):
raise NotImplementedError
def bh_strgetitem(self, string, index):
raise NotImplementedError
def bh_unicodelen(self, string):
raise NotImplementedError
def bh_unicodegetitem(self, string, index):
raise NotImplementedError
def bh_strsetitem(self, string, index, newvalue):
raise NotImplementedError
def bh_unicodesetitem(self, string, index, newvalue):
raise NotImplementedError
def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
raise NotImplementedError
def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
raise NotImplementedError
class CompiledLoopToken(object):
    """Runtime token for one compiled loop.

    Tracks the number of bridges attached to the loop, guard positions to
    invalidate, and which other looptokens have been redirected to it.
    When the token is garbage-collected, the backend resources of the loop
    and all its bridges are freed via cpu.free_loop_and_bridges().
    """
    # Filled in by the backend's assembler memory manager.
    asmmemmgr_blocks = None
    asmmemmgr_gcroots = 0

    def __init__(self, cpu, number):
        # Count every compiled loop in the CPU-wide statistics.
        cpu.tracker.total_compiled_loops += 1
        self.cpu = cpu
        self.number = number
        self.bridges_count = 0
        self.invalidate_positions = []
        # a list of weakrefs to looptokens that have been redirected to
        # this one
        self.looptokens_redirected_to = []
        debug_start("jit-mem-looptoken-alloc")
        debug_print("allocating Loop #", self.number)
        debug_stop("jit-mem-looptoken-alloc")

    def compiling_a_bridge(self):
        """Record (and debug-log) that a new bridge is compiled for this loop."""
        self.cpu.tracker.total_compiled_bridges += 1
        self.bridges_count += 1
        debug_start("jit-mem-looptoken-alloc")
        debug_print("allocating Bridge #", self.bridges_count, "of Loop #", self.number)
        debug_stop("jit-mem-looptoken-alloc")

    def update_frame_info(self, oldlooptoken, baseofs):
        """Propagate this loop's frame depth to `oldlooptoken` and to every
        loop already redirected to it, then remember all of them (via
        weakrefs) as redirected to `self`.
        """
        # NOTE(review): self.frame_info is not assigned in __init__;
        # presumably set by the backend after compilation -- confirm.
        new_fi = self.frame_info
        new_loop_tokens = []
        for ref in oldlooptoken.looptokens_redirected_to:
            looptoken = ref()
            if looptoken:
                # Keep only weakrefs whose target is still alive.
                looptoken.frame_info.update_frame_depth(baseofs,
                                        new_fi.jfi_frame_depth)
                new_loop_tokens.append(ref)
        oldlooptoken.frame_info.update_frame_depth(baseofs,
                                        new_fi.jfi_frame_depth)
        assert oldlooptoken is not None
        new_loop_tokens.append(weakref.ref(oldlooptoken))
        self.looptokens_redirected_to = new_loop_tokens

    def __del__(self):
        # Free the machine code of the loop and its bridges, and update
        # the CPU-wide free statistics.
        #debug_start("jit-mem-looptoken-free")
        #debug_print("freeing Loop #", self.number, 'with',
        #            self.bridges_count, 'attached bridges')
        self.cpu.free_loop_and_bridges(self)
        self.cpu.tracker.total_freed_loops += 1
        self.cpu.tracker.total_freed_bridges += self.bridges_count
        #debug_stop("jit-mem-looptoken-free")
| 38.97006 | 88 | 0.677628 | import weakref
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rtyper.lltypesystem import lltype, llmemory
class CPUTotalTracker(object):
    """CPU-wide counters of compiled and freed loops/bridges."""
    total_compiled_loops = 0
    total_compiled_bridges = 0
    total_freed_loops = 0
    total_freed_bridges = 0
    # Lazily-built cache; presumably maps vtables to descrs -- confirm.
    _vtable_to_descr_dict = None
class AbstractCPU(object):
    """Abstract interface of a JIT backend ("CPU").

    Concrete backends implement compilation (compile_loop/compile_bridge),
    execution (execute_token/make_execute_token), descr construction, and
    the bh_* blackhole operations; most methods here are abstract stubs.
    """
    # Capability flags; a concrete backend turns on what it supports.
    supports_floats = False
    supports_longlong = False
    supports_singlefloats = False
    # Descr used for exception propagation; presumably assigned by the
    # concrete backend -- confirm.
    propagate_exception_descr = None

    def __init__(self):
        # CPU-wide statistics about compiled/freed loops and bridges.
        self.tracker = CPUTotalTracker()

    def _freeze_(self):
        # RPython hook: treat CPU instances as frozen prebuilt constants.
        return True

    def setup_once(self):
        """One-time setup hook; no-op by default."""
        pass

    def finish_once(self):
        """One-time teardown hook; no-op by default."""
        pass

    def get_all_loop_runs(self):
        raise NotImplementedError

    def set_debug(self, value):
        """Enable/disable backend debugging; base version reports False."""
        return False

    # ---------- compilation entry points ----------

    def compile_loop(self, inputargs, operations, looptoken, jd_id=0,
                     unique_id=0, log=True, name='', logger=None):
        """Compile a loop of operations into machine code (abstract)."""
        raise NotImplementedError

    def compile_bridge(self, faildescr, inputargs, operations,
                       original_loop_token, log=True, logger=None):
        """Compile a bridge attached to the guard `faildescr` (abstract)."""
        raise NotImplementedError

    def dump_loop_token(self, looptoken):
        raise NotImplementedError

    def execute_token(self, looptoken, *args):
        """Execute the compiled loop `looptoken` with `args`.

        Specializes on the lltype of each argument and dispatches through
        make_execute_token().
        """
        argtypes = [lltype.typeOf(x) for x in args]
        execute = self.make_execute_token(*argtypes)
        return execute(looptoken, *args)

    def make_execute_token(self, *argtypes):
        raise NotImplementedError

    # ---------- reading results out of a dead frame ----------

    def get_latest_descr(self, deadframe):
        raise NotImplementedError

    def get_int_value(self, deadframe, index):
        raise NotImplementedError

    def get_float_value(self, deadframe, index):
        raise NotImplementedError

    def get_ref_value(self, deadframe, index):
        raise NotImplementedError

    def grab_exc_value(self, deadframe):
        raise NotImplementedError

    def set_savedata_ref(self, deadframe, data):
        raise NotImplementedError

    def get_savedata_ref(self, deadframe):
        """Return the GCREF previously stored with set_savedata_ref()."""
        raise NotImplementedError

    def force(self, force_token):
        """Kill the JIT frame identified by `force_token` and return it as
        a dead frame object; the next executed op must be GUARD_NOT_FORCED.
        """
        raise NotImplementedError

    def redirect_call_assembler(self, oldlooptoken, newlooptoken):
        """Make all already-compiled CALL_ASSEMBLERs that target
        `oldlooptoken` call `newlooptoken` instead.
        """
        raise NotImplementedError

    def invalidate_loop(self, looptoken):
        """Make all GUARD_NOT_INVALIDATED in the loop and its attached
        bridges fail from now on.
        """
        raise NotImplementedError

    def free_loop_and_bridges(self, compiled_loop_token):
        """Release machine code and other resources of a loop and its
        bridges; the frontend must not use the loop afterwards.
        """
        pass

    # ---------- descr construction ----------

    def sizeof(self, S):
        raise NotImplementedError

    def fielddescrof(self, S, fieldname):
        """Return (and cache) the Descr of field `fieldname` of struct S."""
        raise NotImplementedError

    def interiorfielddescrof(self, A, fieldname):
        raise NotImplementedError

    def arraydescrof(self, A):
        raise NotImplementedError

    def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
        # FUNC is the original function type, but ARGS is a list of types
        # with Voids removed
        raise NotImplementedError

    def typedescrof(self, TYPE):
        raise NotImplementedError

    def unpack_arraydescr_size(self, arraydescr):
        """Return (basesize, itemsize, is_signed) for `arraydescr`."""
        raise NotImplementedError

    @staticmethod
    def cast_int_to_ptr(x, TYPE):
        # int -> address -> typed pointer
        x = llmemory.cast_int_to_adr(x)
        return llmemory.cast_adr_to_ptr(x, TYPE)

    # ---------- the backend-dependent (blackhole) operations ----------
    # lltype specific operations
    # --------------------------

    def bh_getarrayitem_gc_i(self, array, index, arraydescr):
        raise NotImplementedError

    def bh_getarrayitem_gc_r(self, array, index, arraydescr):
        raise NotImplementedError

    def bh_getarrayitem_gc_f(self, array, index, arraydescr):
        raise NotImplementedError

    def bh_getfield_gc_i(self, struct, fielddescr):
        raise NotImplementedError

    def bh_getfield_gc_r(self, struct, fielddescr):
        raise NotImplementedError

    def bh_getfield_gc_f(self, struct, fielddescr):
        raise NotImplementedError

    def bh_getfield_raw_i(self, struct, fielddescr):
        raise NotImplementedError

    def bh_getfield_raw_r(self, struct, fielddescr):
        raise NotImplementedError

    def bh_getfield_raw_f(self, struct, fielddescr):
        raise NotImplementedError

    def bh_new(self, sizedescr):
        raise NotImplementedError

    def bh_new_with_vtable(self, vtable, sizedescr):
        raise NotImplementedError

    def bh_new_array(self, length, arraydescr):
        raise NotImplementedError

    def bh_newstr(self, length):
        raise NotImplementedError

    def bh_newunicode(self, length):
        raise NotImplementedError

    def bh_new_raw_buffer(self, size):
        raise NotImplementedError

    def bh_arraylen_gc(self, array, arraydescr):
        raise NotImplementedError

    def bh_classof(self, struct):
        raise NotImplementedError

    def bh_setarrayitem_gc_i(self, array, index, newvalue, arraydescr):
        raise NotImplementedError

    def bh_setarrayitem_gc_r(self, array, index, newvalue, arraydescr):
        raise NotImplementedError

    def bh_setarrayitem_gc_f(self, array, index, newvalue, arraydescr):
        raise NotImplementedError

    def bh_setfield_gc_i(self, struct, newvalue, fielddescr):
        raise NotImplementedError

    def bh_setfield_gc_r(self, struct, newvalue, fielddescr):
        raise NotImplementedError

    def bh_setfield_gc_f(self, struct, newvalue, fielddescr):
        raise NotImplementedError

    def bh_setfield_raw_i(self, struct, newvalue, fielddescr):
        raise NotImplementedError

    def bh_setfield_raw_f(self, struct, newvalue, fielddescr):
        raise NotImplementedError

    def bh_call_i(self, func, args_i, args_r, args_f, calldescr):
        raise NotImplementedError

    def bh_call_r(self, func, args_i, args_r, args_f, calldescr):
        raise NotImplementedError

    def bh_call_f(self, func, args_i, args_r, args_f, calldescr):
        raise NotImplementedError

    def bh_call_v(self, func, args_i, args_r, args_f, calldescr):
        raise NotImplementedError

    def bh_strlen(self, string):
        raise NotImplementedError

    def bh_strgetitem(self, string, index):
        raise NotImplementedError

    def bh_unicodelen(self, string):
        raise NotImplementedError

    def bh_unicodegetitem(self, string, index):
        raise NotImplementedError

    def bh_strsetitem(self, string, index, newvalue):
        raise NotImplementedError

    def bh_unicodesetitem(self, string, index, newvalue):
        raise NotImplementedError

    def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
        raise NotImplementedError

    def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
        raise NotImplementedError
class CompiledLoopToken(object):
    """Runtime token for one compiled loop; tracks its bridges and
    redirections, and frees backend resources when collected."""
    # Filled in by the backend's assembler memory manager.
    asmmemmgr_blocks = None
    asmmemmgr_gcroots = 0

    def __init__(self, cpu, number):
        # Count every compiled loop in the CPU-wide statistics.
        cpu.tracker.total_compiled_loops += 1
        self.cpu = cpu
        self.number = number
        self.bridges_count = 0
        self.invalidate_positions = []
        # weakrefs to the looptokens that have been redirected to this one
        self.looptokens_redirected_to = []
        debug_start("jit-mem-looptoken-alloc")
        debug_print("allocating Loop #", self.number)
        debug_stop("jit-mem-looptoken-alloc")

    def compiling_a_bridge(self):
        """Record (and debug-log) that a new bridge is compiled for this loop."""
        self.cpu.tracker.total_compiled_bridges += 1
        self.bridges_count += 1
        debug_start("jit-mem-looptoken-alloc")
        debug_print("allocating Bridge #", self.bridges_count, "of Loop #", self.number)
        debug_stop("jit-mem-looptoken-alloc")

    def update_frame_info(self, oldlooptoken, baseofs):
        """Propagate this loop's frame depth to `oldlooptoken` and to every
        loop already redirected to it, then remember all of them (via
        weakrefs) as redirected to `self`.
        """
        # NOTE(review): self.frame_info is not assigned in __init__;
        # presumably set by the backend after compilation -- confirm.
        new_fi = self.frame_info
        new_loop_tokens = []
        for ref in oldlooptoken.looptokens_redirected_to:
            looptoken = ref()
            if looptoken:
                # Keep only weakrefs whose target is still alive.
                looptoken.frame_info.update_frame_depth(baseofs,
                                        new_fi.jfi_frame_depth)
                new_loop_tokens.append(ref)
        oldlooptoken.frame_info.update_frame_depth(baseofs,
                                        new_fi.jfi_frame_depth)
        assert oldlooptoken is not None
        new_loop_tokens.append(weakref.ref(oldlooptoken))
        self.looptokens_redirected_to = new_loop_tokens

    def __del__(self):
        # Free the machine code of the loop and its bridges, and update
        # the CPU-wide free statistics.
        self.cpu.free_loop_and_bridges(self)
        self.cpu.tracker.total_freed_loops += 1
        self.cpu.tracker.total_freed_bridges += self.bridges_count
| true | true |
1c35f69ad59be07090db7f3539f86ff7d6d0b4e8 | 4,203 | py | Python | server/forestgame/game/test_world.py | Nick-Pearson/forestgame | 8a37225adbe6da9df7851eba34ad06806da0ce48 | [
"0BSD"
] | null | null | null | server/forestgame/game/test_world.py | Nick-Pearson/forestgame | 8a37225adbe6da9df7851eba34ad06806da0ce48 | [
"0BSD"
] | 5 | 2021-03-10T14:18:45.000Z | 2022-03-12T00:28:29.000Z | server/forestgame/game/test_world.py | Nick-Pearson/forestgame | 8a37225adbe6da9df7851eba34ad06806da0ce48 | [
"0BSD"
] | null | null | null | import unittest
from forestgame.game.world import World
class WorldTest(unittest.TestCase):
    """Tests for World construction, resizing, and tile mutation.

    The code under test pads newly exposed cells with forest tiles (id 1)
    when growing and truncates rows/columns when shrinking; tile id 0
    marks a cleared cell.

    The assertion boilerplate (tile data plus both sizes) was duplicated
    across every test; it now lives in _assert_world(), which also derives
    the expected sizes from the expected tile data so they can never drift
    out of sync.
    """

    def _make_world(self, size_x=0, size_y=0, tiles=None):
        """Build a World with the fixed test game/client ids."""
        return World(None, "1", "0", size_x, size_y,
                     tiles if tiles is not None else [], [])

    def _resized_world(self, *sizes):
        """Build an empty world and apply set_size(x, y) for each pair."""
        world = self._make_world()
        for (x, y) in sizes:
            world.set_size(x, y)
        return world

    def _assert_world(self, world, expected_tile_data):
        """Assert the tile data and that the reported sizes match it."""
        self.assertEqual(expected_tile_data, world.get_tile_data())
        expected_x = len(expected_tile_data[0]) if expected_tile_data else 0
        self.assertEqual(expected_x, world.get_size_x())
        self.assertEqual(len(expected_tile_data), world.get_size_y())

    def test_world_inits_to_empty_data(self):
        self._assert_world(self._make_world(), [])

    def test_world_with_tiles_inits__with_tiles_to_empty_data(self):
        world = self._make_world(3, 3, [(1, 1, 0)])
        self._assert_world(world, [[1, 1, 1],
                                   [1, 0, 1],
                                   [1, 1, 1]])

    def test_set_size_from_zero_initialsies_from_forest(self):
        world = self._resized_world((3, 3))
        self._assert_world(world, [[1, 1, 1],
                                   [1, 1, 1],
                                   [1, 1, 1]])

    def test_set_size_with_larger_x_y_pads_with_forest(self):
        world = self._resized_world((2, 2), (3, 3))
        self._assert_world(world, [[1, 1, 1],
                                   [1, 1, 1],
                                   [1, 1, 1]])

    def test_set_size_with_larger_x_pads_with_forest(self):
        world = self._resized_world((2, 3), (3, 3))
        self._assert_world(world, [[1, 1, 1],
                                   [1, 1, 1],
                                   [1, 1, 1]])

    def test_set_size_with_larger_y_pads_with_forest(self):
        world = self._resized_world((3, 2), (3, 3))
        self._assert_world(world, [[1, 1, 1],
                                   [1, 1, 1],
                                   [1, 1, 1]])

    def test_set_size_with_smaller_x_y_removes_data(self):
        world = self._resized_world((3, 3), (2, 2))
        self._assert_world(world, [[1, 1],
                                   [1, 1]])

    def test_set_size_with_smaller_x_removes_data(self):
        world = self._resized_world((3, 3), (2, 3))
        self._assert_world(world, [[1, 1],
                                   [1, 1],
                                   [1, 1]])

    def test_set_size_with_smaller_y_removes_data(self):
        world = self._resized_world((3, 3), (3, 2))
        self._assert_world(world, [[1, 1, 1],
                                   [1, 1, 1]])

    def test_set_size_with_same_x_y_does_nothing(self):
        world = self._resized_world((3, 3), (3, 3))
        self._assert_world(world, [[1, 1, 1],
                                   [1, 1, 1],
                                   [1, 1, 1]])

    # set tile range checks
    def test_set_tile_changes_tile_data(self):
        world = self._resized_world((5, 5))
        # set_tile_at/get_tile_at take (x, y): column 2 of row 3 is cleared.
        world.set_tile_at(2, 3, 0)
        self.assertEqual(0, world.get_tile_at(2, 3))
        self._assert_world(world, [[1, 1, 1, 1, 1],
                                   [1, 1, 1, 1, 1],
                                   [1, 1, 1, 1, 1],
                                   [1, 1, 0, 1, 1],
                                   [1, 1, 1, 1, 1]])
| 26.601266 | 66 | 0.610278 | import unittest
from forestgame.game.world import World
class WorldTest(unittest.TestCase):
    """Tests for World construction, resizing, and tile mutation.

    Growing the world pads new cells with forest tiles (id 1); shrinking
    truncates rows/columns; tile id 0 marks a cleared cell.
    """

    def test_world_inits_to_empty_data(self):
        world = World(None, "1", "0", 0, 0, [], [])
        self.assertEqual(0, world.get_size_x())
        self.assertEqual(0, world.get_size_y())
        self.assertEqual([], world.get_tile_data())

    def test_world_with_tiles_inits__with_tiles_to_empty_data(self):
        # Tile overrides are (x, y, tile_id) triples; (1, 1) is cleared.
        world = World(None, "1", "0", 3, 3, [(1, 1, 0)], [])
        expected_tile_data = [
          [1, 1, 1],
          [1, 0, 1],
          [1, 1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(3, world.get_size_x())
        self.assertEqual(3, world.get_size_y())

    def test_set_size_from_zero_initialsies_from_forest(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(3, 3)
        expected_tile_data = [
          [1, 1, 1],
          [1, 1, 1],
          [1, 1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(3, world.get_size_x())
        self.assertEqual(3, world.get_size_y())

    def test_set_size_with_larger_x_y_pads_with_forest(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(2, 2)
        world.set_size(3, 3)
        expected_tile_data = [
          [1, 1, 1],
          [1, 1, 1],
          [1, 1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(3, world.get_size_x())
        self.assertEqual(3, world.get_size_y())

    def test_set_size_with_larger_x_pads_with_forest(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(2, 3)
        world.set_size(3, 3)
        expected_tile_data = [
          [1, 1, 1],
          [1, 1, 1],
          [1, 1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(3, world.get_size_x())
        self.assertEqual(3, world.get_size_y())

    def test_set_size_with_larger_y_pads_with_forest(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(3, 2)
        world.set_size(3, 3)
        expected_tile_data = [
          [1, 1, 1],
          [1, 1, 1],
          [1, 1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(3, world.get_size_x())
        self.assertEqual(3, world.get_size_y())

    def test_set_size_with_smaller_x_y_removes_data(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(3, 3)
        world.set_size(2, 2)
        expected_tile_data = [
          [1, 1],
          [1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(2, world.get_size_x())
        self.assertEqual(2, world.get_size_y())

    def test_set_size_with_smaller_x_removes_data(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(3, 3)
        world.set_size(2, 3)
        expected_tile_data = [
          [1, 1],
          [1, 1],
          [1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(2, world.get_size_x())
        self.assertEqual(3, world.get_size_y())

    def test_set_size_with_smaller_y_removes_data(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(3, 3)
        world.set_size(3, 2)
        expected_tile_data = [
          [1, 1, 1],
          [1, 1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(3, world.get_size_x())
        self.assertEqual(2, world.get_size_y())

    def test_set_size_with_same_x_y_does_nothing(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(3, 3)
        world.set_size(3, 3)
        expected_tile_data = [
          [1, 1, 1],
          [1, 1, 1],
          [1, 1, 1],
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
        self.assertEqual(3, world.get_size_x())
        self.assertEqual(3, world.get_size_y())

    def test_set_tile_changes_tile_data(self):
        world = World(None, "1", "0", 0, 0, [], [])
        world.set_size(5, 5)
        # set_tile_at/get_tile_at take (x, y): column 2 of row 3 is cleared.
        world.set_tile_at(2, 3, 0)
        self.assertEqual(0, world.get_tile_at(2, 3))
        expected_tile_data = [
          [1, 1, 1, 1, 1],
          [1, 1, 1, 1, 1],
          [1, 1, 1, 1, 1],
          [1, 1, 0, 1, 1],
          [1, 1, 1, 1, 1]
        ]
        self.assertEqual(expected_tile_data, world.get_tile_data())
| true | true |
1c35f69f6f6e4bc9277d129538c716070d736d61 | 11,855 | py | Python | test/python/set_view/compaction_transitions.py | vmx/couchbase-couchdb | e99e2bdcb912bcdecda10e881dc08c9292dffdf2 | [
"Apache-2.0"
] | 93 | 2015-01-19T16:36:36.000Z | 2022-03-24T00:26:33.000Z | test/python/set_view/compaction_transitions.py | vmx/couchbase-couchdb | e99e2bdcb912bcdecda10e881dc08c9292dffdf2 | [
"Apache-2.0"
] | 4 | 2015-04-27T00:01:05.000Z | 2021-07-26T08:16:56.000Z | test/python/set_view/compaction_transitions.py | vmx/couchbase-couchdb | e99e2bdcb912bcdecda10e881dc08c9292dffdf2 | [
"Apache-2.0"
] | 62 | 2015-01-04T13:07:15.000Z | 2022-02-25T10:02:14.000Z | #!/usr/bin/python
try: import simplejson as json
except ImportError: import json
import couchdb
import httplib
import urllib
import time
import common
import unittest
HOST = "localhost:5984"                     # CouchDB endpoint under test
SET_NAME = "test_suite_set_view_compact"    # database-set name prefix
NUM_PARTS = 8                               # partitions in the database set
NUM_DOCS = 800000                           # documents spread over the set
# Design document defining a single map view keyed on doc.integer.
DDOC = {
    "_id": "_design/test",
    "language": "javascript",
    "views": {
        "mapview1": {
            "map": "function(doc) { emit(doc.integer, doc.string); }"
        }
    }
}
class TestCompactionTransitions(unittest.TestCase):
    """Integration test: partition state transitions applied while a set
    view compaction is running must be preserved after it finishes.

    NOTE(review): Python 2 test (xrange, httplib at module level); talks to
    a live CouchDB at HOST via the `common` helper module.
    """

    def setUp(self):
        # Shared parameter dict consumed by all `common` helpers.
        self._params = {
            "host": HOST,
            "ddoc": DDOC,
            "nparts": NUM_PARTS,
            "ndocs": NUM_DOCS,
            "setname": SET_NAME,
            "server": couchdb.Server(url = "http://" + HOST)
        }
        # print "Creating databases"
        common.create_dbs(self._params)
        common.populate(self._params)
        # print "Configuring set view with:"
        # print "\tmaximum of 8 partitions"
        # print "\tactive partitions = [0, 1, 2, 3, 4]"
        # print "\tpassive partitions = []"
        common.define_set_view(self._params, [0, 1, 2, 3], [])

    def tearDown(self):
        # print "Deleting test data"
        common.create_dbs(self._params, True)

    def test_compaction_transitions(self):
        """Run both scenarios in order; the second builds on the first's state."""
        self.do_test_set_passive_during_compaction()
        self.do_test_set_active_during_compaction()

    def do_test_set_passive_during_compaction(self):
        """Mark partitions passive while a compaction runs and verify the
        view group info and query results afterwards."""
        # print "Querying map view"
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        doc_count = common.set_doc_count(self._params, [0, 1, 2, 3])
        self.assertEqual(map_view_result["total_rows"], doc_count,
                         "Query returned %d total_rows" % doc_count)
        self.assertEqual(len(map_view_result["rows"]), doc_count,
                         "Query returned %d rows" % doc_count)
        common.test_keys_sorted(map_view_result)
        # print "Verifying set view group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3]:
            db = self._params["server"][self._params["setname"] + "/" + str(i)]
            seq = db.info()["update_seq"]
            self.assertEqual(info["update_seqs"][str(i)], seq,
                             "right update seq for partition %d" % (i + 1))
        # print "Triggering view compaction"
        common.compact_set_view(self._params, False)
        # print "Marking partition 4 as passive"
        common.set_partition_states(self._params, passive = [3])
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        # print "Waiting for compaction to finish"
        count = 0
        while info["stats"]["compactions"] < 1:
            time.sleep(0.5)
            count += 1
            info = common.get_set_view_info(self._params)
        self.assertTrue((count > 0), "Compaction was running when the partition states were updated")
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        # print "Querying map view again"
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        expected = common.set_doc_count(self._params, [0, 1, 2])
        # total_rows still counts the passive partition's rows; only the
        # returned rows are restricted to active partitions.
        self.assertEqual(map_view_result["total_rows"], doc_count,
                         "Query returned %d total_rows" % doc_count)
        self.assertEqual(len(map_view_result["rows"]), expected,
                         "Query returned %d rows" % expected)
        common.test_keys_sorted(map_view_result)
        all_keys = {}
        for r in map_view_result["rows"]:
            all_keys[r["key"]] = True
        # Documents are distributed round-robin, so partition 3 holds keys
        # congruent to 4 modulo nparts (1-based doc ids).
        for key in xrange(4, self._params["ndocs"], self._params["nparts"]):
            self.assertFalse(key in all_keys,
                             "Key %d not in result after partition 4 set to passive" % key)
        # print "Triggering view compaction"
        common.compact_set_view(self._params, False)
        # print "Adding two new partitions, 5 and 6, as passive while compaction is running"
        common.set_partition_states(self._params, passive = [4, 5])
        # print "Waiting for compaction to finish"
        info = common.get_set_view_info(self._params)
        count = 0
        while info["stats"]["compactions"] < 2:
            time.sleep(0.5)
            count += 1
            info = common.get_set_view_info(self._params)
        self.assertTrue((count > 0), "Compaction was running when the partition states were updated")
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3, 4, 5], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3, 4, 5]:
            self.assertTrue(str(i) in info["update_seqs"], "%d in info.update_seqs" % i)
        for i in [6, 7]:
            self.assertFalse(str(i) in info["update_seqs"], "%d not in info.update_seqs" % i)
        # print "Querying map view again"
        (map_resp, map_view_result2) = common.query(self._params, "mapview1")
        self.assertEqual(map_view_result2["rows"], map_view_result["rows"], "Same result set as before")
        total_doc_count = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5, 6, 7])
        # print "Adding 1 new document to each partition"
        new_docs = []
        for i in [0, 1, 2, 3, 4, 5, 6, 7]:
            # NOTE(review): 'server' is unused below; likely leftover.
            server = self._params["server"]
            db = self._params["server"][self._params["setname"] + "/" + str(i)]
            value = total_doc_count + i + 1
            new_doc = {
                "_id": str(value),
                "integer": value,
                "string": str(value)
            }
            new_docs.append(new_doc)
            db.save(new_doc)
        new_total_doc_count = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(new_total_doc_count, (total_doc_count + 8), "8 documents added")
        self.assertEqual(len(new_docs), 8, "8 documents added")
        info = common.get_set_view_info(self._params)
        if info["updater_running"]:
            # print "Waiting for updater to finish"
            self.assertEqual(info["updater_state"], "updating_passive",
                             "updater state is updating_passive")
            while True:
                info = common.get_set_view_info(self._params)
                if info["updater_running"]:
                    self.assertEqual(info["updater_state"], "updating_passive",
                                     "updater state is updating_passive")
                    time.sleep(3)
                else:
                    break
        expected_row_count = common.set_doc_count(self._params, [0, 1, 2])
        # NOTE(review): expected_total_rows is computed but never asserted.
        expected_total_rows = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5])
        # print "Querying map view again"
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        self.assertEqual(len(map_view_result["rows"]), expected_row_count, "len(rows) is %d" % expected_row_count)
        common.test_keys_sorted(map_view_result)
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3, 4, 5], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3, 4, 5]:
            self.assertTrue(str(i) in info["update_seqs"], "%d in info.update_seqs" % i)
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                             "info.update_seqs[%d] is %d" % (i, expected_seq))

    def do_test_set_active_during_compaction(self):
        """Mark partitions active (including brand-new ones) while a
        compaction runs and verify the state survives it."""
        # print "Triggering view compaction"
        common.compact_set_view(self._params, False)
        # print "Marking partitions 4, 5 and 6 as active while compaction is running"
        common.set_partition_states(self._params, active = [3, 4, 5])
        # print "Adding new partitions 7 and 8 with active state while compaction is running"
        common.set_partition_states(self._params, active = [6, 7])
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3, 4, 5, 6, 7], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        # print "Querying map view while compaction is running"
        (map_resp, map_view_result) = common.query(self._params, "mapview1", {"limit": "10"})
        self.assertEqual(len(map_view_result["rows"]), 10, "Query returned 10 rows")
        common.test_keys_sorted(map_view_result)
        # print "Waiting for compaction to finish"
        compaction_was_running = (common.wait_set_view_compaction_complete(self._params) > 0)
        self.assertTrue(compaction_was_running, "Compaction was running when the view update was triggered")
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3, 4, 5, 6, 7], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        # print "Querying map view again"
        doc_count = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5, 6, 7])
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        self.assertEqual(map_view_result["total_rows"], doc_count,
                         "Query returned %d total_rows" % doc_count)
        self.assertEqual(len(map_view_result["rows"]), doc_count,
                         "Query returned %d rows" % doc_count)
        common.test_keys_sorted(map_view_result)
        # print "Verifying group info"
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3, 4, 5, 6, 7], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3, 4, 5, 6, 7]:
            self.assertTrue(str(i) in info["update_seqs"], "%d in info.update_seqs" % i)
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                             "info.update_seqs[%d] is %d" % (i, expected_seq))
| 44.735849 | 114 | 0.619739 |
try: import simplejson as json
except ImportError: import json
import couchdb
import httplib
import urllib
import time
import common
import unittest
HOST = "localhost:5984"
SET_NAME = "test_suite_set_view_compact"
NUM_PARTS = 8
NUM_DOCS = 800000
DDOC = {
"_id": "_design/test",
"language": "javascript",
"views": {
"mapview1": {
"map": "function(doc) { emit(doc.integer, doc.string); }"
}
}
}
class TestCompactionTransitions(unittest.TestCase):
    """Integration test: partition-state transitions requested while a set-view
    compaction is running must be applied correctly once compaction finishes.

    All server interaction goes through the project-local ``common`` helper
    module.  NOTE(review): this is Python 2 code (``httplib``, ``xrange``).
    """
    def setUp(self):
        """Create the partition databases, populate them, and define the set
        view with partitions 0-3 active (out of NUM_PARTS)."""
        self._params = {
            "host": HOST,
            "ddoc": DDOC,
            "nparts": NUM_PARTS,
            "ndocs": NUM_DOCS,
            "setname": SET_NAME,
            "server": couchdb.Server(url = "http://" + HOST)
        }
        common.create_dbs(self._params)
        common.populate(self._params)
        common.define_set_view(self._params, [0, 1, 2, 3], [])
    def tearDown(self):
        # create_dbs with True deletes/recreates, i.e. cleans up the test dbs.
        common.create_dbs(self._params, True)
    def test_compaction_transitions(self):
        """Run both scenarios in order; the second builds on the first's state."""
        self.do_test_set_passive_during_compaction()
        self.do_test_set_active_during_compaction()
    def do_test_set_passive_during_compaction(self):
        """Mark partitions passive while compaction runs and verify rows from
        those partitions disappear from query results once updates settle."""
        # Baseline: all 4 active partitions fully indexed and sorted.
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        doc_count = common.set_doc_count(self._params, [0, 1, 2, 3])
        self.assertEqual(map_view_result["total_rows"], doc_count,
                         "Query returned %d total_rows" % doc_count)
        self.assertEqual(len(map_view_result["rows"]), doc_count,
                         "Query returned %d rows" % doc_count)
        common.test_keys_sorted(map_view_result)
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3]:
            db = self._params["server"][self._params["setname"] + "/" + str(i)]
            seq = db.info()["update_seq"]
            self.assertEqual(info["update_seqs"][str(i)], seq,
                             "right update seq for partition %d" % (i + 1))
        # Start a (non-blocking) compaction, then flip partition 3 to passive
        # while it is still running.
        common.compact_set_view(self._params, False)
        common.set_partition_states(self._params, passive = [3])
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        # count > 0 proves compaction had not yet finished when the state
        # change above was applied.
        count = 0
        while info["stats"]["compactions"] < 1:
            time.sleep(0.5)
            count += 1
            info = common.get_set_view_info(self._params)
        self.assertTrue((count > 0), "Compaction was running when the partition states were updated")
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        # After compaction: total_rows still counts indexed (incl. passive)
        # docs, but returned rows only cover the active partitions 0-2.
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        expected = common.set_doc_count(self._params, [0, 1, 2])
        self.assertEqual(map_view_result["total_rows"], doc_count,
                         "Query returned %d total_rows" % doc_count)
        self.assertEqual(len(map_view_result["rows"]), expected,
                         "Query returned %d rows" % expected)
        common.test_keys_sorted(map_view_result)
        all_keys = {}
        for r in map_view_result["rows"]:
            all_keys[r["key"]] = True
        # Docs are striped over partitions; keys 4, 4+nparts, ... live in the
        # now-passive 4th partition and must be absent from the result.
        for key in xrange(4, self._params["ndocs"], self._params["nparts"]):
            self.assertFalse(key in all_keys,
                             "Key %d not in result after partition 4 set to passive" % key)
        # Second round: compact again and add partitions 4 and 5 as passive
        # mid-compaction.
        common.compact_set_view(self._params, False)
        common.set_partition_states(self._params, passive = [4, 5])
        info = common.get_set_view_info(self._params)
        count = 0
        while info["stats"]["compactions"] < 2:
            time.sleep(0.5)
            count += 1
            info = common.get_set_view_info(self._params)
        self.assertTrue((count > 0), "Compaction was running when the partition states were updated")
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3, 4, 5], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3, 4, 5]:
            self.assertTrue(str(i) in info["update_seqs"], "%d in info.update_seqs" % i)
        for i in [6, 7]:
            self.assertFalse(str(i) in info["update_seqs"], "%d not in info.update_seqs" % i)
        (map_resp, map_view_result2) = common.query(self._params, "mapview1")
        self.assertEqual(map_view_result2["rows"], map_view_result["rows"], "Same result set as before")
        # Write one new doc to every partition to trigger an indexer pass over
        # the passive partitions.
        total_doc_count = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5, 6, 7])
        new_docs = []
        for i in [0, 1, 2, 3, 4, 5, 6, 7]:
            server = self._params["server"]
            db = self._params["server"][self._params["setname"] + "/" + str(i)]
            value = total_doc_count + i + 1
            new_doc = {
                "_id": str(value),
                "integer": value,
                "string": str(value)
            }
            new_docs.append(new_doc)
            db.save(new_doc)
        new_total_doc_count = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(new_total_doc_count, (total_doc_count + 8), "8 documents added")
        self.assertEqual(len(new_docs), 8, "8 documents added")
        # While the updater runs it must be in the "updating_passive" state;
        # poll until it stops.
        info = common.get_set_view_info(self._params)
        if info["updater_running"]:
            self.assertEqual(info["updater_state"], "updating_passive",
                             "updater state is updating_passive")
        while True:
            info = common.get_set_view_info(self._params)
            if info["updater_running"]:
                self.assertEqual(info["updater_state"], "updating_passive",
                                 "updater state is updating_passive")
                time.sleep(3)
            else:
                break
        expected_row_count = common.set_doc_count(self._params, [0, 1, 2])
        expected_total_rows = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5])
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        self.assertEqual(len(map_view_result["rows"]), expected_row_count, "len(rows) is %d" % expected_row_count)
        common.test_keys_sorted(map_view_result)
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [3, 4, 5], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3, 4, 5]:
            self.assertTrue(str(i) in info["update_seqs"], "%d in info.update_seqs" % i)
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                             "info.update_seqs[%d] is %d" % (i, expected_seq))
    def do_test_set_active_during_compaction(self):
        """Re-activate all partitions while a compaction runs and verify the
        final index covers all 8 partitions with correct update seqs."""
        common.compact_set_view(self._params, False)
        common.set_partition_states(self._params, active = [3, 4, 5])
        common.set_partition_states(self._params, active = [6, 7])
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3, 4, 5, 6, 7], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        (map_resp, map_view_result) = common.query(self._params, "mapview1", {"limit": "10"})
        self.assertEqual(len(map_view_result["rows"]), 10, "Query returned 10 rows")
        common.test_keys_sorted(map_view_result)
        # wait_set_view_compaction_complete returns the number of waits;
        # > 0 means compaction was indeed still in flight.
        compaction_was_running = (common.wait_set_view_compaction_complete(self._params) > 0)
        self.assertTrue(compaction_was_running, "Compaction was running when the view update was triggered")
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3, 4, 5, 6, 7], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        # Full query must now return every document from all 8 partitions.
        doc_count = common.set_doc_count(self._params, [0, 1, 2, 3, 4, 5, 6, 7])
        (map_resp, map_view_result) = common.query(self._params, "mapview1")
        self.assertEqual(map_view_result["total_rows"], doc_count,
                         "Query returned %d total_rows" % doc_count)
        self.assertEqual(len(map_view_result["rows"]), doc_count,
                         "Query returned %d rows" % doc_count)
        common.test_keys_sorted(map_view_result)
        info = common.get_set_view_info(self._params)
        self.assertEqual(info["active_partitions"], [0, 1, 2, 3, 4, 5, 6, 7], "right active partitions list")
        self.assertEqual(info["passive_partitions"], [], "right passive partitions list")
        self.assertEqual(info["cleanup_partitions"], [], "right cleanup partitions list")
        for i in [0, 1, 2, 3, 4, 5, 6, 7]:
            self.assertTrue(str(i) in info["update_seqs"], "%d in info.update_seqs" % i)
            expected_seq = common.partition_update_seq(self._params, i)
            self.assertEqual(info["update_seqs"][str(i)], expected_seq,
                             "info.update_seqs[%d] is %d" % (i, expected_seq))
| true | true |
1c35f6b1ff20a2f3b791fcdab1fe667fcfb56192 | 1,759 | py | Python | arkav_is_api/competition/migrations/0016_refactor_competition_2.py | arkavidia5/arkav-is | 6c6e8d091ead5bfff664d86f7903c62209800031 | [
"MIT"
] | 3 | 2018-10-01T16:30:15.000Z | 2020-01-06T09:03:57.000Z | arkav_is_api/competition/migrations/0016_refactor_competition_2.py | arkavidia5/arkav-is | 6c6e8d091ead5bfff664d86f7903c62209800031 | [
"MIT"
] | 4 | 2018-11-03T10:56:52.000Z | 2020-04-26T06:54:16.000Z | arkav_is_api/competition/migrations/0016_refactor_competition_2.py | arkavidia5/arkav-is | 6c6e8d091ead5bfff664d86f7903c62209800031 | [
"MIT"
] | 2 | 2018-09-26T16:28:01.000Z | 2019-09-04T06:23:14.000Z | # Generated by Django 2.1.3 on 2018-11-08 09:32
from string import ascii_uppercase, digits
from django.db import migrations
from django.utils.crypto import get_random_string
def generate_team_invitation_token():
    """Return a random 16-character invitation token (uppercase letters and digits)."""
    alphabet = ascii_uppercase + digits
    return get_random_string(16, allowed_chars=alphabet)
def convert_team_category_to_use_fk(apps, schema_editor):
    """Populate Team.category_fk by matching each team's legacy name-based
    ``category`` field against CompetitionCategory.name.

    Teams whose category name has no matching CompetitionCategory row are
    left unchanged and reported on stdout for manual follow-up.
    """
    Team = apps.get_model('competition', 'Team')
    CompetitionCategory = apps.get_model('competition', 'CompetitionCategory')
    for team in Team.objects.all():
        try:
            team.category_fk = CompetitionCategory.objects.get(name=team.category)
            team.save()
        except CompetitionCategory.DoesNotExist:
            # No category row carries this name: warn and continue with the next team.
            details = (team.category, team.id, team.name)
            print('WARNING: CompetitionCategory with name %s does not exist for team ID %d, name %s.' % details)
            print('You might need to fill in the new category field manually for this team.')
def fill_invitation_full_name_and_email(apps, schema_editor):
    """Backfill invitation full name, email, and a fresh token on every
    TeamMember that is linked to a user account; unlinked members are skipped."""
    TeamMember = apps.get_model('competition', 'TeamMember')
    for member in TeamMember.objects.all():
        user = member.user
        if user is None:
            continue
        member.invitation_full_name = user.full_name
        member.invitation_email = user.email
        member.invitation_token = generate_team_invitation_token()
        member.save()
class Migration(migrations.Migration):
    """Data migration: link teams to CompetitionCategory rows via the new FK
    and backfill invitation details on team members.

    NOTE(review): RunPython is used without reverse_code, so this migration
    cannot be reversed.
    """
    dependencies = [
        ('competition', '0015_refactor_competition_1'),
    ]
    operations = [
        # The two data fixes are independent; both must run forward only.
        migrations.RunPython(convert_team_category_to_use_fk),
        migrations.RunPython(fill_invitation_full_name_and_email),
    ]
| 35.18 | 103 | 0.681637 |
from string import ascii_uppercase, digits
from django.db import migrations
from django.utils.crypto import get_random_string
def generate_team_invitation_token():
    """Return a random 16-character invitation token (uppercase letters and digits)."""
    return get_random_string(16, allowed_chars=ascii_uppercase + digits)
def convert_team_category_to_use_fk(apps, schema_editor):
    """Populate Team.category_fk from the legacy name-based ``category`` field.

    Teams with no matching CompetitionCategory are left unchanged and
    reported on stdout for manual follow-up.
    """
    Team = apps.get_model('competition', 'Team')
    CompetitionCategory = apps.get_model('competition', 'CompetitionCategory')
    for team in Team.objects.all():
        try:
            team.category_fk = CompetitionCategory.objects.get(name=team.category)
            team.save()
        except CompetitionCategory.DoesNotExist:
            # No category row carries this name: warn and continue.
            print(
                'WARNING: CompetitionCategory with name %s does not exist for team ID %d, name %s.' % (
                    team.category,
                    team.id,
                    team.name
                )
            )
            print('You might need to fill in the new category field manually for this team.')
def fill_invitation_full_name_and_email(apps, schema_editor):
    """Backfill invitation name/email and a fresh token on every TeamMember
    linked to a user; members without a user are skipped."""
    TeamMember = apps.get_model('competition', 'TeamMember')
    for team_member in TeamMember.objects.all():
        if team_member.user is not None:
            team_member.invitation_full_name = team_member.user.full_name
            team_member.invitation_email = team_member.user.email
            team_member.invitation_token = generate_team_invitation_token()
            team_member.save()
class Migration(migrations.Migration):
    """Data migration: populate Team.category_fk and backfill invitation
    details.  NOTE(review): no reverse_code, so this is irreversible."""
    dependencies = [
        ('competition', '0015_refactor_competition_1'),
    ]
    operations = [
        migrations.RunPython(convert_team_category_to_use_fk),
        migrations.RunPython(fill_invitation_full_name_and_email),
    ]
| true | true |
1c35f6d16868b76e19b08e1e6c11d9dcc39079ca | 14,608 | py | Python | readalongs/cli.py | joanise/ReadAlong-Studio | d68e821a10c58938216f188311f99389a3d6ca64 | [
"MIT"
] | null | null | null | readalongs/cli.py | joanise/ReadAlong-Studio | d68e821a10c58938216f188311f99389a3d6ca64 | [
"MIT"
] | null | null | null | readalongs/cli.py | joanise/ReadAlong-Studio | d68e821a10c58938216f188311f99389a3d6ca64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################################################################
#
# cli.py
#
# Initializes a Command Line Interface with Click.
# The main purpose of the cli is to align input files.
#
# CLI commands implemented in this file:
# - align : main command to align text and audio
# - epub : convert aligned file to epub format
# - prepare: prepare XML input for align from plain text
#
# Default CLI commands provided by Flask:
# - routes : show available routes in the this readalongs Flask app
# - run : run the readalongs Flask app
# - shell : open a shell within the readalongs Flask application context
#
#######################################################################
import io
import json
import os
import shutil
import sys
from tempfile import TemporaryFile
import click
from flask.cli import FlaskGroup
from lxml import etree
from readalongs._version import __version__
from readalongs.align import (
align_audio,
convert_to_xhtml,
create_input_tei,
return_words_and_sentences,
write_to_subtitles,
write_to_text_grid,
)
from readalongs.app import app
from readalongs.audio_utils import read_audio_from_file
from readalongs.epub.create_epub import create_epub
from readalongs.log import LOGGER
from readalongs.text.make_smil import make_smil
from readalongs.text.tokenize_xml import tokenize_xml
from readalongs.text.util import save_minimal_index_html, save_txt, save_xml, write_xml
from readalongs.views import LANGS
def create_app():
    """Return the readalongs Flask application (factory used by FlaskGroup below)."""
    return app
# Shared click context: accept both -h and --help on every command.
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# FlaskGroup also contributes Flask's default commands (routes, run, shell);
# the readalongs commands below are registered via @app.cli.command.
@click.version_option(version=__version__, prog_name="readalongs")
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS)
def cli():
    """Management script for Read Along Studio."""
@app.cli.command(
    context_settings=CONTEXT_SETTINGS, short_help="Force align a text and a sound file."
)
@click.argument("textfile", type=click.Path(exists=True, readable=True))
@click.argument("audiofile", type=click.Path(exists=True, readable=True))
# click exposes "output-base" to the function as kwargs["output_base"].
@click.argument("output-base", type=click.Path())
@click.option(
    "-b",
    "--bare",
    is_flag=True,
    help="Bare alignments do not split silences between words",
)
@click.option(
    "-c",
    "--config",
    type=click.Path(exists=True),
    help="Use ReadAlong-Studio configuration file (in JSON format)",
)
@click.option(
    "-C",
    "--closed-captioning",
    is_flag=True,
    help="Export sentences to WebVTT and SRT files",
)
@click.option("-d", "--debug", is_flag=True, help="Add debugging messages to logger")
@click.option(
    "-f", "--force-overwrite", is_flag=True, help="Force overwrite output files"
)
@click.option(
    "-i",
    "--text-input",
    is_flag=True,
    help="Input is plain text (assume paragraphs are separated by blank lines, pages are separated by two blank lines)",
)
@click.option(
    "-l",
    "--language",
    type=click.Choice(LANGS, case_sensitive=False),
    help="Set language for plain text input",
)
@click.option(
    "-u",
    "--unit",
    type=click.Choice(["w", "m"], case_sensitive=False),
    help="Unit (w = word, m = morpheme) to align to",
)
@click.option(
    "-s",
    "--save-temps",
    is_flag=True,
    help="Save intermediate stages of processing and temporary files (dictionary, FSG, tokenization etc)",
)
@click.option(
    "-t", "--text-grid", is_flag=True, help="Export to Praat TextGrid & ELAN eaf file"
)
@click.option(
    "-x", "--output-xhtml", is_flag=True, help="Output simple XHTML instead of XML"
)
def align(**kwargs):
    """Align TEXTFILE and AUDIOFILE and create output files as OUTPUT_BASE.* in directory
    OUTPUT_BASE/.
    TEXTFILE: Input text file path (in XML, or plain text with -i)
    AUDIOFILE: Input audio file path, in any format supported by ffmpeg
    OUTPUT_BASE: Base name for output files
    """
    # Load the optional JSON config up front so a bad file fails before any work.
    # NOTE(review): a JSONDecodeError is only logged, not raised, so alignment
    # proceeds with the config *path string* instead of a dict — confirm intended.
    config = kwargs.get("config", None)
    if config:
        if config.endswith("json"):
            try:
                with open(config) as f:
                    config = json.load(f)
            except json.decoder.JSONDecodeError:
                LOGGER.error(f"Config file at {config} is not valid json.")
        else:
            raise click.BadParameter(f"Config file '{config}' must be in JSON format")
    # OUTPUT_BASE doubles as the output directory name; create it if needed.
    output_dir = kwargs["output_base"]
    if os.path.exists(output_dir):
        if not os.path.isdir(output_dir):
            raise click.UsageError(
                f"Output folder '{output_dir}' already exists but is a not a directory."
            )
        if not kwargs["force_overwrite"]:
            raise click.UsageError(
                f"Output folder '{output_dir}' already exists, use -f to overwrite."
            )
    else:
        os.mkdir(output_dir)
    # Make sure we can write to the output directory, for early error checking and user
    # friendly error messages.
    try:
        with TemporaryFile(dir=output_dir):
            pass
    except Exception:
        raise click.UsageError(
            f"Cannot write into output folder '{output_dir}'. Please verify permissions."
        )
    # All outputs are named output_dir/<basename>.<ext>.
    output_basename = os.path.basename(output_dir)
    output_base = os.path.join(output_dir, output_basename)
    temp_base = None
    if kwargs["save_temps"]:
        # Intermediate files go into output_dir/tempfiles/; -f lets us replace
        # a plain file that happens to occupy that name.
        temp_dir = os.path.join(output_dir, "tempfiles")
        if not os.path.isdir(temp_dir):
            if os.path.exists(temp_dir) and kwargs["force_overwrite"]:
                os.unlink(temp_dir)
            os.mkdir(temp_dir)
        temp_base = os.path.join(temp_dir, output_basename)
    if kwargs["debug"]:
        LOGGER.setLevel("DEBUG")
    if kwargs["text_input"]:
        if not kwargs["language"]:
            # NOTE(review): LOGGER.warn is a deprecated alias of warning.
            LOGGER.warn("No input language provided, using undetermined mapping")
        # Convert plain text to a TEI XML file; the returned handle presumably
        # keeps the temp file alive — TODO confirm create_input_tei's contract.
        tempfile, kwargs["textfile"] = create_input_tei(
            input_file_name=kwargs["textfile"],
            text_language=kwargs["language"],
            save_temps=temp_base,
        )
    # Pick output names, refusing to clobber existing files unless -f is given.
    if kwargs["output_xhtml"]:
        tokenized_xml_path = "%s.xhtml" % output_base
    else:
        _, input_ext = os.path.splitext(kwargs["textfile"])
        tokenized_xml_path = "%s%s" % (output_base, input_ext)
    if os.path.exists(tokenized_xml_path) and not kwargs["force_overwrite"]:
        raise click.BadParameter(
            "Output file %s exists already, use -f to overwrite." % tokenized_xml_path
        )
    smil_path = output_base + ".smil"
    if os.path.exists(smil_path) and not kwargs["force_overwrite"]:
        raise click.BadParameter(
            "Output file %s exists already, use -f to overwrite." % smil_path
        )
    _, audio_ext = os.path.splitext(kwargs["audiofile"])
    audio_path = output_base + audio_ext
    if os.path.exists(audio_path) and not kwargs["force_overwrite"]:
        raise click.BadParameter(
            "Output file %s exists already, use -f to overwrite." % audio_path
        )
    unit = kwargs.get("unit", "w")
    bare = kwargs.get("bare", False)
    if (
        not unit
    ):  # .get() above should handle this but apparently the way kwargs is implemented
        unit = "w"  # unit could still be None here.
    # Core alignment; any RuntimeError is reported and aborts the command.
    try:
        results = align_audio(
            kwargs["textfile"],
            kwargs["audiofile"],
            unit=unit,
            bare=bare,
            config=config,
            save_temps=temp_base,
        )
    except RuntimeError as e:
        LOGGER.error(e)
        exit(1)
    # Optional exports: Praat TextGrid + ELAN eaf.
    if kwargs["text_grid"]:
        audio = read_audio_from_file(kwargs["audiofile"])
        duration = audio.frame_count() / audio.frame_rate
        words, sentences = return_words_and_sentences(results)
        textgrid = write_to_text_grid(words, sentences, duration)
        textgrid.to_file(output_base + ".TextGrid")
        textgrid.to_eaf().to_file(output_base + ".eaf")
    # Optional exports: WebVTT + SRT subtitles, per sentence and per word.
    if kwargs["closed_captioning"]:
        words, sentences = return_words_and_sentences(results)
        webvtt_sentences = write_to_subtitles(sentences)
        webvtt_sentences.save(output_base + "_sentences.vtt")
        webvtt_sentences.save_as_srt(output_base + "_sentences.srt")
        webvtt_words = write_to_subtitles(words)
        webvtt_words.save(output_base + "_words.vtt")
        webvtt_words.save_as_srt(output_base + "_words.srt")
    if kwargs["output_xhtml"]:
        convert_to_xhtml(results["tokenized"])
    # Always write the viewer page, tokenized XML, SMIL, and a copy of the audio.
    save_minimal_index_html(
        os.path.join(output_dir, "index.html"),
        os.path.basename(tokenized_xml_path),
        os.path.basename(smil_path),
        os.path.basename(audio_path),
    )
    save_xml(tokenized_xml_path, results["tokenized"])
    smil = make_smil(
        os.path.basename(tokenized_xml_path), os.path.basename(audio_path), results
    )
    shutil.copy(kwargs["audiofile"], audio_path)
    save_txt(smil_path, smil)
@app.cli.command(
    context_settings=CONTEXT_SETTINGS, short_help="Convert a smil document to epub."
)
@click.argument("input", type=click.Path(exists=True, readable=True))
@click.argument("output", type=click.Path(exists=False, readable=True))
@click.option(
    "-u",
    "--unpacked",
    is_flag=True,
    help="Output unpacked directory of files (for testing)",
)
def epub(**kwargs):
    """
    Convert INPUT smil document to epub with media overlay at OUTPUT
    INPUT: The .smil document
    OUTPUT: Path to the .epub output
    """
    # Thin wrapper: all conversion logic lives in create_epub.
    create_epub(kwargs["input"], kwargs["output"], kwargs["unpacked"])
@app.cli.command(
    context_settings=CONTEXT_SETTINGS,
    short_help="Prepare XML input to align from plain text.",
)
@click.argument("plaintextfile", type=click.File("rb"))
@click.argument("xmlfile", type=click.Path(), required=False, default="")
@click.option("-d", "--debug", is_flag=True, help="Add debugging messages to logger")
@click.option(
    "-f", "--force-overwrite", is_flag=True, help="Force overwrite output files"
)
@click.option(
    "-l",
    "--language",
    type=click.Choice(LANGS, case_sensitive=False),
    required=True,
    help="Set language for input file",
)
def prepare(**kwargs):
    """Prepare XMLFILE for 'readalongs align' from PLAINTEXTFILE.
    PLAINTEXTFILE must be plain text encoded in utf-8, with one sentence per line,
    paragraph breaks marked by a blank line, and page breaks marked by two
    blank lines.
    PLAINTEXTFILE: Path to the plain text input file, or - for stdin
    XMLFILE: Path to the XML output file, or - for stdout [default: PLAINTEXTFILE.xml]
    """
    if kwargs["debug"]:
        LOGGER.setLevel("DEBUG")
    LOGGER.info(
        "Running readalongs prepare(lang={}, force-overwrite={}, plaintextfile={}, xmlfile={}).".format(
            kwargs["language"],
            kwargs["force_overwrite"],
            kwargs["plaintextfile"],
            kwargs["xmlfile"],
        )
    )
    input_file = kwargs["plaintextfile"]
    out_file = kwargs["xmlfile"]
    # Derive the default output name from the input: stdin -> stdout,
    # foo.txt -> foo.xml, anything else -> <name>.xml.
    if not out_file:
        try:
            out_file = input_file.name
        except Exception:  # For unit testing: simulated stdin stream has no .name attrib
            out_file = "<stdin>"
        if out_file == "<stdin>":  # actual intput_file.name when cli input is "-"
            out_file = "-"
        else:
            if out_file.endswith(".txt"):
                out_file = out_file[:-4]
            out_file += ".xml"
    if out_file == "-":
        # Stdout mode: write the TEI to a temp file, then echo it to stdout.
        # NOTE(review): filehandle is never closed and the temp file named by
        # `filename` is not removed — confirm create_input_tei's contract.
        filehandle, filename = create_input_tei(
            input_file_handle=input_file, text_language=kwargs["language"],
        )
        with io.open(filename) as f:
            sys.stdout.write(f.read())
    else:
        if not out_file.endswith(".xml"):
            out_file += ".xml"
        if os.path.exists(out_file) and not kwargs["force_overwrite"]:
            raise click.BadParameter(
                "Output file %s exists already, use -f to overwrite." % out_file
            )
        filehandle, filename = create_input_tei(
            input_file_handle=input_file,
            text_language=kwargs["language"],
            output_file=out_file,
        )
    LOGGER.info("Wrote {}".format(out_file))
@app.cli.command(
    context_settings=CONTEXT_SETTINGS,
    short_help="Tokenize XML file to align from XML file produced by prepare.",
)
@click.argument("xmlfile", type=click.File("rb"))
@click.argument("tokfile", type=click.Path(), required=False, default="")
@click.option("-d", "--debug", is_flag=True, help="Add debugging messages to logger")
@click.option(
    "-f", "--force-overwrite", is_flag=True, help="Force overwrite output files"
)
def tokenize(**kwargs):
    """Tokenize XMLFILE for 'readalongs align' into TOKFILE.
    XMLFILE should have been produce by 'readalongs prepare'.
    TOKFILE can be augmented with word-specific language codes.
    'readalongs align' can be called with either XMLFILE or TOKFILE as XML input.
    XMLFILE: Path to the XML file to tokenize, or - for stdin
    TOKFILE: Output path for the tok'd XML, or - for stdout [default: XMLFILE.tokenized.xml]
    """
    xmlfile = kwargs["xmlfile"]
    if kwargs["debug"]:
        LOGGER.setLevel("DEBUG")
    LOGGER.info(
        "Running readalongs tokenize(xmlfile={}, tokfile={}, force-overwrite={}).".format(
            kwargs["xmlfile"], kwargs["tokfile"], kwargs["force_overwrite"],
        )
    )
    # Derive the default output name: stdin -> stdout, foo.xml -> foo.tokenized.xml.
    if not kwargs["tokfile"]:
        try:
            output_tok_path = xmlfile.name
        except Exception:
            output_tok_path = "<stdin>"
        if output_tok_path == "<stdin>":
            output_tok_path = "-"
        else:
            if output_tok_path.endswith(".xml"):
                output_tok_path = output_tok_path[:-4]
            output_tok_path += ".tokenized.xml"
    else:
        output_tok_path = kwargs["tokfile"]
        if not output_tok_path.endswith(".xml") and not output_tok_path == "-":
            output_tok_path += ".xml"
    if os.path.exists(output_tok_path) and not kwargs["force_overwrite"]:
        raise click.BadParameter(
            "Output file %s exists already, use -f to overwrite." % output_tok_path
        )
    try:
        xml = etree.parse(xmlfile).getroot()
    except etree.XMLSyntaxError as e:
        # NOTE(review): xmlfile is a file object here, so %s prints its repr,
        # not the path — confirm whether the message should use xmlfile.name.
        raise click.BadParameter(
            "Error parsing input file %s as XML, please verify it. Parser error: %s"
            % (xmlfile, e)
        )
    xml = tokenize_xml(xml)
    if output_tok_path == "-":
        write_xml(sys.stdout.buffer, xml)
    else:
        save_xml(output_tok_path, xml)
        LOGGER.info("Wrote {}".format(output_tok_path))
| 33.972093 | 120 | 0.638896 |
okenized_xml_path = "%s%s" % (output_base, input_ext)
if os.path.exists(tokenized_xml_path) and not kwargs["force_overwrite"]:
raise click.BadParameter(
"Output file %s exists already, use -f to overwrite." % tokenized_xml_path
)
smil_path = output_base + ".smil"
if os.path.exists(smil_path) and not kwargs["force_overwrite"]:
raise click.BadParameter(
"Output file %s exists already, use -f to overwrite." % smil_path
)
_, audio_ext = os.path.splitext(kwargs["audiofile"])
audio_path = output_base + audio_ext
if os.path.exists(audio_path) and not kwargs["force_overwrite"]:
raise click.BadParameter(
"Output file %s exists already, use -f to overwrite." % audio_path
)
unit = kwargs.get("unit", "w")
bare = kwargs.get("bare", False)
if (
not unit
):
unit = "w"
try:
results = align_audio(
kwargs["textfile"],
kwargs["audiofile"],
unit=unit,
bare=bare,
config=config,
save_temps=temp_base,
)
except RuntimeError as e:
LOGGER.error(e)
exit(1)
if kwargs["text_grid"]:
audio = read_audio_from_file(kwargs["audiofile"])
duration = audio.frame_count() / audio.frame_rate
words, sentences = return_words_and_sentences(results)
textgrid = write_to_text_grid(words, sentences, duration)
textgrid.to_file(output_base + ".TextGrid")
textgrid.to_eaf().to_file(output_base + ".eaf")
if kwargs["closed_captioning"]:
words, sentences = return_words_and_sentences(results)
webvtt_sentences = write_to_subtitles(sentences)
webvtt_sentences.save(output_base + "_sentences.vtt")
webvtt_sentences.save_as_srt(output_base + "_sentences.srt")
webvtt_words = write_to_subtitles(words)
webvtt_words.save(output_base + "_words.vtt")
webvtt_words.save_as_srt(output_base + "_words.srt")
if kwargs["output_xhtml"]:
convert_to_xhtml(results["tokenized"])
save_minimal_index_html(
os.path.join(output_dir, "index.html"),
os.path.basename(tokenized_xml_path),
os.path.basename(smil_path),
os.path.basename(audio_path),
)
save_xml(tokenized_xml_path, results["tokenized"])
smil = make_smil(
os.path.basename(tokenized_xml_path), os.path.basename(audio_path), results
)
shutil.copy(kwargs["audiofile"], audio_path)
save_txt(smil_path, smil)
@app.cli.command(
    context_settings=CONTEXT_SETTINGS, short_help="Convert a smil document to epub."
)
@click.argument("input", type=click.Path(exists=True, readable=True))
@click.argument("output", type=click.Path(exists=False, readable=True))
@click.option(
    "-u",
    "--unpacked",
    is_flag=True,
    help="Output unpacked directory of files (for testing)",
)
def epub(**kwargs):
    """Convert the INPUT smil document to an epub with media overlay at OUTPUT."""
    create_epub(kwargs["input"], kwargs["output"], kwargs["unpacked"])
@app.cli.command(
    context_settings=CONTEXT_SETTINGS,
    short_help="Prepare XML input to align from plain text.",
)
@click.argument("plaintextfile", type=click.File("rb"))
@click.argument("xmlfile", type=click.Path(), required=False, default="")
@click.option("-d", "--debug", is_flag=True, help="Add debugging messages to logger")
@click.option(
    "-f", "--force-overwrite", is_flag=True, help="Force overwrite output files"
)
@click.option(
    "-l",
    "--language",
    type=click.Choice(LANGS, case_sensitive=False),
    required=True,
    help="Set language for input file",
)
def prepare(**kwargs):
    """Prepare XMLFILE for 'readalongs align' from plain-text PLAINTEXTFILE.
    PLAINTEXTFILE: plain text input, or - for stdin.
    XMLFILE: XML output, or - for stdout [default: PLAINTEXTFILE.xml].
    """
    if kwargs["debug"]:
        LOGGER.setLevel("DEBUG")
    LOGGER.info(
        "Running readalongs prepare(lang={}, force-overwrite={}, plaintextfile={}, xmlfile={}).".format(
            kwargs["language"],
            kwargs["force_overwrite"],
            kwargs["plaintextfile"],
            kwargs["xmlfile"],
        )
    )
    input_file = kwargs["plaintextfile"]
    out_file = kwargs["xmlfile"]
    # Default output name: stdin -> stdout, foo.txt -> foo.xml.
    if not out_file:
        try:
            out_file = input_file.name
        except Exception:  # simulated stdin streams have no .name attribute
            out_file = "<stdin>"
        if out_file == "<stdin>":
            out_file = "-"
        else:
            if out_file.endswith(".txt"):
                out_file = out_file[:-4]
            out_file += ".xml"
    if out_file == "-":
        # Stdout mode: generate TEI into a temp file, then echo it to stdout.
        filehandle, filename = create_input_tei(
            input_file_handle=input_file, text_language=kwargs["language"],
        )
        with io.open(filename) as f:
            sys.stdout.write(f.read())
    else:
        if not out_file.endswith(".xml"):
            out_file += ".xml"
        if os.path.exists(out_file) and not kwargs["force_overwrite"]:
            raise click.BadParameter(
                "Output file %s exists already, use -f to overwrite." % out_file
            )
        filehandle, filename = create_input_tei(
            input_file_handle=input_file,
            text_language=kwargs["language"],
            output_file=out_file,
        )
    LOGGER.info("Wrote {}".format(out_file))
@app.cli.command(
    context_settings=CONTEXT_SETTINGS,
    short_help="Tokenize XML file to align from XML file produced by prepare.",
)
@click.argument("xmlfile", type=click.File("rb"))
@click.argument("tokfile", type=click.Path(), required=False, default="")
@click.option("-d", "--debug", is_flag=True, help="Add debugging messages to logger")
@click.option(
    "-f", "--force-overwrite", is_flag=True, help="Force overwrite output files"
)
def tokenize(**kwargs):
    """Tokenize XMLFILE (as produced by 'readalongs prepare') into TOKFILE.
    XMLFILE: XML input, or - for stdin.
    TOKFILE: tokenized XML output, or - for stdout [default: XMLFILE.tokenized.xml].
    """
    xmlfile = kwargs["xmlfile"]
    if kwargs["debug"]:
        LOGGER.setLevel("DEBUG")
    LOGGER.info(
        "Running readalongs tokenize(xmlfile={}, tokfile={}, force-overwrite={}).".format(
            kwargs["xmlfile"], kwargs["tokfile"], kwargs["force_overwrite"],
        )
    )
    # Default output name: stdin -> stdout, foo.xml -> foo.tokenized.xml.
    if not kwargs["tokfile"]:
        try:
            output_tok_path = xmlfile.name
        except Exception:
            output_tok_path = "<stdin>"
        if output_tok_path == "<stdin>":
            output_tok_path = "-"
        else:
            if output_tok_path.endswith(".xml"):
                output_tok_path = output_tok_path[:-4]
            output_tok_path += ".tokenized.xml"
    else:
        output_tok_path = kwargs["tokfile"]
        if not output_tok_path.endswith(".xml") and not output_tok_path == "-":
            output_tok_path += ".xml"
    if os.path.exists(output_tok_path) and not kwargs["force_overwrite"]:
        raise click.BadParameter(
            "Output file %s exists already, use -f to overwrite." % output_tok_path
        )
    try:
        xml = etree.parse(xmlfile).getroot()
    except etree.XMLSyntaxError as e:
        raise click.BadParameter(
            "Error parsing input file %s as XML, please verify it. Parser error: %s"
            % (xmlfile, e)
        )
    xml = tokenize_xml(xml)
    if output_tok_path == "-":
        write_xml(sys.stdout.buffer, xml)
    else:
        save_xml(output_tok_path, xml)
        LOGGER.info("Wrote {}".format(output_tok_path))
| true | true |
1c35f7705579fc799bca6e671589cc6191a30340 | 5,131 | py | Python | workflow_clues.py | erikfogh/relate-clues | aa4d34f5af2b960cc16f0e9b3462e184cadff239 | [
"MIT"
] | null | null | null | workflow_clues.py | erikfogh/relate-clues | aa4d34f5af2b960cc16f0e9b3462e184cadff239 | [
"MIT"
] | null | null | null | workflow_clues.py | erikfogh/relate-clues | aa4d34f5af2b960cc16f0e9b3462e184cadff239 | [
"MIT"
] | null | null | null | from gwf import Workflow, AnonymousTarget
import os
from groups import Group
os.environ['NUMEXPR_MAX_THREADS'] = '8' # to suppress a warning
gwf = Workflow()
# Absolute paths to various parts - possibly rewrite to use relative paths
path_to_relate = "/faststorage/home/eriks/relate-clues/relate_v1.1.2_x86_64_dynamic/"
path_to_prepared = "/faststorage/home/eriks/relate-clues/steps/"
path_to_script = "/faststorage/home/eriks/relate-clues/scripts/clues_master.py"
path_to_results = "/faststorage/home/eriks/relate-clues/results/"
path_to_relate_results = "/faststorage/home/eriks/relate-clues/results/"
summarize_script = "/faststorage/home/eriks/relate-clues/scripts/summarize_clues.py"
table_gen_script = "/faststorage/home/eriks/relate-clues/scripts/table_generation.py"
# Name of directory, followed by mutation rate and populaiton size.
pop_information = {"CEU_kept_prepared/": ["1.25e-8", "30000"],
} # "YRI_kept_prepared/": ["1.25e-8", "32000"]
# specification of clues variables. Timebins indicates the time to test for selection
# burn_in and thinning are used for MCMC sampling
timebins = "/faststorage/home/eriks/relate-clues/data/clues_supporting/time1500_2500.txt"
relate_mcmc = 150
burn_in = 50
thinning = 1
daf_bound = 0.25
prioritize_sites = False
# Chromosome number, start and end of window, as well as number of tests in total and number of tests per job.
chromosome = "2"
window_start = 136000000
window_end = 137000000
number_of_tests = 100
tests_per_job = 10
# Name added to dir
identifier = "no_prio_ooa"
dir_suffix = "chrom{}_{}_{}_{}_{}".format(chromosome, window_start, window_end, number_of_tests, identifier)
def table_gen(script, relate_results, start, end, spacing, daf_bound, prioritize, out_dir):
    """Build the gwf target that generates the temporary CLUES site table.

    Reads the Relate ``.freq`` output for the window and invokes the
    table-generation script, which writes ``clues_table_temp.txt`` into
    *out_dir*.
    """
    freq_file = relate_results + ".freq"
    job_options = {"cores": 2, "memory": "10g", "walltime": "1:00:00"}
    command = """
    python {} {} {} {} {} {} {} -o {}
    """.format(script, relate_results, start, end, int(spacing), daf_bound, prioritize, out_dir)
    print(command)
    return AnonymousTarget(inputs=[freq_file],
                           outputs=out_dir + "clues_table_temp.txt",
                           options=job_options,
                           spec=command)
def relate_clues(chunk, chunk_number, script, number, out_dir, pop_inf, input_dir, relate_path, timebins, mcmc, burnin, thin):
    """Build the gwf target that runs CLUES on one chunk of test sites."""
    # Coalescence-rate file produced by Relate; the ".coal" suffix is
    # stripped below (i[:-5]) because clues_master.py expects the prefix.
    i = input_dir+"chrom{}_popsize.coal".format(number)
    # NOTE(review): `i` is referenced in the spec but not listed in
    # `inputs`, so gwf does not track it as a dependency -- confirm intended.
    inputs = [out_dir+"clues_table_temp.txt"]
    outputs = out_dir+"chunk{}_table.txt".format(chunk)
    # Walltime scales with the module-level `tests_per_job`:
    # a 10-hour base plus 10 hours for every full group of 10 tests.
    walltime = 10*(tests_per_job//10)+10
    options = {
        "cores": 10,
        "memory": "30g",
        "walltime": "{}:00:00".format(walltime)
    }
    # The clues code is executed from inside the clues/ checkout.
    spec = """
    cd clues/
    python {} {} {} -i {} -o {} -m {} -r {} -b {} --mcmc {} --burnin {} --thin {}
    """.format(script, chunk, chunk_number, i[:-5], out_dir, pop_inf[0], relate_path, timebins, mcmc, burnin, thin)
    print(spec)
    return AnonymousTarget(inputs=inputs, outputs=outputs, options=options, spec=spec)
def summarize_clues(clues_results, script, out_dir):
    """Build the gwf target that merges per-chunk CLUES tables into one file."""
    merged_table = out_dir + "clues_table.txt"
    job_options = {"cores": 2, "memory": "4g", "walltime": "1:00:00"}
    command = """
    python {} -i {} -o {}
    """.format(script, out_dir, merged_table)
    print(command)
    return AnonymousTarget(inputs=clues_results,
                           outputs=merged_table,
                           options=job_options,
                           spec=command)
# Derived quantities: distance between consecutive test sites, and how many
# gwf jobs (chunks) are needed to cover all tests (ceiling division).
site_spacing = (window_end-window_start)/number_of_tests
chunk_number = number_of_tests//tests_per_job + (number_of_tests % tests_per_job > 0)
chunk_list = list(range(1, chunk_number+1))
print("""Trying to place a site every {} bases, leading to {} tests.
""".format(site_spacing, number_of_tests))
# One workflow group per population: table generation -> chunked CLUES
# runs -> summary table.
for pop in pop_information:
    # Strip the trailing "_prepared/" (10 chars) from the directory name.
    pop_name = pop[:-10]
    out_dir = os.getcwd()+"/results/{}_{}_clues/".format(pop_name, dir_suffix)
    os.makedirs(out_dir+"tmp/", exist_ok=True)
    relate_results_path = path_to_relate_results+"{}_relate/chrom{}_selection".format(pop_name, chromosome)
    with Group(gwf, suffix=pop_name+"_"+identifier) as g:
        g.target_from_template("temp_table",
                               table_gen(table_gen_script, relate_results_path, window_start,
                                         window_end, site_spacing, daf_bound, prioritize_sites, out_dir))
        # Fan out one CLUES job per chunk; each job reads the temp table.
        total_workflow = g.map(relate_clues, chunk_list,
                               name="chrom_{}_{}_{}".format(chromosome, window_start, number_of_tests),
                               extra={"chunk_number": chunk_number, "script": path_to_script,
                                      "number": chromosome, "out_dir": out_dir,
                                      "pop_inf": pop_information[pop], "input_dir": path_to_prepared+"{}_relate/".format(pop_name),
                                      "relate_path": path_to_relate, "timebins": timebins,
                                      "mcmc": relate_mcmc, "burnin": burn_in, "thin": thinning})
        g.target_from_template("summarize",
                               summarize_clues(total_workflow.outputs, script=summarize_script,
                                               out_dir=out_dir))
| 45.8125 | 131 | 0.659131 | from gwf import Workflow, AnonymousTarget
import os
from groups import Group
os.environ['NUMEXPR_MAX_THREADS'] = '8'
gwf = Workflow()
path_to_relate = "/faststorage/home/eriks/relate-clues/relate_v1.1.2_x86_64_dynamic/"
path_to_prepared = "/faststorage/home/eriks/relate-clues/steps/"
path_to_script = "/faststorage/home/eriks/relate-clues/scripts/clues_master.py"
path_to_results = "/faststorage/home/eriks/relate-clues/results/"
path_to_relate_results = "/faststorage/home/eriks/relate-clues/results/"
summarize_script = "/faststorage/home/eriks/relate-clues/scripts/summarize_clues.py"
table_gen_script = "/faststorage/home/eriks/relate-clues/scripts/table_generation.py"
pop_information = {"CEU_kept_prepared/": ["1.25e-8", "30000"],
}
timebins = "/faststorage/home/eriks/relate-clues/data/clues_supporting/time1500_2500.txt"
relate_mcmc = 150
burn_in = 50
thinning = 1
daf_bound = 0.25
prioritize_sites = False
chromosome = "2"
window_start = 136000000
window_end = 137000000
number_of_tests = 100
tests_per_job = 10
identifier = "no_prio_ooa"
dir_suffix = "chrom{}_{}_{}_{}_{}".format(chromosome, window_start, window_end, number_of_tests, identifier)
def table_gen(script, relate_results, start, end, spacing, daf_bound, prioritize, out_dir):
i = relate_results+".freq"
inputs = [i]
outputs = out_dir+"clues_table_temp.txt"
options = {
"cores": 2,
"memory": "10g",
"walltime": "1:00:00"
}
spec = """
python {} {} {} {} {} {} {} -o {}
""".format(script, relate_results, start, end, int(spacing), daf_bound, prioritize, out_dir)
print(spec)
return AnonymousTarget(inputs=inputs, outputs=outputs, options=options, spec=spec)
def relate_clues(chunk, chunk_number, script, number, out_dir, pop_inf, input_dir, relate_path, timebins, mcmc, burnin, thin):
i = input_dir+"chrom{}_popsize.coal".format(number)
inputs = [out_dir+"clues_table_temp.txt"]
outputs = out_dir+"chunk{}_table.txt".format(chunk)
walltime = 10*(tests_per_job//10)+10
options = {
"cores": 10,
"memory": "30g",
"walltime": "{}:00:00".format(walltime)
}
spec = """
cd clues/
python {} {} {} -i {} -o {} -m {} -r {} -b {} --mcmc {} --burnin {} --thin {}
""".format(script, chunk, chunk_number, i[:-5], out_dir, pop_inf[0], relate_path, timebins, mcmc, burnin, thin)
print(spec)
return AnonymousTarget(inputs=inputs, outputs=outputs, options=options, spec=spec)
def summarize_clues(clues_results, script, out_dir):
inputs = clues_results
outputs = out_dir+"clues_table.txt"
options = {
"cores": 2,
"memory": "4g",
"walltime": "1:00:00"
}
spec = """
python {} -i {} -o {}
""".format(script, out_dir, outputs)
print(spec)
return AnonymousTarget(inputs=inputs, outputs=outputs, options=options, spec=spec)
site_spacing = (window_end-window_start)/number_of_tests
chunk_number = number_of_tests//tests_per_job + (number_of_tests % tests_per_job > 0)
chunk_list = list(range(1, chunk_number+1))
print("""Trying to place a site every {} bases, leading to {} tests.
""".format(site_spacing, number_of_tests))
for pop in pop_information:
pop_name = pop[:-10]
out_dir = os.getcwd()+"/results/{}_{}_clues/".format(pop_name, dir_suffix)
os.makedirs(out_dir+"tmp/", exist_ok=True)
relate_results_path = path_to_relate_results+"{}_relate/chrom{}_selection".format(pop_name, chromosome)
with Group(gwf, suffix=pop_name+"_"+identifier) as g:
g.target_from_template("temp_table",
table_gen(table_gen_script, relate_results_path, window_start,
window_end, site_spacing, daf_bound, prioritize_sites, out_dir))
total_workflow = g.map(relate_clues, chunk_list,
name="chrom_{}_{}_{}".format(chromosome, window_start, number_of_tests),
extra={"chunk_number": chunk_number, "script": path_to_script,
"number": chromosome, "out_dir": out_dir,
"pop_inf": pop_information[pop], "input_dir": path_to_prepared+"{}_relate/".format(pop_name),
"relate_path": path_to_relate, "timebins": timebins,
"mcmc": relate_mcmc, "burnin": burn_in, "thin": thinning})
g.target_from_template("summarize",
summarize_clues(total_workflow.outputs, script=summarize_script,
out_dir=out_dir))
| true | true |
1c35f82579848e6ecb2f941bb1f17166f8aa4a9f | 11,229 | py | Python | bottleLdap/bottleLdap.py | zwgtdev/bottleLdap | 36dd6f8978a37124fcd0d1a3970cc0fc68e8ad0f | [
"MIT"
] | null | null | null | bottleLdap/bottleLdap.py | zwgtdev/bottleLdap | 36dd6f8978a37124fcd0d1a3970cc0fc68e8ad0f | [
"MIT"
] | null | null | null | bottleLdap/bottleLdap.py | zwgtdev/bottleLdap | 36dd6f8978a37124fcd0d1a3970cc0fc68e8ad0f | [
"MIT"
] | null | null | null | import sys
import os
import json
import ldap
import bottle
from collections import OrderedDict
class AAAException(Exception):
    """Base class for all authentication/authorization errors in this module."""
class AuthException(AAAException):
    """Raised when a username/password pair fails to authenticate."""
class UserExists(AAAException):
    """Raised when attempting to create a user that already exists."""
    pass
class JsonRoleStore(object):
    """Role store persisted as a JSON file on disk.

    Roles live in ``self.roles`` as a mapping of username -> list of role
    names, mirrored to ``<directory>/<filename>``.
    """

    def __init__(self, directory, filename='roles.json'):
        self.directory = directory
        self.filename = filename
        self._pathfinder()
        self._load_roles()

    def _pathfinder(self):
        """Ensure the storage directory and the JSON file exist."""
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        if not os.path.exists(os.path.join(self.directory, self.filename)):
            self._save_roles({})

    def _load_roles(self):
        """(Re)load ``self.roles`` from disk; fall back to an empty mapping."""
        try:
            with open(os.path.join(self.directory, self.filename), 'r') as role_file:
                self.roles = json.loads(role_file.read())
        except (OSError, IOError, ValueError):
            # Missing or corrupt file: start from an empty mapping.
            # (Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; json.JSONDecodeError is a ValueError.)
            self.roles = OrderedDict()

    def _save_roles(self, data):
        """Write *data* to the JSON file, then refresh the in-memory copy."""
        with open(os.path.join(self.directory, self.filename), 'w') as json_file:
            json_file.write(json.dumps(data))
        self._load_roles()

    def update_user_roles(self, role_data):
        """Persist a full replacement of the role mapping."""
        self._save_roles(role_data)
class BaseAuth(object):
    """BaseAuth is an authentication module that uses AD to authenticate a user."""

    def __init__(self, ldap_server, ldap_domain, session_key_name=None, session_domain=None, search_domain=None):
        """Auth class
        :param ldap_server: ldap server address
        :type ldap_server: str.
        :param ldap_domain: ldap domain name
        :type ldap_domain: str.
        :param session_key_name: WSGI environ key of the beaker session
            (defaults to 'beaker.session')
        :param session_domain: cookie domain applied to the session
        :param search_domain: LDAP base DN used when looking up user details
        """
        super(BaseAuth, self).__init__()
        self.ldap_server = ldap_server
        self.ldap_domain = ldap_domain
        self.search_domain = search_domain
        self.session_key_name = session_key_name or 'beaker.session'
        self.session_domain = session_domain
        # Roles are persisted to the relative directory "users".
        self._store = JsonRoleStore('users')

    def login(self, username, password, success_redirect=None,
              fail_redirect=None):
        """Check login credentials for an existing user.
        Optionally redirect the user to another page (typically /login)
        :param username: username
        :type username: str or unicode.
        :param password: cleartext password
        :type password: str.or unicode
        :param success_redirect: redirect authorized users (optional)
        :type success_redirect: str.
        :param fail_redirect: redirect unauthorized users (optional)
        :type fail_redirect: str.
        :returns: True for successful logins, else False
        """
        # assert isinstance(username, type(u'')), "username must be a string"
        # assert isinstance(password, type(u'')), "password must be a string"
        auth_res, user_details = self._verify_password(
            username,
            password,
        )
        if auth_res == 'Authenticated':
            # Setup session data
            self._setup_cookie(username, user_details)
            # First login auto-registers the user with the 'user' role.
            self._check_roles(username)
            if success_redirect:
                self._redirect(success_redirect)
            return True
        if fail_redirect:
            self._redirect(fail_redirect)
        return False

    def logout(self, success_redirect='/login', fail_redirect='/login'):
        """Log the user out, remove cookie
        :param success_redirect: redirect the user after logging out
        :type success_redirect: str.
        :param fail_redirect: redirect the user if it is not logged in
        :type fail_redirect: str.
        """
        try:
            session = self._beaker_session
            session.delete()
        except Exception as e:
            # NOTE(review): `log` is not defined anywhere in this module --
            # if session.delete() raises, this line itself raises NameError.
            # Confirm and switch to a real logging.getLogger(...) logger.
            log.debug("Exception %s while logging out." % repr(e))
            self._redirect(fail_redirect)
        self._redirect(success_redirect)

    def make_auth_decorator(self, role=None,
                            fail_redirect='/login'):
        '''
        Create a decorator to be used for authentication and authorization
        :param role: Only this role gets authorized
        :param fail_redirect: The URL to redirect to if a login is required.
        '''
        # Capture `self` so the generated decorator can call require().
        session_manager = self

        def auth_require(role=role,
                         fail_redirect=fail_redirect):
            def decorator(func):
                import functools

                @functools.wraps(func)
                def wrapper(*a, **ka):
                    # Authentication/authorization check runs before the
                    # wrapped handler on every request.
                    session_manager.require(
                        role=role,
                        fail_redirect=fail_redirect)
                    return func(*a, **ka)
                return wrapper
            return decorator
        return(auth_require)

    def require(self, role=None,
                fail_redirect=None):
        """Ensure the user is logged in and has the required role.
        Optionally redirect the user to another page (typically /login)
        If no role is specified, any authenticated user will be authorized.
        :param role: require the user to hold exactly this role
        :type role: str.
        :param fail_redirect: redirect unauthorized users (optional)
        :type fail_redirect: str.
        """
        # Authentication: resolving current_user raises AAAException when
        # there is no logged-in session.
        try:
            cu = self.current_user
        except AAAException:
            if fail_redirect is None:
                raise AuthException("Unauthenticated user")
            else:
                # NOTE(review): relies on _redirect raising (bottle.redirect
                # raises HTTPResponse); otherwise execution would continue.
                self._redirect(fail_redirect)
        if role:
            # A specific role is required
            if role in self.current_user.roles:
                return
            if fail_redirect is None:
                raise AuthException("Unauthorized access: incorrect role")
            self._redirect(fail_redirect)
        if fail_redirect is None:
            raise AuthException("Unauthorized access: ")
        self._redirect(fail_redirect)
        return  # success

    def list_user_roles(self):
        """Return the full role mapping.
        :return: dict mapping username -> list of role names
        """
        return self._store.roles

    def list_users(self):
        # Usernames only (dict keys view of the role store).
        return self._store.roles.keys()

    def update_user_role(self, data):
        """Add or remove a single role for a user.

        ``data`` is a mapping with 'username', 'role' and a truthy/falsy
        'state' (truthy adds the role, falsy removes it).
        """
        user_roles = self._store.roles.get(data.get('username').lower())
        if not data.get('state'):
            user_roles.remove(data.get('role'))
        else:
            user_roles.append(data.get('role'))
        self._store.roles[data.get('username').lower()] = user_roles
        self._store.update_user_roles(self._store.roles)

    @property
    def current_user(self):
        """Current authenticated user
        :returns: User() instance, if authenticated
        :raises: AuthException otherwise
        """
        session = self._beaker_session
        username = session.get('username', None)
        if username is None:
            raise AuthException("Unauthenticated user")
        if username is not None:
            return User(username, self, session=session)
        raise AuthException("Unknown user: %s" % username)

    @property
    def user_is_anonymous(self):
        """Check if the current user is anonymous.
        :returns: True if the user is anonymous, False otherwise
        :raises: AuthException if the session username is unknown
        """
        try:
            username = self._beaker_session['username']
        except KeyError:
            return True
        return False

    def _check_roles(self, username):
        # Auto-register first-time logins with the default 'user' role.
        if username.lower() not in [x.lower() for x in self._store.roles.keys()]:
            self._store.roles[username.lower()] = ['user']
            self._store.update_user_roles(self._store.roles)

    def _verify_password(self, username, password):
        """Verify credentials
        :param username: AD Username
        :type username: str.
        :param password: AD Password
        :type password: str.
        :returns: ('Authenticated', details-dict) on success, otherwise an
            error message/exception and None
        """
        try:
            # bytes_mode only exists on some python-ldap versions; fall back
            # when the keyword is rejected.
            try:
                conn = ldap.initialize(
                    'ldap://{0}'.format(self.ldap_server), bytes_mode=True)
            except:
                conn = ldap.initialize(
                    'ldap://{0}'.format(self.ldap_server), bytes_mode=False)
            conn.set_option(ldap.OPT_REFERRALS, 0)
            # Bind with UPN-style credentials (user@domain).
            conn.simple_bind_s('{0}@{1}'.format(
                username, self.ldap_domain), password)
            user_details = conn.search_s(self.search_domain,
                                         ldap.SCOPE_SUBTREE, 'userPrincipalName=%s@%s' % (username, self.ldap_domain), ['mail', 'givenname', 'cn'])[0][1]
            # Attributes come back as single-element lists; unwrap them.
            for key in user_details:
                user_details[key] = user_details.get(key)[0]
        except ldap.INVALID_CREDENTIALS:
            return "Your credentials are invalid", None
        except ldap.SERVER_DOWN:
            return "The AD server appears to be down", None
        except ldap.LDAPError as e:
            # NOTE(review): `e.message` and `.has_key()` are Python 2 APIs;
            # on Python 3 this branch raises AttributeError -- confirm the
            # target interpreter or port to `e.args[0]` / `in`.
            if type(e.message) == dict and e.message.has_key('desc'):
                return e.message['desc'], None
            else:
                return e, None
        else:
            conn.unbind_s()
            return 'Authenticated', user_details

    def _setup_cookie(self, username, user_details):
        """Setup cookie for a user that just logged in"""
        session = self._beaker_session
        session['username'] = username.lower()
        session['short_name'] = user_details.get('givenName')
        session['full_name'] = user_details.get('cn')
        session['email'] = user_details.get('mail')
        if self.session_domain is not None:
            session.domain = self.session_domain
        self._save_session()
class User(object):
    def __init__(self, username, auth_obj, session=None):
        """Represent an authenticated user.

        Exposes ``username`` (lower-cased), ``short_name``, ``full_name``
        and ``email`` (taken from the beaker session), plus ``roles``
        looked up in the auth object's role store (empty list if the user
        is unknown).

        :param username: username, matched case-insensitively
        :type username: str.
        :param auth_obj: instance of :class:`Auth`
        :param session: beaker session mapping for the current request;
            may be None, in which case the name/email fields are None.
        """
        self._auth = auth_obj
        self.username = username.lower()
        # Tolerate a missing session instead of crashing on session.get
        # (the previous code raised AttributeError when session was None).
        session = session or {}
        self.short_name = session.get('short_name', None)
        self.full_name = session.get('full_name', None)
        self.email = session.get('email', None)
        # Unknown users get an empty role list rather than a KeyError.
        self.roles = self._auth._store.roles.get(self.username, [])
class Auth(BaseAuth):
    """Bottle-specific subclass: resolves the beaker session from the
    current bottle request and performs redirects through bottle."""

    @staticmethod
    def _redirect(location):
        # bottle.redirect raises an HTTPResponse (302) to `location`.
        bottle.redirect(location)

    @property
    def _beaker_session(self):
        """Get the beaker session object for the current bottle request."""
        return bottle.request.environ.get(self.session_key_name)

    def _save_session(self):
        # Persist session changes back through the beaker middleware.
        self._beaker_session.save()
| 34.550769 | 153 | 0.609849 | import sys
import os
import json
import ldap
import bottle
from collections import OrderedDict
class AAAException(Exception):
pass
class AuthException(AAAException):
pass
class UserExists(AAAException):
pass
class JsonRoleStore(object):
def __init__(self, directory, filename='roles.json'):
self.directory = directory
self.filename = filename
self._pathfinder()
self._load_roles()
def _pathfinder(self):
if not os.path.exists(self.directory):
os.makedirs(self.directory)
if not os.path.exists(os.path.join(self.directory, self.filename)):
self._save_roles({})
def _load_roles(self):
try:
with open(os.path.join(self.directory, self.filename), 'r') as role_file:
self.roles = json.loads(role_file.read())
except:
self.roles = OrderedDict()
def _save_roles(self, data):
with open(os.path.join(self.directory, self.filename), 'w') as json_file:
json_file.write(json.dumps(data))
self._load_roles()
def update_user_roles(self, role_data):
self._save_roles(role_data)
class BaseAuth(object):
def __init__(self, ldap_server, ldap_domain, session_key_name=None, session_domain=None, search_domain=None):
super(BaseAuth, self).__init__()
self.ldap_server = ldap_server
self.ldap_domain = ldap_domain
self.search_domain = search_domain
self.session_key_name = session_key_name or 'beaker.session'
self.session_domain = session_domain
self._store = JsonRoleStore('users')
def login(self, username, password, success_redirect=None,
fail_redirect=None):
auth_res, user_details = self._verify_password(
username,
password,
)
if auth_res == 'Authenticated':
self._setup_cookie(username, user_details)
self._check_roles(username)
if success_redirect:
self._redirect(success_redirect)
return True
if fail_redirect:
self._redirect(fail_redirect)
return False
def logout(self, success_redirect='/login', fail_redirect='/login'):
try:
session = self._beaker_session
session.delete()
except Exception as e:
log.debug("Exception %s while logging out." % repr(e))
self._redirect(fail_redirect)
self._redirect(success_redirect)
def make_auth_decorator(self, role=None,
fail_redirect='/login'):
session_manager = self
def auth_require(role=role,
fail_redirect=fail_redirect):
def decorator(func):
import functools
@functools.wraps(func)
def wrapper(*a, **ka):
session_manager.require(
role=role,
fail_redirect=fail_redirect)
return func(*a, **ka)
return wrapper
return decorator
return(auth_require)
def require(self, role=None,
fail_redirect=None):
try:
cu = self.current_user
except AAAException:
if fail_redirect is None:
raise AuthException("Unauthenticated user")
else:
self._redirect(fail_redirect)
if role:
if role in self.current_user.roles:
return
if fail_redirect is None:
raise AuthException("Unauthorized access: incorrect role")
self._redirect(fail_redirect)
if fail_redirect is None:
raise AuthException("Unauthorized access: ")
self._redirect(fail_redirect)
return
def list_user_roles(self):
return self._store.roles
def list_users(self):
return self._store.roles.keys()
def update_user_role(self, data):
user_roles = self._store.roles.get(data.get('username').lower())
if not data.get('state'):
user_roles.remove(data.get('role'))
else:
user_roles.append(data.get('role'))
self._store.roles[data.get('username').lower()] = user_roles
self._store.update_user_roles(self._store.roles)
@property
def current_user(self):
session = self._beaker_session
username = session.get('username', None)
if username is None:
raise AuthException("Unauthenticated user")
if username is not None:
return User(username, self, session=session)
raise AuthException("Unknown user: %s" % username)
@property
def user_is_anonymous(self):
try:
username = self._beaker_session['username']
except KeyError:
return True
return False
def _check_roles(self, username):
if username.lower() not in [x.lower() for x in self._store.roles.keys()]:
self._store.roles[username.lower()] = ['user']
self._store.update_user_roles(self._store.roles)
def _verify_password(self, username, password):
try:
try:
conn = ldap.initialize(
'ldap://{0}'.format(self.ldap_server), bytes_mode=True)
except:
conn = ldap.initialize(
'ldap://{0}'.format(self.ldap_server), bytes_mode=False)
conn.set_option(ldap.OPT_REFERRALS, 0)
conn.simple_bind_s('{0}@{1}'.format(
username, self.ldap_domain), password)
user_details = conn.search_s(self.search_domain,
ldap.SCOPE_SUBTREE, 'userPrincipalName=%s@%s' % (username, self.ldap_domain), ['mail', 'givenname', 'cn'])[0][1]
for key in user_details:
user_details[key] = user_details.get(key)[0]
except ldap.INVALID_CREDENTIALS:
return "Your credentials are invalid", None
except ldap.SERVER_DOWN:
return "The AD server appears to be down", None
except ldap.LDAPError as e:
if type(e.message) == dict and e.message.has_key('desc'):
return e.message['desc'], None
else:
return e, None
else:
conn.unbind_s()
return 'Authenticated', user_details
def _setup_cookie(self, username, user_details):
session = self._beaker_session
session['username'] = username.lower()
session['short_name'] = user_details.get('givenName')
session['full_name'] = user_details.get('cn')
session['email'] = user_details.get('mail')
if self.session_domain is not None:
session.domain = self.session_domain
self._save_session()
class User(object):
def __init__(self, username, auth_obj, session=None):
self._auth = auth_obj
self.username = username.lower()
self.short_name = session.get('short_name', None)
self.full_name = session.get('full_name', None)
self.email = session.get('email', None)
self.roles = self._auth._store.roles.get(self.username, [])
class Auth(BaseAuth):
@staticmethod
def _redirect(location):
bottle.redirect(location)
@property
def _beaker_session(self):
return bottle.request.environ.get(self.session_key_name)
def _save_session(self):
self._beaker_session.save()
| true | true |
1c35f8553536b7c20662b81ec1cea625e26ab5dc | 58,541 | py | Python | src/azure-firewall/azext_firewall/custom.py | needuv/azure-cli-extensions | 58a8fe5ee4738078ca81a1ba0aae1b12a805277e | [
"MIT"
] | null | null | null | src/azure-firewall/azext_firewall/custom.py | needuv/azure-cli-extensions | 58a8fe5ee4738078ca81a1ba0aae1b12a805277e | [
"MIT"
] | 1 | 2021-05-25T20:16:59.000Z | 2021-05-25T20:16:59.000Z | src/azure-firewall/azext_firewall/custom.py | needuv/azure-cli-extensions | 58a8fe5ee4738078ca81a1ba0aae1b12a805277e | [
"MIT"
] | 5 | 2020-09-08T22:46:48.000Z | 2020-11-08T14:54:35.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import copy
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.util import sdk_no_wait
from azure.cli.core.azclierror import UserFault, ServiceError
from ._client_factory import network_client_factory
logger = get_logger(__name__)
def _generic_list(cli_ctx, operation_name, resource_group_name):
    """Dispatch to list() or list_all() on the named operation group,
    depending on whether a resource group was supplied."""
    operations = getattr(network_client_factory(cli_ctx), operation_name)
    if not resource_group_name:
        return operations.list_all()
    return operations.list(resource_group_name)
def _get_property(items, name):
result = next((x for x in items if x.name.lower() == name.lower()), None)
if not result:
raise CLIError("Property '{}' does not exist".format(name))
return result
def _upsert(parent, collection_name, obj_to_add, key_name, warn=True):
if not getattr(parent, collection_name, None):
setattr(parent, collection_name, [])
collection = getattr(parent, collection_name, None)
value = getattr(obj_to_add, key_name)
if value is None:
raise CLIError(
"Unable to resolve a value for key '{}' with which to match.".format(key_name))
match = next((x for x in collection if getattr(x, key_name, None) == value), None)
if match:
if warn:
logger.warning("Item '%s' already exists. Replacing with new values.", value)
collection.remove(match)
collection.append(obj_to_add)
def _find_item_at_path(instance, path):
# path accepts the pattern property/name/property/name
curr_item = instance
path_comps = path.split('.')
for i, comp in enumerate(path_comps):
if i % 2:
# name
curr_item = next((x for x in curr_item if x.name == comp), None)
else:
# property
curr_item = getattr(curr_item, comp, None)
if not curr_item:
raise CLIError("unable to find '{}'...".format(comp))
return curr_item
# region AzureFirewall
def create_azure_firewall(cmd, resource_group_name, azure_firewall_name, location=None,
                          tags=None, zones=None, private_ranges=None, firewall_policy=None,
                          virtual_hub=None, sku=None,
                          dns_servers=None, enable_dns_proxy=None,
                          threat_intel_mode=None, hub_public_ip_count=None, allow_active_ftp=None, tier=None):
    """Create an Azure Firewall (VNet- or virtual-hub-based).

    DNS settings are mutually exclusive with an attached firewall policy;
    the AZFW_Hub SKU additionally requires a virtual hub and a public IP
    count, and does not support active FTP.
    """
    if firewall_policy and any([enable_dns_proxy, dns_servers]):
        raise CLIError('usage error: firewall policy and dns settings cannot co-exist.')
    if sku and sku.lower() == 'azfw_hub' and not all([virtual_hub, hub_public_ip_count]):
        raise CLIError('usage error: virtual hub and hub ip addresses are mandatory for azure firewall on virtual hub.')
    if sku and sku.lower() == 'azfw_hub' and allow_active_ftp:
        raise CLIError('usage error: allow active ftp is not allowed for azure firewall on virtual hub.')
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    # Model classes are resolved through the command loader so the correct
    # API-version variants are used.
    (AzureFirewall,
     SubResource,
     AzureFirewallSku,
     HubIPAddresses,
     HubPublicIPAddresses) = cmd.get_models('AzureFirewall',
                                            'SubResource',
                                            'AzureFirewallSku',
                                            'HubIPAddresses',
                                            'HubPublicIPAddresses')
    sku_instance = AzureFirewallSku(name=sku, tier=tier)
    firewall = AzureFirewall(location=location,
                             tags=tags,
                             zones=zones,
                             additional_properties={},
                             virtual_hub=SubResource(id=virtual_hub) if virtual_hub is not None else None,
                             firewall_policy=SubResource(id=firewall_policy) if firewall_policy is not None else None,
                             sku=sku_instance if sku is not None else None,
                             threat_intel_mode=threat_intel_mode,
                             hub_ip_addresses=HubIPAddresses(
                                 public_i_ps=HubPublicIPAddresses(
                                     count=hub_public_ip_count
                                 )
                             ) if hub_public_ip_count is not None else None)
    # SNAT private ranges travel in the loosely-typed additional_properties bag.
    if private_ranges is not None:
        if firewall.additional_properties is None:
            firewall.additional_properties = {}
        firewall.additional_properties['Network.SNAT.PrivateRanges'] = private_ranges
    # DNS settings apply only to the VNet SKU and only without a policy.
    if sku is None or sku.lower() == 'azfw_vnet':
        if firewall_policy is None:
            if firewall.additional_properties is None:
                firewall.additional_properties = {}
            if enable_dns_proxy is not None:
                # service side requires lowercase
                firewall.additional_properties['Network.DNS.EnableProxy'] = str(enable_dns_proxy).lower()
            if dns_servers is not None:
                firewall.additional_properties['Network.DNS.Servers'] = ','.join(dns_servers or '')
    if allow_active_ftp:
        if firewall.additional_properties is None:
            firewall.additional_properties = {}
        firewall.additional_properties['Network.FTP.AllowActiveFTP'] = "true"
    return client.begin_create_or_update(resource_group_name, azure_firewall_name, firewall)
# pylint: disable=too-many-branches
def update_azure_firewall(cmd, instance, tags=None, zones=None, private_ranges=None,
                          firewall_policy=None, virtual_hub=None,
                          dns_servers=None, enable_dns_proxy=None,
                          threat_intel_mode=None, hub_public_ip_addresses=None,
                          hub_public_ip_count=None, allow_active_ftp=None):
    """Generic-update handler: mutate only the fields the caller supplied
    on the fetched ``instance`` and return it for PUT-back."""
    if firewall_policy and any([enable_dns_proxy, dns_servers]):
        raise CLIError('usage error: firewall policy and dns settings cannot co-exist.')
    if all([hub_public_ip_addresses, hub_public_ip_count]):
        raise CLIError('Cannot add and remove public ip addresses at same time.')
    (SubResource,
     AzureFirewallPublicIPAddress,
     HubIPAddresses,
     HubPublicIPAddresses) = cmd.get_models('SubResource',
                                            'AzureFirewallPublicIPAddress',
                                            'HubIPAddresses',
                                            'HubPublicIPAddresses')
    if tags is not None:
        instance.tags = tags
    if zones is not None:
        instance.zones = zones
    if private_ranges is not None:
        if instance.additional_properties is None:
            instance.additional_properties = {}
        instance.additional_properties['Network.SNAT.PrivateRanges'] = private_ranges
    if firewall_policy is not None:
        instance.firewall_policy = SubResource(id=firewall_policy)
    if virtual_hub is not None:
        # Empty string detaches the firewall from its virtual hub.
        if virtual_hub == '':
            instance.virtual_hub = None
        else:
            instance.virtual_hub = SubResource(id=virtual_hub)
    if enable_dns_proxy is not None:
        # service side requires lowercase
        instance.additional_properties['Network.DNS.EnableProxy'] = str(enable_dns_proxy).lower()
    if dns_servers is not None:
        instance.additional_properties['Network.DNS.Servers'] = ','.join(dns_servers or '')
    if threat_intel_mode is not None:
        instance.threat_intel_mode = threat_intel_mode
    if instance.hub_ip_addresses is None and hub_public_ip_addresses is not None:
        raise CLIError('Cannot delete public ip addresses from vhub without creation.')
    if hub_public_ip_count is not None:
        try:
            # NOTE(review): when the new count equals the existing count this
            # raises 'Cannot decrease ...', which is misleading -- confirm
            # whether equality should be a no-op instead.
            if instance.hub_ip_addresses.public_i_ps.count is not None and hub_public_ip_count > instance.hub_ip_addresses.public_i_ps.count:  # pylint: disable=line-too-long
                instance.hub_ip_addresses.public_i_ps.count = hub_public_ip_count
            else:
                raise CLIError('Cannot decrease the count of hub ip addresses through --count.')
        except AttributeError:
            # No hub_ip_addresses structure yet: create it with the count.
            instance.hub_ip_addresses = HubIPAddresses(
                public_i_ps=HubPublicIPAddresses(
                    count=hub_public_ip_count
                )
            )
    if hub_public_ip_addresses is not None:
        try:
            # Explicit address list may only shrink (or keep) the set.
            if len(hub_public_ip_addresses) > instance.hub_ip_addresses.public_i_ps.count:
                raise CLIError('Number of public ip addresses must be less than or equal to existing ones.')
            instance.hub_ip_addresses.public_i_ps.addresses = [AzureFirewallPublicIPAddress(address=ip) for ip in hub_public_ip_addresses]  # pylint: disable=line-too-long
            instance.hub_ip_addresses.public_i_ps.count = len(hub_public_ip_addresses)
        except AttributeError:
            raise CLIError('Public Ip addresses must exist before deleting them.')
    if allow_active_ftp is not None:
        if instance.additional_properties is None:
            instance.additional_properties = {}
        if allow_active_ftp:
            instance.additional_properties['Network.FTP.AllowActiveFTP'] = "true"
        elif 'Network.FTP.AllowActiveFTP' in instance.additional_properties:
            del instance.additional_properties['Network.FTP.AllowActiveFTP']
    return instance
def list_azure_firewalls(cmd, resource_group_name=None):
    """List Azure Firewalls in a resource group, or subscription-wide when
    no resource group is given."""
    return _generic_list(cmd.cli_ctx, 'azure_firewalls', resource_group_name)
# pylint: disable=unused-argument
def create_af_ip_configuration(cmd, resource_group_name, azure_firewall_name, item_name,
                               public_ip_address, virtual_network_name=None, subnet='AzureFirewallSubnet',
                               management_item_name=None, management_public_ip_address=None,
                               management_virtual_network_name=None, management_subnet='AzureFirewallManagementSubnet'):
    """Add (or replace) an IP configuration on an existing firewall,
    optionally setting the management IP configuration in the same PUT.

    Returns the created IP configuration from the updated firewall.
    """
    AzureFirewallIPConfiguration, SubResource = cmd.get_models('AzureFirewallIPConfiguration', 'SubResource')
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    # Read-modify-write on the whole firewall resource.
    af = client.get(resource_group_name, azure_firewall_name)
    config = AzureFirewallIPConfiguration(
        name=item_name,
        public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
        subnet=SubResource(id=subnet) if subnet else None
    )
    _upsert(af, 'ip_configurations', config, 'name', warn=False)
    if management_item_name is not None:
        management_config = AzureFirewallIPConfiguration(
            name=management_item_name,
            public_ip_address=SubResource(id=management_public_ip_address) if management_public_ip_address else None,
            subnet=SubResource(id=management_subnet) if management_subnet else None
        )
        af.management_ip_configuration = management_config
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, af)
    return _get_property(poller.result().ip_configurations, item_name)
def create_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name, item_name,
                                          public_ip_address, virtual_network_name,  # pylint: disable=unused-argument
                                          subnet='AzureFirewallManagementSubnet'):
    """Create/replace the management IP configuration of an Azure Firewall.

    Returns the resulting management IP configuration after the update completes.
    """
    AzureFirewallIPConfiguration, SubResource = cmd.get_models('AzureFirewallIPConfiguration', 'SubResource')
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, azure_firewall_name)
    config = AzureFirewallIPConfiguration(
        name=item_name,
        public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
        subnet=SubResource(id=subnet) if subnet else None
    )
    af.management_ip_configuration = config
    # Fix: use the track2 LRO method (begin_*), matching how the same
    # `azure_firewalls` client is invoked in create_af_ip_configuration.
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, af)
    return poller.result().management_ip_configuration
def update_af_management_ip_configuration(cmd, instance, public_ip_address=None, virtual_network_name=None,  # pylint: disable=unused-argument
                                          subnet='AzureFirewallManagementSubnet'):
    """Patch the firewall's management IP configuration in place and return the firewall."""
    SubResource = cmd.get_models('SubResource')
    config = instance.management_ip_configuration
    if public_ip_address is not None:
        config.public_ip_address = SubResource(id=public_ip_address)
    if subnet is not None:
        config.subnet = SubResource(id=subnet)
    return instance
def set_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name, parameters):
    """Replace the Azure Firewall with *parameters*; return its management IP configuration."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    # Fix: track2 clients expose only the begin_-prefixed LRO method, which the
    # rest of this module already uses on the same client.
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, parameters)
    return poller.result().management_ip_configuration
def show_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name):
    """Return the management IP configuration of the given Azure Firewall."""
    firewall = network_client_factory(cmd.cli_ctx).azure_firewalls.get(
        resource_group_name, azure_firewall_name)
    return firewall.management_ip_configuration
def delete_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name):
    """Clear the management IP configuration of an Azure Firewall.

    Returns the (now None) management IP configuration after the update completes.
    """
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, azure_firewall_name)
    af.management_ip_configuration = None
    # Fix: use the track2 LRO method (begin_*), consistent with the rest of the module.
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, af)
    return poller.result().management_ip_configuration
def delete_af_ip_configuration(cmd, resource_group_name, resource_name, item_name, no_wait=False):  # pylint: disable=unused-argument
    """Delete IP configuration *item_name* from an Azure Firewall.

    When the last IP configuration is removed, the management IP configuration
    is dropped too (it cannot exist alone). Raises CLIError when the waited
    update still reports the configuration afterwards.
    """
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, resource_name)
    keep_items = \
        [x for x in af.ip_configurations if x.name.lower() != item_name.lower()]
    af.ip_configurations = keep_items if keep_items else None
    if not keep_items:
        if af.management_ip_configuration is not None:
            logger.warning('Management ip configuration cannot exist without regular ip config. Delete it as well.')
            af.management_ip_configuration = None
    if no_wait:
        # Fix: the fire-and-forget branch previously referenced the track1
        # `create_or_update` name; use the same begin_* LRO method as below.
        sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, resource_name, af)
    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, resource_name, af).result()
        if next((x for x in getattr(result, 'ip_configurations') if x.name.lower() == item_name.lower()), None):
            raise CLIError("Failed to delete '{}' on '{}'".format(item_name, resource_name))
def build_af_rule_list(item_param_name, collection_param_name):
    """Fabricate and register a module-level 'list' handler for a firewall rule kind.

    The generated function is installed on this module as
    'list_af_<item_param_name>s'; that name is returned.
    """
    import sys

    def list_func(cmd, resource_group_name, firewall_name, collection_name):
        firewall = network_client_factory(cmd.cli_ctx).azure_firewalls.get(resource_group_name, firewall_name)
        return _find_item_at_path(firewall, '{}.{}'.format(collection_param_name, collection_name))

    generated_name = 'list_af_{}s'.format(item_param_name)
    setattr(sys.modules[__name__], generated_name, list_func)
    return generated_name
def build_af_rule_show(item_param_name, collection_param_name):
    """Fabricate and register a module-level 'show' handler for a firewall rule kind.

    The generated function is installed on this module as
    'show_af_<item_param_name>'; that name is returned.
    """
    import sys

    def show_func(cmd, resource_group_name, firewall_name, collection_name, item_name):
        firewall = network_client_factory(cmd.cli_ctx).azure_firewalls.get(resource_group_name, firewall_name)
        return _find_item_at_path(firewall, '{}.{}.rules.{}'.format(collection_param_name, collection_name, item_name))

    generated_name = 'show_af_{}'.format(item_param_name)
    setattr(sys.modules[__name__], generated_name, show_func)
    return generated_name
def build_af_rule_delete(item_param_name, collection_param_name):
    """Fabricate and register a module-level 'delete' handler for a firewall rule kind.

    The generated function is installed on this module as
    'delete_af_<item_param_name>'; that name is returned.
    """
    import sys

    def delete_func(cmd, resource_group_name, firewall_name, collection_name, item_name):
        client = network_client_factory(cmd.cli_ctx).azure_firewalls
        firewall = client.get(resource_group_name, firewall_name)
        collection = _find_item_at_path(firewall, '{}.{}'.format(collection_param_name, collection_name))
        # Rebuild the rules list without the targeted rule, then push the update.
        collection.rules = [rule for rule in collection.rules if rule.name != item_name]
        client.begin_create_or_update(resource_group_name, firewall_name, firewall)

    generated_name = 'delete_af_{}'.format(item_param_name)
    setattr(sys.modules[__name__], generated_name, delete_func)
    return generated_name
def _upsert_af_rule(cmd, resource_group_name, firewall_name, collection_param_name, collection_class,
                    item_class, item_name, params, collection_params):
    """Insert rule *item_name* into a firewall rule collection, creating the collection on demand.

    Shared implementation behind the create_af_*_rule commands.
    ``collection_params`` must carry 'name', 'priority' and an 'action' dict of
    the form {'type': <action-or-None>}. Raises CLIError when priority/action
    are supplied for an existing collection, or missing when a new collection
    must be created. Returns the newly inserted rule from the updated firewall.
    """
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, firewall_name)
    collection = getattr(af, collection_param_name, [])
    collection_name = collection_params.get('name', '')
    priority = collection_params.get('priority', None)
    action = collection_params.get('action', None)
    # Collection names are matched case-insensitively.
    collection_match = next((x for x in collection if x.name.lower() == collection_name.lower()), None)
    usage_error = CLIError("usage error: --collection-name EXISTING_NAME | --collection-name NEW_NAME --priority"
                           " INT --action ACTION")
    if collection_match:
        # Existing collection: priority/action would silently be ignored, so reject them.
        if any([priority, action['type']]):
            logger.warning("Rule collection '%s' already exists.", collection_params['name'])
            raise usage_error
    else:
        # New collection: both priority and action are mandatory.
        if not all([priority, action['type']]):
            logger.warning("Rule collection '%s' does not exist and needs to be created.", collection_params['name'])
            raise usage_error
        # create new collection
        logger.warning("Creating rule collection '%s'.", collection_params['name'])
        collection_match = collection_class(**collection_params)
        collection_match.rules = []
    collection_match.rules.append(item_class(**params))
    _upsert(af, collection_param_name, collection_match, 'name', warn=False)
    af = client.begin_create_or_update(resource_group_name, firewall_name, af).result()
    return _find_item_at_path(af, '{}.{}.rules.{}'.format(collection_param_name, collection_name, item_name))
def create_af_network_rule(cmd, resource_group_name, azure_firewall_name, collection_name, item_name,
                           destination_ports, protocols, destination_fqdns=None, source_addresses=None,
                           destination_addresses=None, description=None, priority=None, action=None,
                           source_ip_groups=None, destination_ip_groups=None):
    """Create a network rule inside (or together with) the named rule collection."""
    AzureFirewallNetworkRule, AzureFirewallNetworkRuleCollection = cmd.get_models(
        'AzureFirewallNetworkRule', 'AzureFirewallNetworkRuleCollection')
    rule_params = dict(
        name=item_name,
        description=description,
        source_addresses=source_addresses,
        destination_addresses=destination_addresses,
        destination_ports=destination_ports,
        destination_fqdns=destination_fqdns,
        protocols=protocols,
        destination_ip_groups=destination_ip_groups,
        source_ip_groups=source_ip_groups)
    collection_params = dict(name=collection_name, priority=priority, action={'type': action})
    return _upsert_af_rule(cmd, resource_group_name, azure_firewall_name,
                           'network_rule_collections', AzureFirewallNetworkRuleCollection, AzureFirewallNetworkRule,
                           item_name, rule_params, collection_params)
def create_af_nat_rule(cmd, resource_group_name, azure_firewall_name, collection_name, item_name,
                       destination_addresses, destination_ports, protocols, translated_port, source_addresses=None,
                       translated_address=None, translated_fqdn=None, description=None, priority=None, action=None,
                       source_ip_groups=None):
    """Create a NAT rule inside (or together with) the named rule collection."""
    AzureFirewallNatRule, AzureFirewallNatRuleCollection = cmd.get_models(
        'AzureFirewallNatRule', 'AzureFirewallNatRuleCollection')
    rule_params = dict(
        name=item_name,
        description=description,
        source_addresses=source_addresses,
        destination_addresses=destination_addresses,
        destination_ports=destination_ports,
        protocols=protocols,
        translated_address=translated_address,
        translated_port=translated_port,
        translated_fqdn=translated_fqdn,
        source_ip_groups=source_ip_groups)
    collection_params = dict(name=collection_name, priority=priority, action={'type': action})
    return _upsert_af_rule(cmd, resource_group_name, azure_firewall_name,
                           'nat_rule_collections', AzureFirewallNatRuleCollection, AzureFirewallNatRule,
                           item_name, rule_params, collection_params)
def create_af_application_rule(cmd, resource_group_name, azure_firewall_name, collection_name, item_name,
                               protocols, description=None, source_addresses=None, target_fqdns=None,
                               fqdn_tags=None, priority=None, action=None, source_ip_groups=None):
    """Create an application rule inside (or together with) the named rule collection."""
    AzureFirewallApplicationRule, AzureFirewallApplicationRuleCollection = cmd.get_models(
        'AzureFirewallApplicationRule', 'AzureFirewallApplicationRuleCollection')
    rule_params = dict(
        name=item_name,
        description=description,
        source_addresses=source_addresses,
        protocols=protocols,
        target_fqdns=target_fqdns,
        fqdn_tags=fqdn_tags,
        source_ip_groups=source_ip_groups)
    collection_params = dict(name=collection_name, priority=priority, action={'type': action})
    return _upsert_af_rule(cmd, resource_group_name, azure_firewall_name,
                           'application_rule_collections', AzureFirewallApplicationRuleCollection,
                           AzureFirewallApplicationRule, item_name, rule_params, collection_params)
def create_azure_firewall_threat_intel_allowlist(cmd, resource_group_name, azure_firewall_name,
                                                 ip_addresses=None, fqdns=None):
    """Store the threat-intel allowlist in the firewall's additional_properties bag."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    firewall = client.get(resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name)
    # Lazily initialize the property bag the first time anything is written.
    if (ip_addresses is not None or fqdns is not None) and firewall.additional_properties is None:
        firewall.additional_properties = {}
    if ip_addresses is not None:
        firewall.additional_properties['ThreatIntel.Whitelist.IpAddresses'] = ip_addresses
    if fqdns is not None:
        firewall.additional_properties['ThreatIntel.Whitelist.FQDNs'] = fqdns
    return client.begin_create_or_update(resource_group_name, azure_firewall_name, firewall)
def update_azure_firewall_threat_intel_allowlist(instance, ip_addresses=None, fqdns=None):
    """Patch threat-intel allowlist entries on *instance* in place and return it."""
    updates = (('ThreatIntel.Whitelist.IpAddresses', ip_addresses),
               ('ThreatIntel.Whitelist.FQDNs', fqdns))
    for key, value in updates:
        if value is not None:
            if instance.additional_properties is None:
                instance.additional_properties = {}
            instance.additional_properties[key] = value
    return instance
def show_azure_firewall_threat_intel_allowlist(cmd, resource_group_name, azure_firewall_name):
    """Return the firewall's additional_properties bag holding the threat-intel allowlist."""
    firewall = network_client_factory(cmd.cli_ctx).azure_firewalls.get(
        resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name)
    if firewall.additional_properties is None:
        # Normalize a missing bag to an empty dict so callers always get a mapping.
        firewall.additional_properties = {}
    return firewall.additional_properties
def delete_azure_firewall_threat_intel_allowlist(cmd, resource_group_name, azure_firewall_name):
    """Drop both threat-intel allowlist keys from the firewall and push the update."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    firewall = client.get(resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name)
    props = firewall.additional_properties
    if props is not None:
        for key in ('ThreatIntel.Whitelist.IpAddresses', 'ThreatIntel.Whitelist.FQDNs'):
            props.pop(key, None)
    return client.begin_create_or_update(resource_group_name, azure_firewall_name, firewall)
# endregion
# region AzureFirewallPolicies
def create_azure_firewall_policies(cmd, resource_group_name, firewall_policy_name, base_policy=None,
                                   threat_intel_mode=None, location=None, tags=None, ip_addresses=None,
                                   fqdns=None,
                                   dns_servers=None, enable_dns_proxy=None,
                                   sku=None, intrusion_detection_mode=None,
                                   key_vault_secret_id=None, certificate_name=None, user_assigned_identity=None):
    """Create an Azure Firewall Policy.

    Optional groups: threat-intel allowlist (ip_addresses/fqdns), DNS settings
    (2020-05-01+), SKU / intrusion detection / TLS inspection (2020-07-01+),
    and a user-assigned managed identity. Returns the create/update LRO poller.
    """
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    (FirewallPolicy,
     SubResource,
     FirewallPolicyThreatIntelWhitelist,
     DnsSettings,
     FirewallPolicySku,
     ManagedServiceIdentityUserAssignedIdentitiesValue,
     ManagedServiceIdentity) = cmd.get_models('FirewallPolicy',
                                              'SubResource',
                                              'FirewallPolicyThreatIntelWhitelist',
                                              'DnsSettings',
                                              'FirewallPolicySku',
                                              'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties',
                                              'ManagedServiceIdentity')
    firewall_policy = FirewallPolicy(base_policy=SubResource(id=base_policy) if base_policy is not None else None,
                                     threat_intel_mode=threat_intel_mode,
                                     location=location,
                                     tags=tags)
    # NOTE(review): the allowlist is only attached when BOTH ip_addresses and
    # fqdns are supplied, while update_azure_firewall_policies accepts either
    # one (any([...])) — confirm whether 'and' is intentional here.
    threat_intel_allowlist = FirewallPolicyThreatIntelWhitelist(ip_addresses=ip_addresses,
                                                                fqdns=fqdns) if ip_addresses and fqdns else None
    firewall_policy.threat_intel_whitelist = threat_intel_allowlist
    if cmd.supported_api_version(min_api='2020-05-01'):
        if any([dns_servers, enable_dns_proxy]):
            dns_settings = DnsSettings(servers=dns_servers,
                                       enable_proxy=enable_dns_proxy or False)
            firewall_policy.dns_settings = dns_settings
    if cmd.supported_api_version(min_api='2020-07-01'):
        if sku is not None:
            firewall_policy.sku = FirewallPolicySku(tier=sku)
        if intrusion_detection_mode is not None:
            (FirewallPolicyIntrusionDetection,
             FirewallPolicyIntrusionDetectionConfiguration) = \
                cmd.get_models('FirewallPolicyIntrusionDetection',
                               'FirewallPolicyIntrusionDetectionConfiguration')
            # Start with an empty configuration; overrides/bypass rules are
            # added later by add_firewall_policy_intrusion_detection_config.
            firewall_policy.intrusion_detection = FirewallPolicyIntrusionDetection(
                mode=intrusion_detection_mode,
                configuration=FirewallPolicyIntrusionDetectionConfiguration()
            )
        if certificate_name is not None and key_vault_secret_id is not None:
            FirewallPolicyTransportSecurity, FirewallPolicyCertificateAuthority = \
                cmd.get_models('FirewallPolicyTransportSecurity', 'FirewallPolicyCertificateAuthority')
            certificate_auth = FirewallPolicyCertificateAuthority(key_vault_secret_id=key_vault_secret_id,
                                                                 name=certificate_name)
            firewall_policy.transport_security = FirewallPolicyTransportSecurity(certificate_authority=certificate_auth)
    # identity
    if user_assigned_identity is not None:
        user_assigned_indentity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
        user_assigned_identities_instance = dict()
        user_assigned_identities_instance[user_assigned_identity] = user_assigned_indentity_instance
        identity_instance = ManagedServiceIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identities_instance
        )
        firewall_policy.identity = identity_instance
    return client.begin_create_or_update(resource_group_name, firewall_policy_name, firewall_policy)
def update_azure_firewall_policies(cmd,
                                   instance, tags=None, threat_intel_mode=None, ip_addresses=None,
                                   fqdns=None,
                                   dns_servers=None, enable_dns_proxy=None,
                                   sku=None, intrusion_detection_mode=None,
                                   key_vault_secret_id=None, certificate_name=None, user_assigned_identity=None):
    """Patch an Azure Firewall Policy in place (generic-update handler).

    Only parameters that are not None are applied; API-version-gated features
    (DNS settings, SKU, intrusion detection, TLS inspection) are applied only
    when the current profile supports them. Returns the mutated *instance*.
    """
    (FirewallPolicyThreatIntelWhitelist, FirewallPolicySku) = cmd.get_models('FirewallPolicyThreatIntelWhitelist', 'FirewallPolicySku')
    if tags is not None:
        instance.tags = tags
    if threat_intel_mode is not None:
        instance.threat_intel_mode = threat_intel_mode
    if cmd.supported_api_version(min_api='2020-05-01'):
        if instance.dns_settings is None and any([dns_servers, enable_dns_proxy]):
            DnsSettings = cmd.get_models('DnsSettings')
            instance.dns_settings = DnsSettings()
        if dns_servers is not None:
            instance.dns_settings.servers = dns_servers
        if enable_dns_proxy is not None:
            instance.dns_settings.enable_proxy = enable_dns_proxy
        if instance.threat_intel_whitelist is None and any([ip_addresses, fqdns]):
            # Fix: the keyword was misspelled 'fqnds', which raised TypeError
            # whenever the allowlist had to be created from scratch.
            instance.threat_intel_whitelist = FirewallPolicyThreatIntelWhitelist(ip_addresses=ip_addresses,
                                                                                 fqdns=fqdns)
        if ip_addresses is not None:
            instance.threat_intel_whitelist.ip_addresses = ip_addresses
        if fqdns is not None:
            instance.threat_intel_whitelist.fqdns = fqdns
    if cmd.supported_api_version(min_api='2020-07-01'):
        if sku is not None:
            instance.sku = FirewallPolicySku(tier=sku)
        if intrusion_detection_mode is not None:
            if instance.intrusion_detection is not None:
                instance.intrusion_detection.mode = intrusion_detection_mode
            else:
                (FirewallPolicyIntrusionDetection, FirewallPolicyIntrusionDetectionConfiguration) = \
                    cmd.get_models('FirewallPolicyIntrusionDetection', 'FirewallPolicyIntrusionDetectionConfiguration')
                instance.intrusion_detection = FirewallPolicyIntrusionDetection(
                    mode=intrusion_detection_mode,
                    configuration=FirewallPolicyIntrusionDetectionConfiguration()
                )
        if certificate_name is not None and key_vault_secret_id is not None:
            FirewallPolicyTransportSecurity, FirewallPolicyCertificateAuthority = \
                cmd.get_models('FirewallPolicyTransportSecurity', 'FirewallPolicyCertificateAuthority')
            certificate_auth = FirewallPolicyCertificateAuthority(key_vault_secret_id=key_vault_secret_id,
                                                                 name=certificate_name)
            instance.transport_security = FirewallPolicyTransportSecurity(certificate_authority=certificate_auth)
    # identity
    (ManagedServiceIdentityUserAssignedIdentitiesValue,
     ManagedServiceIdentity) = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties',
                                              'ManagedServiceIdentity')
    if user_assigned_identity is not None:
        user_assigned_indentity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
        user_assigned_identities_instance = dict()
        user_assigned_identities_instance[user_assigned_identity] = user_assigned_indentity_instance
        identity_instance = ManagedServiceIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identities_instance
        )
        instance.identity = identity_instance
    return instance
def set_azure_firewall_policies(cmd, resource_group_name, firewall_policy_name, parameters):
    """PUT the full firewall policy; an absent identity is sent explicitly as type 'None'."""
    if parameters.identity is None:
        # An explicit 'None' identity must be sent so any existing identity is cleared.
        ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
        parameters.identity = ManagedServiceIdentity(type="None", user_assigned_identities=None)
    return network_client_factory(cmd.cli_ctx).firewall_policies.begin_create_or_update(
        resource_group_name, firewall_policy_name, parameters)
def list_azure_firewall_policies(cmd, resource_group_name=None):
    """List firewall policies, subscription-wide unless a resource group is given."""
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    if resource_group_name is None:
        return client.list_all()
    return client.list(resource_group_name)
def add_firewall_policy_intrusion_detection_config(cmd,
                                                   resource_group_name,
                                                   firewall_policy_name,
                                                   signature_id=None,
                                                   signature_mode=None,
                                                   bypass_rule_name=None,
                                                   bypass_rule_description=None,
                                                   bypass_rule_protocol=None,
                                                   bypass_rule_source_addresses=None,
                                                   bypass_rule_destination_addresses=None,
                                                   bypass_rule_destination_ports=None,
                                                   bypass_rule_source_ip_groups=None,
                                                   bypass_rule_destination_ip_groups=None):
    """Append a signature override and/or a bypass rule to the policy's intrusion detection config.

    A signature override requires both signature_id and signature_mode; a
    bypass rule is keyed on bypass_rule_name. Raises
    RequiredArgumentMissingError when intrusion detection is not configured,
    and InvalidArgumentValueError on a duplicate signature id. Returns the
    updated intrusion detection configuration.
    """
    from azure.cli.core.azclierror import RequiredArgumentMissingError, InvalidArgumentValueError
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    firewall_policy = client.get(resource_group_name, firewall_policy_name)
    if firewall_policy.intrusion_detection is None:
        raise RequiredArgumentMissingError('Intrusion detection mode is not set. Setting it by update command first')
    if signature_id is not None and signature_mode is not None:
        # Reject duplicates: overrides are append-only here; updating goes
        # through a separate command.
        for overrided_signature in firewall_policy.intrusion_detection.configuration.signature_overrides:
            if overrided_signature.id == signature_id:
                raise InvalidArgumentValueError(
                    'Signature ID {} exists. Delete it first or try update instead'.format(signature_id))
        FirewallPolicyIntrusionDetectionSignatureSpecification = \
            cmd.get_models('FirewallPolicyIntrusionDetectionSignatureSpecification')
        signature_override = FirewallPolicyIntrusionDetectionSignatureSpecification(
            id=signature_id,
            mode=signature_mode
        )
        firewall_policy.intrusion_detection.configuration.signature_overrides.append(signature_override)
    if bypass_rule_name is not None:
        FirewallPolicyIntrusionDetectionBypassTrafficSpecifications = \
            cmd.get_models('FirewallPolicyIntrusionDetectionBypassTrafficSpecifications')
        bypass_traffic = FirewallPolicyIntrusionDetectionBypassTrafficSpecifications(
            name=bypass_rule_name,
            description=bypass_rule_description,
            protocol=bypass_rule_protocol,
            source_addresses=bypass_rule_source_addresses,
            destination_addresses=bypass_rule_destination_addresses,
            destination_ports=bypass_rule_destination_ports,
            source_ip_groups=bypass_rule_source_ip_groups,
            destination_ip_groups=bypass_rule_destination_ip_groups,
        )
        firewall_policy.intrusion_detection.configuration.bypass_traffic_settings.append(bypass_traffic)
    # sdk_no_wait(False, ...) waits for completion; kept for the shared call shape.
    result = sdk_no_wait(False,
                         client.begin_create_or_update,
                         resource_group_name,
                         firewall_policy_name,
                         firewall_policy).result()
    return result.intrusion_detection.configuration
def list_firewall_policy_intrusion_detection_config(cmd, resource_group_name, firewall_policy_name):
    """Return the policy's intrusion detection configuration, or [] when unset."""
    policy = network_client_factory(cmd.cli_ctx).firewall_policies.get(
        resource_group_name, firewall_policy_name)
    return [] if policy.intrusion_detection is None else policy.intrusion_detection.configuration
def remove_firewall_policy_intrusion_detection_config(cmd,
                                                      resource_group_name,
                                                      firewall_policy_name,
                                                      signature_id=None,
                                                      bypass_rule_name=None):
    """Remove a signature override and/or a bypass rule from the policy's intrusion detection config.

    Raises RequiredArgumentMissingError when intrusion detection is not
    configured and InvalidArgumentValueError when the requested item does not
    exist. Returns the updated intrusion detection configuration.
    """
    from azure.cli.core.azclierror import RequiredArgumentMissingError, InvalidArgumentValueError
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    firewall_policy = client.get(resource_group_name, firewall_policy_name)
    if firewall_policy.intrusion_detection is None:
        raise RequiredArgumentMissingError('Intrusion detection mode is not set. Setting it by update command first')
    if signature_id is not None:
        signatures = firewall_policy.intrusion_detection.configuration.signature_overrides
        new_signatures = [s for s in signatures if s.id != signature_id]
        if len(signatures) == len(new_signatures):
            raise InvalidArgumentValueError("Signature ID {} doesn't exist".format(signature_id))
        firewall_policy.intrusion_detection.configuration.signature_overrides = new_signatures
    if bypass_rule_name is not None:
        bypass_settings = firewall_policy.intrusion_detection.configuration.bypass_traffic_settings
        new_bypass_settings = [s for s in bypass_settings if s.name != bypass_rule_name]
        if len(bypass_settings) == len(new_bypass_settings):
            # Fix: this message previously interpolated signature_id, which is
            # unrelated (and typically None) in the bypass-rule branch.
            raise InvalidArgumentValueError("Bypass rule with name {} doesn't exist".format(bypass_rule_name))
        firewall_policy.intrusion_detection.configuration.bypass_traffic_settings = new_bypass_settings
    result = sdk_no_wait(False,
                         client.begin_create_or_update,
                         resource_group_name,
                         firewall_policy_name,
                         firewall_policy).result()
    return result.intrusion_detection.configuration
def create_azure_firewall_policy_rule_collection_group(cmd, resource_group_name, firewall_policy_name,
                                                       rule_collection_group_name, priority):
    """Create (or overwrite) a rule collection group on a firewall policy."""
    FirewallPolicyRuleCollectionGroup = cmd.get_models('FirewallPolicyRuleCollectionGroup')
    group = FirewallPolicyRuleCollectionGroup(priority=priority, name=rule_collection_group_name)
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, group)
def update_azure_firewall_policy_rule_collection_group(instance, priority=None, tags=None):
    """Patch priority/tags on a rule collection group in place; None leaves a field untouched."""
    if priority is not None:
        instance.priority = priority
    if tags is not None:
        instance.tags = tags
    return instance
def add_azure_firewall_policy_nat_rule_collection(cmd, resource_group_name, firewall_policy_name,
                                                  rule_collection_group_name,
                                                  rule_collection_name, rule_priority, translated_address=None,
                                                  translated_fqdn=None, translated_port=None, nat_action=None,
                                                  rule_name=None, description=None, ip_protocols=None,
                                                  source_addresses=None, destination_addresses=None,
                                                  destination_ports=None, source_ip_groups=None):
    """Append a new NAT rule collection (with one NAT rule) to a rule collection group.

    Returns the create/update LRO poller for the rule collection group.
    """
    FirewallPolicyNatRuleCollection, FirewallPolicyNatRuleCollectionAction, \
        NatRule, FirewallPolicyRuleNetworkProtocol = \
        cmd.get_models('FirewallPolicyNatRuleCollection', 'FirewallPolicyNatRuleCollectionAction',
                       'NatRule', 'FirewallPolicyRuleNetworkProtocol')
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    # Coerce protocol strings to the SDK enum.
    # NOTE(review): this assumes ip_protocols is always provided (iterating
    # None would raise TypeError) — confirm the CLI marks it required.
    ip_protocols = list(map(FirewallPolicyRuleNetworkProtocol, ip_protocols))
    nat_rule = NatRule(name=rule_name,
                       description=description,
                       rule_type="NatRule",
                       ip_protocols=ip_protocols,
                       source_addresses=source_addresses,
                       destination_addresses=destination_addresses,
                       destination_ports=destination_ports,
                       translated_address=translated_address,
                       translated_fqdn=translated_fqdn,
                       translated_port=translated_port,
                       source_ip_groups=source_ip_groups)
    nat_rule_collection = FirewallPolicyNatRuleCollection(name=rule_collection_name,
                                                          priority=rule_priority,
                                                          rule_collection_type="FirewallPolicyNatRuleCollection",
                                                          action=FirewallPolicyNatRuleCollectionAction(
                                                              type=nat_action
                                                          ),
                                                          rules=[nat_rule])
    rule_collection_group.rule_collections.append(nat_rule_collection)
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
# pylint: disable=too-many-locals
def add_azure_firewall_policy_filter_rule_collection(cmd, resource_group_name, firewall_policy_name,
                                                     rule_collection_group_name, rule_collection_name,
                                                     rule_priority, filter_action=None, rule_name=None,
                                                     rule_type=None, description=None, ip_protocols=None,
                                                     source_addresses=None, destination_addresses=None,
                                                     destination_ports=None,
                                                     protocols=None, fqdn_tags=None, target_fqdns=None,
                                                     source_ip_groups=None, destination_ip_groups=None,
                                                     target_urls=None, enable_tls_inspection=False, web_categories=None):
    """Append a new filter rule collection (with one network or application rule) to a group.

    rule_type "NetworkRule" builds a NetworkRule; anything else falls through
    to an ApplicationRule. Returns the create/update LRO poller for the group.
    """
    NetworkRule, FirewallPolicyRuleApplicationProtocol,\
        ApplicationRule, FirewallPolicyFilterRuleCollectionAction, FirewallPolicyFilterRuleCollection =\
        cmd.get_models('NetworkRule', 'FirewallPolicyRuleApplicationProtocol',
                       'ApplicationRule', 'FirewallPolicyFilterRuleCollectionAction',
                       'FirewallPolicyFilterRuleCollection')
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    rule = None
    if rule_type == "NetworkRule":
        rule = NetworkRule(name=rule_name,
                           description=description,
                           rule_type=rule_type,
                           ip_protocols=ip_protocols,
                           source_addresses=source_addresses,
                           destination_addresses=destination_addresses,
                           destination_ports=destination_ports,
                           source_ip_groups=source_ip_groups,
                           destination_ip_groups=destination_ip_groups)
    else:
        def map_application_rule_protocol(item):
            # item is a {'protocol_type': ..., 'port': ...} dict produced by the CLI parser.
            return FirewallPolicyRuleApplicationProtocol(protocol_type=item['protocol_type'],
                                                         port=int(item['port']))
        protocols = list(map(map_application_rule_protocol, protocols))
        rule = ApplicationRule(name=rule_name,
                               description=description,
                               rule_type=rule_type,
                               source_addresses=source_addresses,
                               protocols=protocols,
                               destination_addresses=destination_addresses,
                               fqdn_tags=fqdn_tags,
                               target_fqdns=target_fqdns,
                               target_urls=target_urls,
                               source_ip_groups=source_ip_groups,
                               terminate_tls=enable_tls_inspection,
                               web_categories=web_categories)
    # NOTE(review): the nat collection above uses the full discriminator
    # "FirewallPolicyNatRuleCollection" while this uses "FirewallPolicyFilterRule";
    # add_azure_firewall_policy_filter_rule also matches on the short form —
    # confirm against the SDK's expected rule_collection_type value.
    filter_rule_collection = FirewallPolicyFilterRuleCollection(name=rule_collection_name,
                                                                priority=rule_priority,
                                                                rule_collection_type="FirewallPolicyFilterRule",
                                                                action=FirewallPolicyFilterRuleCollectionAction(
                                                                    type=filter_action
                                                                ),
                                                                rules=[rule])
    rule_collection_group.rule_collections.append(filter_rule_collection)
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
def remove_azure_firewall_policy_rule_collection(cmd, resource_group_name, firewall_policy_name,
                                                 rule_collection_group_name, rule_collection_name):
    """Remove every rule collection named *rule_collection_name* from the group.

    Returns the create/update LRO poller for the rule collection group.
    """
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    # Fix: rebuild the list instead of calling .remove() while iterating it —
    # removal during iteration skips the following element and can miss
    # duplicate-named collections.
    rule_collection_group.rule_collections = [
        collection for collection in rule_collection_group.rule_collections
        if collection.name != rule_collection_name]
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
def list_azure_firewall_policy_rule_collection(cmd, resource_group_name,
                                               firewall_policy_name, rule_collection_group_name):
    """Return all rule collections of the given rule collection group."""
    group = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups.get(
        resource_group_name, firewall_policy_name, rule_collection_group_name)
    return group.rule_collections
# pylint: disable=too-many-locals
def add_azure_firewall_policy_filter_rule(cmd, resource_group_name, firewall_policy_name,
                                          rule_collection_group_name,
                                          rule_collection_name, rule_name, rule_type,
                                          description=None, ip_protocols=None, source_addresses=None,
                                          destination_addresses=None, destination_ports=None,
                                          protocols=None, fqdn_tags=None, target_fqdns=None,
                                          source_ip_groups=None, destination_ip_groups=None,
                                          translated_address=None, translated_port=None,
                                          target_urls=None, enable_tls_inspection=False, web_categories=None):
    """Append a rule of *rule_type* (NetworkRule/ApplicationRule/NatRule) to an existing rule collection.

    Validates that the rule type is compatible with the target collection's
    type before appending. Raises CLIError when the collection is missing or
    the rule type is incompatible. Returns the create/update LRO poller.
    """
    (NetworkRule,
     FirewallPolicyRuleApplicationProtocol,
     ApplicationRule,
     NatRule) = cmd.get_models('NetworkRule', 'FirewallPolicyRuleApplicationProtocol',
                               'ApplicationRule', 'NatRule')
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    # Locate the target collection; the last name match wins.
    target_rule_collection = None
    for rule_collection in rule_collection_group.rule_collections:
        if rule_collection.name == rule_collection_name:
            target_rule_collection = rule_collection
    if target_rule_collection is None:
        raise CLIError("Cannot find corresponding rule.")
    if target_rule_collection.rule_collection_type == "FirewallPolicyFilterRule" and rule_type == 'NatRule':
        raise CLIError("FirewallPolicyFilterRule doesn't support Nat rule.")
    if target_rule_collection.rule_collection_type == "FirewallPolicyNatRule" and rule_type in ['NetworkRule',
                                                                                               'ApplicationRule']:
        raise CLIError("FirewallPolicyNatRule supports neither Network rule nor Application rule.")
    rule = None
    if rule_type == "NetworkRule":
        rule = NetworkRule(name=rule_name,
                           description=description,
                           rule_type=rule_type,
                           ip_protocols=ip_protocols,
                           source_addresses=source_addresses,
                           destination_addresses=destination_addresses,
                           destination_ports=destination_ports,
                           source_ip_groups=source_ip_groups,
                           destination_ip_groups=destination_ip_groups)
    elif rule_type == 'ApplicationRule':
        def map_application_rule_protocol(item):
            # item is a {'protocol_type': ..., 'port': ...} dict produced by the CLI parser.
            return FirewallPolicyRuleApplicationProtocol(protocol_type=item['protocol_type'],
                                                         port=int(item['port']))
        protocols = list(map(map_application_rule_protocol, protocols))
        rule = ApplicationRule(name=rule_name,
                               description=description,
                               rule_type=rule_type,
                               source_addresses=source_addresses,
                               protocols=protocols,
                               destination_addresses=destination_addresses,
                               fqdn_tags=fqdn_tags,
                               target_fqdns=target_fqdns,
                               target_urls=target_urls,
                               source_ip_groups=source_ip_groups,
                               terminate_tls=enable_tls_inspection,
                               web_categories=web_categories)
    elif rule_type == 'NatRule':
        rule = NatRule(name=rule_name,
                       description=description,
                       rule_type="NatRule",
                       ip_protocols=ip_protocols,
                       source_addresses=source_addresses,
                       destination_addresses=destination_addresses,
                       destination_ports=destination_ports,
                       translated_address=translated_address,
                       translated_port=translated_port,
                       source_ip_groups=source_ip_groups)
    # Any unrecognized rule_type falls through and appends None here.
    target_rule_collection.rules.append(rule)
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
def remove_azure_firewall_policy_filter_rule(cmd, resource_group_name, firewall_policy_name,
                                             rule_collection_group_name,
                                             rule_collection_name, rule_name):
    """Remove every rule named *rule_name* from the given rule collection.

    Raises CLIError when the rule collection cannot be found. Returns the
    create/update LRO poller for the rule collection group.
    """
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    # Locate the target collection; the last name match wins.
    target_rule_collection = None
    for rule_collection in rule_collection_group.rule_collections:
        if rule_collection.name == rule_collection_name:
            target_rule_collection = rule_collection
    if target_rule_collection is None:
        raise CLIError("Cannot find corresponding rule collection.")
    # Fix: rebuild the rules list instead of calling .remove() while iterating
    # it — removal during iteration skips the following element and can leave
    # duplicate-named rules behind.
    target_rule_collection.rules = [r for r in target_rule_collection.rules if r.name != rule_name]
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
# pylint: disable=too-many-locals
def update_azure_firewall_policy_filter_rule(cmd, instance, rule_collection_name, rule_name,
                                             description=None, ip_protocols=None, source_addresses=None,
                                             destination_addresses=None, destination_ports=None,
                                             protocols=None, fqdn_tags=None, target_fqdns=None,
                                             source_ip_groups=None, destination_ip_groups=None,
                                             translated_address=None, translated_port=None,
                                             target_urls=None, enable_tls_inspection=None, web_categories=None):
    """Update one rule inside a firewall policy rule collection (generic-update pattern).

    Parameters left as None keep the value already stored on the existing rule.

    :raises UserFault: if the rule collection or the rule cannot be found.
    :raises ServiceError: if the stored rule has an unrecognized rule_type.
    """
    (NetworkRule,
     FirewallPolicyRuleApplicationProtocol,
     ApplicationRule,
     NatRule) = cmd.get_models('NetworkRule', 'FirewallPolicyRuleApplicationProtocol',
                               'ApplicationRule', 'NatRule')
    target_rule_collection = None
    for rule_collection in instance.rule_collections:
        if rule_collection.name == rule_collection_name:
            target_rule_collection = rule_collection
    if target_rule_collection is None:
        raise UserFault("Cannot find corresponding rule, please check parameters")
    for i in range(len(target_rule_collection.rules)):
        rule = target_rule_collection.rules[i]
        if rule_name != rule.name:
            continue
        new_rule = None
        if rule.rule_type == "NetworkRule":
            new_rule = NetworkRule(name=rule_name,
                                   description=(description or rule.description),
                                   rule_type=rule.rule_type,
                                   ip_protocols=(ip_protocols or rule.ip_protocols),
                                   source_addresses=(source_addresses or rule.source_addresses),
                                   destination_addresses=(destination_addresses or rule.destination_addresses),
                                   destination_ports=(destination_ports or rule.destination_ports),
                                   source_ip_groups=(source_ip_groups or rule.source_ip_groups),
                                   destination_ip_groups=(destination_ip_groups or rule.destination_ip_groups))
        elif rule.rule_type == 'ApplicationRule':
            # Bug fix: the original mapped over `protocols` unconditionally, so
            # updating an application rule without --protocols raised TypeError
            # (map over None). Only convert when the caller supplied protocols.
            if protocols is not None:
                protocols = [FirewallPolicyRuleApplicationProtocol(protocol_type=p['protocol_type'],
                                                                   port=int(p['port']))
                             for p in protocols]
            new_rule = ApplicationRule(name=rule_name,
                                       description=(description or rule.description),
                                       rule_type=rule.rule_type,
                                       source_addresses=(source_addresses or rule.source_addresses),
                                       protocols=(protocols or rule.protocols),
                                       destination_addresses=(destination_addresses or rule.destination_addresses),
                                       fqdn_tags=(fqdn_tags or rule.fqdn_tags),
                                       target_fqdns=(target_fqdns or rule.target_fqdns),
                                       target_urls=(target_urls or rule.target_urls),
                                       source_ip_groups=(source_ip_groups or rule.source_ip_groups),
                                       terminate_tls=(enable_tls_inspection or rule.terminate_tls),
                                       web_categories=(web_categories or rule.web_categories))
        elif rule.rule_type == 'NatRule':
            new_rule = NatRule(name=rule_name,
                               description=(description or rule.description),
                               rule_type=rule.rule_type,
                               ip_protocols=(ip_protocols or rule.ip_protocols),
                               source_addresses=(source_addresses or rule.source_addresses),
                               destination_addresses=(destination_addresses or rule.destination_addresses),
                               destination_ports=(destination_ports or rule.destination_ports),
                               translated_address=(translated_address or rule.translated_address),
                               translated_port=(translated_port or rule.translated_port),
                               source_ip_groups=(source_ip_groups or rule.source_ip_groups))
        if new_rule:
            # Deep-copy so later mutation of the SDK model cannot alias the stored rule.
            target_rule_collection.rules[i] = copy.deepcopy(new_rule)
            return instance
        raise ServiceError(f'Undefined rule_type : {rule.rule_type}')
    raise UserFault(f'{rule_name} does not exist!!!')
# endregion
| 55.806482 | 174 | 0.649084 |
import copy
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.util import sdk_no_wait
from azure.cli.core.azclierror import UserFault, ServiceError
from ._client_factory import network_client_factory
logger = get_logger(__name__)
def _generic_list(cli_ctx, operation_name, resource_group_name):
    """List resources from the named operation group, scoped to a resource group when given."""
    operations = getattr(network_client_factory(cli_ctx), operation_name)
    if not resource_group_name:
        return operations.list_all()
    return operations.list(resource_group_name)
def _get_property(items, name):
result = next((x for x in items if x.name.lower() == name.lower()), None)
if not result:
raise CLIError("Property '{}' does not exist".format(name))
return result
def _upsert(parent, collection_name, obj_to_add, key_name, warn=True):
if not getattr(parent, collection_name, None):
setattr(parent, collection_name, [])
collection = getattr(parent, collection_name, None)
value = getattr(obj_to_add, key_name)
if value is None:
raise CLIError(
"Unable to resolve a value for key '{}' with which to match.".format(key_name))
match = next((x for x in collection if getattr(x, key_name, None) == value), None)
if match:
if warn:
logger.warning("Item '%s' already exists. Replacing with new values.", value)
collection.remove(match)
collection.append(obj_to_add)
def _find_item_at_path(instance, path):
curr_item = instance
path_comps = path.split('.')
for i, comp in enumerate(path_comps):
if i % 2:
curr_item = next((x for x in curr_item if x.name == comp), None)
else:
curr_item = getattr(curr_item, comp, None)
if not curr_item:
raise CLIError("unable to find '{}'...".format(comp))
return curr_item
def create_azure_firewall(cmd, resource_group_name, azure_firewall_name, location=None,
                          tags=None, zones=None, private_ranges=None, firewall_policy=None,
                          virtual_hub=None, sku=None,
                          dns_servers=None, enable_dns_proxy=None,
                          threat_intel_mode=None, hub_public_ip_count=None, allow_active_ftp=None, tier=None):
    """Create an Azure Firewall, either VNet-attached (AZFW_VNet) or hub-attached (AZFW_Hub).

    DNS, SNAT and FTP options are stored as string entries in the firewall's
    ``additional_properties`` bag. Returns the long-running-operation poller
    from ``begin_create_or_update``.

    :raises CLIError: on mutually exclusive / missing argument combinations.
    """
    # A firewall bound to a policy must not also carry firewall-level DNS settings.
    if firewall_policy and any([enable_dns_proxy, dns_servers]):
        raise CLIError('usage error: firewall policy and dns settings cannot co-exist.')
    if sku and sku.lower() == 'azfw_hub' and not all([virtual_hub, hub_public_ip_count]):
        raise CLIError('usage error: virtual hub and hub ip addresses are mandatory for azure firewall on virtual hub.')
    if sku and sku.lower() == 'azfw_hub' and allow_active_ftp:
        raise CLIError('usage error: allow active ftp is not allowed for azure firewall on virtual hub.')
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    (AzureFirewall,
     SubResource,
     AzureFirewallSku,
     HubIPAddresses,
     HubPublicIPAddresses) = cmd.get_models('AzureFirewall',
                                            'SubResource',
                                            'AzureFirewallSku',
                                            'HubIPAddresses',
                                            'HubPublicIPAddresses')
    sku_instance = AzureFirewallSku(name=sku, tier=tier)
    firewall = AzureFirewall(location=location,
                             tags=tags,
                             zones=zones,
                             additional_properties={},
                             virtual_hub=SubResource(id=virtual_hub) if virtual_hub is not None else None,
                             firewall_policy=SubResource(id=firewall_policy) if firewall_policy is not None else None,
                             sku=sku_instance if sku is not None else None,
                             threat_intel_mode=threat_intel_mode,
                             hub_ip_addresses=HubIPAddresses(
                                 public_i_ps=HubPublicIPAddresses(
                                     count=hub_public_ip_count
                                 )
                             ) if hub_public_ip_count is not None else None)
    if private_ranges is not None:
        # additional_properties was initialized to {} above; this re-check keeps
        # the branch self-contained in case the model resets it.
        if firewall.additional_properties is None:
            firewall.additional_properties = {}
        firewall.additional_properties['Network.SNAT.PrivateRanges'] = private_ranges
    # Firewall-level DNS settings apply only to VNet firewalls without a policy.
    if sku is None or sku.lower() == 'azfw_vnet':
        if firewall_policy is None:
            if firewall.additional_properties is None:
                firewall.additional_properties = {}
            if enable_dns_proxy is not None:
                # The service expects lowercase "true"/"false" strings here.
                firewall.additional_properties['Network.DNS.EnableProxy'] = str(enable_dns_proxy).lower()
            if dns_servers is not None:
                firewall.additional_properties['Network.DNS.Servers'] = ','.join(dns_servers or '')
    if allow_active_ftp:
        if firewall.additional_properties is None:
            firewall.additional_properties = {}
        firewall.additional_properties['Network.FTP.AllowActiveFTP'] = "true"
    return client.begin_create_or_update(resource_group_name, azure_firewall_name, firewall)
def update_azure_firewall(cmd, instance, tags=None, zones=None, private_ranges=None,
                          firewall_policy=None, virtual_hub=None,
                          dns_servers=None, enable_dns_proxy=None,
                          threat_intel_mode=None, hub_public_ip_addresses=None,
                          hub_public_ip_count=None, allow_active_ftp=None):
    """Patch an Azure Firewall in place (generic-update pattern) and return it.

    Only parameters that are not None are applied. Hub public IPs can be grown
    via ``hub_public_ip_count`` or shrunk to an explicit address list via
    ``hub_public_ip_addresses`` — never both in one call.

    :raises CLIError: on invalid argument combinations or hub IP state.
    """
    if firewall_policy and any([enable_dns_proxy, dns_servers]):
        raise CLIError('usage error: firewall policy and dns settings cannot co-exist.')
    if all([hub_public_ip_addresses, hub_public_ip_count]):
        raise CLIError('Cannot add and remove public ip addresses at same time.')
    (SubResource,
     AzureFirewallPublicIPAddress,
     HubIPAddresses,
     HubPublicIPAddresses) = cmd.get_models('SubResource',
                                            'AzureFirewallPublicIPAddress',
                                            'HubIPAddresses',
                                            'HubPublicIPAddresses')
    if tags is not None:
        instance.tags = tags
    if zones is not None:
        instance.zones = zones
    if private_ranges is not None:
        if instance.additional_properties is None:
            instance.additional_properties = {}
        instance.additional_properties['Network.SNAT.PrivateRanges'] = private_ranges
    if firewall_policy is not None:
        instance.firewall_policy = SubResource(id=firewall_policy)
    if virtual_hub is not None:
        # Empty string is the CLI convention for detaching from the hub.
        if virtual_hub == '':
            instance.virtual_hub = None
        else:
            instance.virtual_hub = SubResource(id=virtual_hub)
    # NOTE(review): the DNS branches assume additional_properties is already a
    # dict (unlike the branches above, they do not None-guard) — TODO confirm
    # the service always materializes the bag on existing firewalls.
    if enable_dns_proxy is not None:
        instance.additional_properties['Network.DNS.EnableProxy'] = str(enable_dns_proxy).lower()
    if dns_servers is not None:
        instance.additional_properties['Network.DNS.Servers'] = ','.join(dns_servers or '')
    if threat_intel_mode is not None:
        instance.threat_intel_mode = threat_intel_mode
    if instance.hub_ip_addresses is None and hub_public_ip_addresses is not None:
        raise CLIError('Cannot delete public ip addresses from vhub without creation.')
    if hub_public_ip_count is not None:
        # EAFP: AttributeError means hub_ip_addresses/public_i_ps was never set,
        # in which case we create the structure from scratch.
        try:
            if instance.hub_ip_addresses.public_i_ps.count is not None and hub_public_ip_count > instance.hub_ip_addresses.public_i_ps.count:
                instance.hub_ip_addresses.public_i_ps.count = hub_public_ip_count
            else:
                raise CLIError('Cannot decrease the count of hub ip addresses through --count.')
        except AttributeError:
            instance.hub_ip_addresses = HubIPAddresses(
                public_i_ps=HubPublicIPAddresses(
                    count=hub_public_ip_count
                )
            )
    if hub_public_ip_addresses is not None:
        try:
            # Supplying an explicit list may only keep or shrink the set.
            if len(hub_public_ip_addresses) > instance.hub_ip_addresses.public_i_ps.count:
                raise CLIError('Number of public ip addresses must be less than or equal to existing ones.')
            instance.hub_ip_addresses.public_i_ps.addresses = [AzureFirewallPublicIPAddress(address=ip) for ip in hub_public_ip_addresses]
            instance.hub_ip_addresses.public_i_ps.count = len(hub_public_ip_addresses)
        except AttributeError:
            raise CLIError('Public Ip addresses must exist before deleting them.')
    if allow_active_ftp is not None:
        if instance.additional_properties is None:
            instance.additional_properties = {}
        if allow_active_ftp:
            instance.additional_properties['Network.FTP.AllowActiveFTP'] = "true"
        elif 'Network.FTP.AllowActiveFTP' in instance.additional_properties:
            # Disabling removes the key entirely rather than writing "false".
            del instance.additional_properties['Network.FTP.AllowActiveFTP']
    return instance
def list_azure_firewalls(cmd, resource_group_name=None):
    """List Azure Firewalls in a resource group, or subscription-wide when no group is given."""
    return _generic_list(cmd.cli_ctx, 'azure_firewalls', resource_group_name)
def create_af_ip_configuration(cmd, resource_group_name, azure_firewall_name, item_name,
                               public_ip_address, virtual_network_name=None, subnet='AzureFirewallSubnet',
                               management_item_name=None, management_public_ip_address=None,
                               management_virtual_network_name=None, management_subnet='AzureFirewallManagementSubnet'):
    """Add an IP configuration (and optionally a management IP configuration) to a firewall.

    ``virtual_network_name`` / ``management_virtual_network_name`` are not used
    directly here — presumably they exist for CLI argument validation/ID
    building upstream, with ``subnet`` already a full resource ID (TODO confirm
    against the command's argument registration).

    Returns the created IP configuration from the updated firewall.
    """
    AzureFirewallIPConfiguration, SubResource = cmd.get_models('AzureFirewallIPConfiguration', 'SubResource')
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, azure_firewall_name)
    config = AzureFirewallIPConfiguration(
        name=item_name,
        public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
        subnet=SubResource(id=subnet) if subnet else None
    )
    # warn=False: silently replace an existing configuration with the same name.
    _upsert(af, 'ip_configurations', config, 'name', warn=False)
    if management_item_name is not None:
        management_config = AzureFirewallIPConfiguration(
            name=management_item_name,
            public_ip_address=SubResource(id=management_public_ip_address) if management_public_ip_address else None,
            subnet=SubResource(id=management_subnet) if management_subnet else None
        )
        af.management_ip_configuration = management_config
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, af)
    return _get_property(poller.result().ip_configurations, item_name)
def create_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name, item_name,
                                          public_ip_address, virtual_network_name,
                                          subnet='AzureFirewallManagementSubnet'):
    """Set the management IP configuration of an Azure Firewall.

    ``virtual_network_name`` is accepted for CLI argument wiring but not read
    here; the subnet ID carries the network information.
    Returns the management IP configuration from the updated firewall.
    """
    AzureFirewallIPConfiguration, SubResource = cmd.get_models('AzureFirewallIPConfiguration', 'SubResource')
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, azure_firewall_name)
    config = AzureFirewallIPConfiguration(
        name=item_name,
        public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
        subnet=SubResource(id=subnet) if subnet else None
    )
    af.management_ip_configuration = config
    # Consistency fix: every other call site on this client uses the track-2
    # long-running begin_create_or_update; plain create_or_update is not
    # exposed by track-2 operation classes.
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, af)
    return poller.result().management_ip_configuration
def update_af_management_ip_configuration(cmd, instance, public_ip_address=None, virtual_network_name=None,
                                          subnet='AzureFirewallManagementSubnet'):
    """Patch the firewall's management IP configuration in place and return the instance."""
    SubResource = cmd.get_models('SubResource')
    config = instance.management_ip_configuration
    if public_ip_address is not None:
        config.public_ip_address = SubResource(id=public_ip_address)
    if subnet is not None:
        config.subnet = SubResource(id=subnet)
    return instance
def set_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name, parameters):
    """Write back a full firewall object and return its management IP configuration."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    # Consistency fix: use the track-2 begin_create_or_update, as the rest of
    # this file does; plain create_or_update does not exist on track-2 clients.
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, parameters)
    return poller.result().management_ip_configuration
def show_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name):
    """Return the firewall's management IP configuration (may be None)."""
    firewall = network_client_factory(cmd.cli_ctx).azure_firewalls.get(resource_group_name, azure_firewall_name)
    return firewall.management_ip_configuration
def delete_af_management_ip_configuration(cmd, resource_group_name, azure_firewall_name):
    """Clear the firewall's management IP configuration and return it (None after success)."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, azure_firewall_name)
    af.management_ip_configuration = None
    # Consistency fix: track-2 clients expose begin_create_or_update only.
    poller = client.begin_create_or_update(resource_group_name, azure_firewall_name, af)
    return poller.result().management_ip_configuration
def delete_af_ip_configuration(cmd, resource_group_name, resource_name, item_name, no_wait=False):
    """Delete one IP configuration from an Azure Firewall.

    Removing the last regular IP configuration also clears the management IP
    configuration, which cannot exist on its own.

    :raises CLIError: if the configuration still exists after the update completes.
    """
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, resource_name)
    keep_items = \
        [x for x in af.ip_configurations if x.name.lower() != item_name.lower()]
    af.ip_configurations = keep_items if keep_items else None
    if not keep_items:
        if af.management_ip_configuration is not None:
            logger.warning('Management ip configuration cannot exist without regular ip config. Delete it as well.')
            af.management_ip_configuration = None
    if no_wait:
        # Consistency fix: the original passed the non-existent track-2
        # create_or_update here; use begin_create_or_update as in the else branch.
        sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, resource_name, af)
    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, resource_name, af).result()
        # Robustness fix: ip_configurations is None when the last config was
        # removed; guard before iterating.
        if next((x for x in (getattr(result, 'ip_configurations', None) or [])
                 if x.name.lower() == item_name.lower()), None):
            raise CLIError("Failed to delete '{}' on '{}'".format(item_name, resource_name))
def build_af_rule_list(item_param_name, collection_param_name):
    """Fabricate and register a module-level 'list' command for one AF rule kind.

    The generated function is bound to this module under ``list_af_<item>s``;
    that name is returned for command-table wiring.
    """
    import sys

    def _list_rules(cmd, resource_group_name, firewall_name, collection_name):
        firewall = network_client_factory(cmd.cli_ctx).azure_firewalls.get(resource_group_name, firewall_name)
        return _find_item_at_path(firewall, '{}.{}'.format(collection_param_name, collection_name))

    func_name = 'list_af_{}s'.format(item_param_name)
    setattr(sys.modules[__name__], func_name, _list_rules)
    return func_name
def build_af_rule_show(item_param_name, collection_param_name):
    """Fabricate and register a module-level 'show' command for one AF rule kind.

    The generated function is bound to this module under ``show_af_<item>``;
    that name is returned for command-table wiring.
    """
    import sys

    def _show_rule(cmd, resource_group_name, firewall_name, collection_name, item_name):
        firewall = network_client_factory(cmd.cli_ctx).azure_firewalls.get(resource_group_name, firewall_name)
        path = '{}.{}.rules.{}'.format(collection_param_name, collection_name, item_name)
        return _find_item_at_path(firewall, path)

    func_name = 'show_af_{}'.format(item_param_name)
    setattr(sys.modules[__name__], func_name, _show_rule)
    return func_name
def build_af_rule_delete(item_param_name, collection_param_name):
    """Fabricate and register a module-level 'delete' command for one AF rule kind.

    The generated function is bound to this module under ``delete_af_<item>``;
    that name is returned for command-table wiring.
    """
    import sys

    def _delete_rule(cmd, resource_group_name, firewall_name, collection_name, item_name):
        client = network_client_factory(cmd.cli_ctx).azure_firewalls
        firewall = client.get(resource_group_name, firewall_name)
        collection = _find_item_at_path(firewall, '{}.{}'.format(collection_param_name, collection_name))
        collection.rules = [rule for rule in collection.rules if rule.name != item_name]
        client.begin_create_or_update(resource_group_name, firewall_name, firewall)

    func_name = 'delete_af_{}'.format(item_param_name)
    setattr(sys.modules[__name__], func_name, _delete_rule)
    return func_name
def _upsert_af_rule(cmd, resource_group_name, firewall_name, collection_param_name, collection_class,
                    item_class, item_name, params, collection_params):
    """Append a rule to an existing or newly-created rule collection on a firewall.

    If the collection named in *collection_params* already exists, priority and
    action must NOT be supplied; if it does not exist, both are required so the
    collection can be created. Returns the persisted rule looked up from the
    updated firewall.

    :raises CLIError: on invalid priority/action combinations.
    """
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    af = client.get(resource_group_name, firewall_name)
    collection = getattr(af, collection_param_name, [])
    collection_name = collection_params.get('name', '')
    priority = collection_params.get('priority', None)
    action = collection_params.get('action', None)
    collection_match = next((x for x in collection if x.name.lower() == collection_name.lower()), None)
    usage_error = CLIError("usage error: --collection-name EXISTING_NAME | --collection-name NEW_NAME --priority"
                           " INT --action ACTION")
    if collection_match:
        # Existing collection: refuse priority/action to avoid silent changes.
        if any([priority, action['type']]):
            logger.warning("Rule collection '%s' already exists.", collection_params['name'])
            raise usage_error
    else:
        # New collection: both priority and action are mandatory.
        if not all([priority, action['type']]):
            logger.warning("Rule collection '%s' does not exist and needs to be created.", collection_params['name'])
            raise usage_error
        logger.warning("Creating rule collection '%s'.", collection_params['name'])
        collection_match = collection_class(**collection_params)
        collection_match.rules = []
    collection_match.rules.append(item_class(**params))
    _upsert(af, collection_param_name, collection_match, 'name', warn=False)
    af = client.begin_create_or_update(resource_group_name, firewall_name, af).result()
    return _find_item_at_path(af, '{}.{}.rules.{}'.format(collection_param_name, collection_name, item_name))
def create_af_network_rule(cmd, resource_group_name, azure_firewall_name, collection_name, item_name,
                           destination_ports, protocols, destination_fqdns=None, source_addresses=None,
                           destination_addresses=None, description=None, priority=None, action=None,
                           source_ip_groups=None, destination_ip_groups=None):
    """Create a network rule in a firewall, creating its rule collection on first use."""
    rule_cls, collection_cls = cmd.get_models(
        'AzureFirewallNetworkRule', 'AzureFirewallNetworkRuleCollection')
    rule_params = dict(name=item_name,
                       description=description,
                       source_addresses=source_addresses,
                       destination_addresses=destination_addresses,
                       destination_ports=destination_ports,
                       destination_fqdns=destination_fqdns,
                       protocols=protocols,
                       destination_ip_groups=destination_ip_groups,
                       source_ip_groups=source_ip_groups)
    collection_params = dict(name=collection_name,
                             priority=priority,
                             action={'type': action})
    return _upsert_af_rule(cmd, resource_group_name, azure_firewall_name,
                           'network_rule_collections', collection_cls, rule_cls,
                           item_name, rule_params, collection_params)
def create_af_nat_rule(cmd, resource_group_name, azure_firewall_name, collection_name, item_name,
                       destination_addresses, destination_ports, protocols, translated_port, source_addresses=None,
                       translated_address=None, translated_fqdn=None, description=None, priority=None, action=None,
                       source_ip_groups=None):
    """Create a NAT rule in a firewall, creating its rule collection on first use."""
    rule_cls, collection_cls = cmd.get_models(
        'AzureFirewallNatRule', 'AzureFirewallNatRuleCollection')
    rule_params = dict(name=item_name,
                       description=description,
                       source_addresses=source_addresses,
                       destination_addresses=destination_addresses,
                       destination_ports=destination_ports,
                       protocols=protocols,
                       translated_address=translated_address,
                       translated_port=translated_port,
                       translated_fqdn=translated_fqdn,
                       source_ip_groups=source_ip_groups)
    collection_params = dict(name=collection_name,
                             priority=priority,
                             action={'type': action})
    return _upsert_af_rule(cmd, resource_group_name, azure_firewall_name,
                           'nat_rule_collections', collection_cls, rule_cls,
                           item_name, rule_params, collection_params)
def create_af_application_rule(cmd, resource_group_name, azure_firewall_name, collection_name, item_name,
                               protocols, description=None, source_addresses=None, target_fqdns=None,
                               fqdn_tags=None, priority=None, action=None, source_ip_groups=None):
    """Create an application rule in a firewall, creating its rule collection on first use."""
    rule_cls, collection_cls = cmd.get_models(
        'AzureFirewallApplicationRule', 'AzureFirewallApplicationRuleCollection')
    rule_params = dict(name=item_name,
                       description=description,
                       source_addresses=source_addresses,
                       protocols=protocols,
                       target_fqdns=target_fqdns,
                       fqdn_tags=fqdn_tags,
                       source_ip_groups=source_ip_groups)
    collection_params = dict(name=collection_name,
                             priority=priority,
                             action={'type': action})
    return _upsert_af_rule(cmd, resource_group_name, azure_firewall_name,
                           'application_rule_collections', collection_cls,
                           rule_cls, item_name, rule_params, collection_params)
def create_azure_firewall_threat_intel_allowlist(cmd, resource_group_name, azure_firewall_name,
                                                 ip_addresses=None, fqdns=None):
    """Store threat-intel allowlist entries on a firewall and persist the change."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    firewall = client.get(resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name)
    updates = {}
    if ip_addresses is not None:
        updates['ThreatIntel.Whitelist.IpAddresses'] = ip_addresses
    if fqdns is not None:
        updates['ThreatIntel.Whitelist.FQDNs'] = fqdns
    if updates:
        if firewall.additional_properties is None:
            firewall.additional_properties = {}
        firewall.additional_properties.update(updates)
    return client.begin_create_or_update(resource_group_name, azure_firewall_name, firewall)
def update_azure_firewall_threat_intel_allowlist(instance, ip_addresses=None, fqdns=None):
    """Patch threat-intel allowlist entries on a firewall instance and return it."""
    for key, value in (('ThreatIntel.Whitelist.IpAddresses', ip_addresses),
                       ('ThreatIntel.Whitelist.FQDNs', fqdns)):
        if value is not None:
            if instance.additional_properties is None:
                instance.additional_properties = {}
            instance.additional_properties[key] = value
    return instance
def show_azure_firewall_threat_intel_allowlist(cmd, resource_group_name, azure_firewall_name):
    """Return the firewall's additional_properties bag, which holds the allowlist entries."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    firewall = client.get(resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name)
    firewall.additional_properties = firewall.additional_properties or {}
    return firewall.additional_properties
def delete_azure_firewall_threat_intel_allowlist(cmd, resource_group_name, azure_firewall_name):
    """Drop threat-intel allowlist entries from the firewall and persist the change."""
    client = network_client_factory(cmd.cli_ctx).azure_firewalls
    firewall = client.get(resource_group_name=resource_group_name, azure_firewall_name=azure_firewall_name)
    props = firewall.additional_properties
    if props is not None:
        for key in ('ThreatIntel.Whitelist.IpAddresses', 'ThreatIntel.Whitelist.FQDNs'):
            props.pop(key, None)
    return client.begin_create_or_update(resource_group_name, azure_firewall_name, firewall)
def create_azure_firewall_policies(cmd, resource_group_name, firewall_policy_name, base_policy=None,
                                   threat_intel_mode=None, location=None, tags=None, ip_addresses=None,
                                   fqdns=None,
                                   dns_servers=None, enable_dns_proxy=None,
                                   sku=None, intrusion_detection_mode=None,
                                   key_vault_secret_id=None, certificate_name=None, user_assigned_identity=None):
    """Create a firewall policy with optional DNS, SKU, IDPS, TLS-inspection and identity settings.

    Returns the long-running-operation poller from ``begin_create_or_update``.
    """
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    (FirewallPolicy,
     SubResource,
     FirewallPolicyThreatIntelWhitelist,
     DnsSettings,
     FirewallPolicySku,
     ManagedServiceIdentityUserAssignedIdentitiesValue,
     ManagedServiceIdentity) = cmd.get_models('FirewallPolicy',
                                              'SubResource',
                                              'FirewallPolicyThreatIntelWhitelist',
                                              'DnsSettings',
                                              'FirewallPolicySku',
                                              'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties',
                                              'ManagedServiceIdentity')
    firewall_policy = FirewallPolicy(base_policy=SubResource(id=base_policy) if base_policy is not None else None,
                                     threat_intel_mode=threat_intel_mode,
                                     location=location,
                                     tags=tags)
    # Bug fix: the allowlist was only built when BOTH ip_addresses AND fqdns
    # were supplied, silently dropping a single-kind allowlist on create even
    # though the update command accepts either one on its own.
    threat_intel_allowlist = FirewallPolicyThreatIntelWhitelist(ip_addresses=ip_addresses,
                                                                fqdns=fqdns) if ip_addresses or fqdns else None
    firewall_policy.threat_intel_whitelist = threat_intel_allowlist
    if cmd.supported_api_version(min_api='2020-05-01'):
        if any([dns_servers, enable_dns_proxy]):
            dns_settings = DnsSettings(servers=dns_servers,
                                       enable_proxy=enable_dns_proxy or False)
            firewall_policy.dns_settings = dns_settings
    if cmd.supported_api_version(min_api='2020-07-01'):
        if sku is not None:
            firewall_policy.sku = FirewallPolicySku(tier=sku)
        if intrusion_detection_mode is not None:
            (FirewallPolicyIntrusionDetection,
             FirewallPolicyIntrusionDetectionConfiguration) = \
                cmd.get_models('FirewallPolicyIntrusionDetection',
                               'FirewallPolicyIntrusionDetectionConfiguration')
            firewall_policy.intrusion_detection = FirewallPolicyIntrusionDetection(
                mode=intrusion_detection_mode,
                configuration=FirewallPolicyIntrusionDetectionConfiguration()
            )
        if certificate_name is not None and key_vault_secret_id is not None:
            FirewallPolicyTransportSecurity, FirewallPolicyCertificateAuthority = \
                cmd.get_models('FirewallPolicyTransportSecurity', 'FirewallPolicyCertificateAuthority')
            certificate_auth = FirewallPolicyCertificateAuthority(key_vault_secret_id=key_vault_secret_id,
                                                                  name=certificate_name)
            firewall_policy.transport_security = FirewallPolicyTransportSecurity(certificate_authority=certificate_auth)
    if user_assigned_identity is not None:
        # TLS inspection requires a user-assigned identity to read the Key Vault secret.
        user_assigned_indentity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
        user_assigned_identities_instance = dict()
        user_assigned_identities_instance[user_assigned_identity] = user_assigned_indentity_instance
        identity_instance = ManagedServiceIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identities_instance
        )
        firewall_policy.identity = identity_instance
    return client.begin_create_or_update(resource_group_name, firewall_policy_name, firewall_policy)
def update_azure_firewall_policies(cmd,
                                   instance, tags=None, threat_intel_mode=None, ip_addresses=None,
                                   fqdns=None,
                                   dns_servers=None, enable_dns_proxy=None,
                                   sku=None, intrusion_detection_mode=None,
                                   key_vault_secret_id=None, certificate_name=None, user_assigned_identity=None):
    """Patch a firewall policy instance in place (generic-update pattern) and return it.

    Only parameters that are not None are applied.
    """
    (FirewallPolicyThreatIntelWhitelist, FirewallPolicySku) = cmd.get_models('FirewallPolicyThreatIntelWhitelist', 'FirewallPolicySku')
    if tags is not None:
        instance.tags = tags
    if threat_intel_mode is not None:
        instance.threat_intel_mode = threat_intel_mode
    if cmd.supported_api_version(min_api='2020-05-01'):
        if instance.dns_settings is None and any([dns_servers, enable_dns_proxy]):
            DnsSettings = cmd.get_models('DnsSettings')
            instance.dns_settings = DnsSettings()
        if dns_servers is not None:
            instance.dns_settings.servers = dns_servers
        if enable_dns_proxy is not None:
            instance.dns_settings.enable_proxy = enable_dns_proxy
    if instance.threat_intel_whitelist is None and any([ip_addresses, fqdns]):
        # Bug fix: the keyword was misspelled 'fqnds', which raised TypeError
        # whenever the allowlist had to be created here.
        instance.threat_intel_whitelist = FirewallPolicyThreatIntelWhitelist(ip_addresses=ip_addresses,
                                                                             fqdns=fqdns)
    if ip_addresses is not None:
        instance.threat_intel_whitelist.ip_addresses = ip_addresses
    if fqdns is not None:
        instance.threat_intel_whitelist.fqdns = fqdns
    if cmd.supported_api_version(min_api='2020-07-01'):
        if sku is not None:
            instance.sku = FirewallPolicySku(tier=sku)
        if intrusion_detection_mode is not None:
            if instance.intrusion_detection is not None:
                instance.intrusion_detection.mode = intrusion_detection_mode
            else:
                (FirewallPolicyIntrusionDetection, FirewallPolicyIntrusionDetectionConfiguration) = \
                    cmd.get_models('FirewallPolicyIntrusionDetection', 'FirewallPolicyIntrusionDetectionConfiguration')
                instance.intrusion_detection = FirewallPolicyIntrusionDetection(
                    mode=intrusion_detection_mode,
                    configuration=FirewallPolicyIntrusionDetectionConfiguration()
                )
        if certificate_name is not None and key_vault_secret_id is not None:
            FirewallPolicyTransportSecurity, FirewallPolicyCertificateAuthority = \
                cmd.get_models('FirewallPolicyTransportSecurity', 'FirewallPolicyCertificateAuthority')
            certificate_auth = FirewallPolicyCertificateAuthority(key_vault_secret_id=key_vault_secret_id,
                                                                  name=certificate_name)
            instance.transport_security = FirewallPolicyTransportSecurity(certificate_authority=certificate_auth)
    (ManagedServiceIdentityUserAssignedIdentitiesValue,
     ManagedServiceIdentity) = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties',
                                              'ManagedServiceIdentity')
    if user_assigned_identity is not None:
        user_assigned_indentity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
        user_assigned_identities_instance = dict()
        user_assigned_identities_instance[user_assigned_identity] = user_assigned_indentity_instance
        identity_instance = ManagedServiceIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identities_instance
        )
        instance.identity = identity_instance
    return instance
def set_azure_firewall_policies(cmd, resource_group_name, firewall_policy_name, parameters):
    """Write back a full firewall policy, defaulting the identity to 'None' when absent."""
    if parameters.identity is None:
        ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
        parameters.identity = ManagedServiceIdentity(type="None", user_assigned_identities=None)
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    return client.begin_create_or_update(resource_group_name, firewall_policy_name, parameters)
def list_azure_firewall_policies(cmd, resource_group_name=None):
    """List firewall policies, scoped to a resource group when one is given."""
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    return client.list_all() if resource_group_name is None else client.list(resource_group_name)
def add_firewall_policy_intrusion_detection_config(cmd,
                                                   resource_group_name,
                                                   firewall_policy_name,
                                                   signature_id=None,
                                                   signature_mode=None,
                                                   bypass_rule_name=None,
                                                   bypass_rule_description=None,
                                                   bypass_rule_protocol=None,
                                                   bypass_rule_source_addresses=None,
                                                   bypass_rule_destination_addresses=None,
                                                   bypass_rule_destination_ports=None,
                                                   bypass_rule_source_ip_groups=None,
                                                   bypass_rule_destination_ip_groups=None):
    """Add a signature override and/or a bypass-traffic rule to a policy's IDPS configuration.

    Returns the updated intrusion-detection configuration.

    :raises RequiredArgumentMissingError: if intrusion detection mode was never set.
    :raises InvalidArgumentValueError: if the signature ID already has an override.
    """
    from azure.cli.core.azclierror import RequiredArgumentMissingError, InvalidArgumentValueError
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    firewall_policy = client.get(resource_group_name, firewall_policy_name)
    if firewall_policy.intrusion_detection is None:
        raise RequiredArgumentMissingError('Intrusion detection mode is not set. Setting it by update command first')
    # NOTE(review): the code below assumes intrusion_detection.configuration and
    # its list attributes are non-None once the mode is set — TODO confirm the
    # update command always initializes the configuration object.
    if signature_id is not None and signature_mode is not None:
        for overrided_signature in firewall_policy.intrusion_detection.configuration.signature_overrides:
            if overrided_signature.id == signature_id:
                raise InvalidArgumentValueError(
                    'Signature ID {} exists. Delete it first or try update instead'.format(signature_id))
        FirewallPolicyIntrusionDetectionSignatureSpecification = \
            cmd.get_models('FirewallPolicyIntrusionDetectionSignatureSpecification')
        signature_override = FirewallPolicyIntrusionDetectionSignatureSpecification(
            id=signature_id,
            mode=signature_mode
        )
        firewall_policy.intrusion_detection.configuration.signature_overrides.append(signature_override)
    if bypass_rule_name is not None:
        FirewallPolicyIntrusionDetectionBypassTrafficSpecifications = \
            cmd.get_models('FirewallPolicyIntrusionDetectionBypassTrafficSpecifications')
        bypass_traffic = FirewallPolicyIntrusionDetectionBypassTrafficSpecifications(
            name=bypass_rule_name,
            description=bypass_rule_description,
            protocol=bypass_rule_protocol,
            source_addresses=bypass_rule_source_addresses,
            destination_addresses=bypass_rule_destination_addresses,
            destination_ports=bypass_rule_destination_ports,
            source_ip_groups=bypass_rule_source_ip_groups,
            destination_ip_groups=bypass_rule_destination_ip_groups,
        )
        firewall_policy.intrusion_detection.configuration.bypass_traffic_settings.append(bypass_traffic)
    # no_wait=False: always block until the LRO completes so the fresh config can be returned.
    result = sdk_no_wait(False,
                         client.begin_create_or_update,
                         resource_group_name,
                         firewall_policy_name,
                         firewall_policy).result()
    return result.intrusion_detection.configuration
def list_firewall_policy_intrusion_detection_config(cmd, resource_group_name, firewall_policy_name):
    """Show the intrusion-detection configuration of a firewall policy.

    Returns an empty list when intrusion detection has never been enabled
    on the policy; otherwise returns the configuration object.
    """
    policies = network_client_factory(cmd.cli_ctx).firewall_policies
    policy = policies.get(resource_group_name, firewall_policy_name)
    if policy.intrusion_detection is None:
        return []
    return policy.intrusion_detection.configuration
def remove_firewall_policy_intrusion_detection_config(cmd,
                                                      resource_group_name,
                                                      firewall_policy_name,
                                                      signature_id=None,
                                                      bypass_rule_name=None):
    """Remove a signature override and/or a bypass rule from a firewall
    policy's intrusion-detection configuration.

    Raises RequiredArgumentMissingError when intrusion detection is not
    enabled, and InvalidArgumentValueError when the requested signature or
    bypass rule does not exist. Returns the updated configuration.
    """
    from azure.cli.core.azclierror import RequiredArgumentMissingError, InvalidArgumentValueError
    client = network_client_factory(cmd.cli_ctx).firewall_policies
    firewall_policy = client.get(resource_group_name, firewall_policy_name)
    if firewall_policy.intrusion_detection is None:
        raise RequiredArgumentMissingError('Intrusion detection mode is not set. Setting it by update command first')
    if signature_id is not None:
        signatures = firewall_policy.intrusion_detection.configuration.signature_overrides
        new_signatures = [s for s in signatures if s.id != signature_id]
        # Unchanged length means nothing matched the requested ID.
        if len(signatures) == len(new_signatures):
            raise InvalidArgumentValueError("Signature ID {} doesn't exist".format(signature_id))
        firewall_policy.intrusion_detection.configuration.signature_overrides = new_signatures
    if bypass_rule_name is not None:
        bypass_settings = firewall_policy.intrusion_detection.configuration.bypass_traffic_settings
        new_bypass_settings = [s for s in bypass_settings if s.name != bypass_rule_name]
        if len(bypass_settings) == len(new_bypass_settings):
            # BUG FIX: this message previously formatted signature_id instead
            # of the bypass rule name the user actually asked to delete.
            raise InvalidArgumentValueError(
                "Bypass rule with name {} doesn't exist".format(bypass_rule_name))
        firewall_policy.intrusion_detection.configuration.bypass_traffic_settings = new_bypass_settings
    result = sdk_no_wait(False,
                         client.begin_create_or_update,
                         resource_group_name,
                         firewall_policy_name,
                         firewall_policy).result()
    return result.intrusion_detection.configuration
def create_azure_firewall_policy_rule_collection_group(cmd, resource_group_name, firewall_policy_name,
                                                       rule_collection_group_name, priority):
    """Create an empty rule collection group with the given priority."""
    group_cls = cmd.get_models('FirewallPolicyRuleCollectionGroup')
    group = group_cls(name=rule_collection_group_name, priority=priority)
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, group)
def update_azure_firewall_policy_rule_collection_group(instance, priority=None, tags=None):
    """Patch a rule collection group in place.

    Only arguments that are not None are applied; the (mutated) instance is
    returned for the generic-update machinery.
    """
    for attr_name, new_value in (('tags', tags), ('priority', priority)):
        if new_value is not None:
            setattr(instance, attr_name, new_value)
    return instance
def add_azure_firewall_policy_nat_rule_collection(cmd, resource_group_name, firewall_policy_name,
                                                  rule_collection_group_name,
                                                  rule_collection_name, rule_priority, translated_address=None,
                                                  translated_fqdn=None, translated_port=None, nat_action=None,
                                                  rule_name=None, description=None, ip_protocols=None,
                                                  source_addresses=None, destination_addresses=None,
                                                  destination_ports=None, source_ip_groups=None):
    """Append a NAT rule collection (containing one NAT rule) to an existing
    rule collection group and persist the updated group.

    Returns the long-running-operation poller from begin_create_or_update.
    """
    # Resolve the SDK model classes from the command context.
    FirewallPolicyNatRuleCollection, FirewallPolicyNatRuleCollectionAction, \
        NatRule, FirewallPolicyRuleNetworkProtocol = \
        cmd.get_models('FirewallPolicyNatRuleCollection', 'FirewallPolicyNatRuleCollectionAction',
                       'NatRule', 'FirewallPolicyRuleNetworkProtocol')
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    # NOTE(review): map() raises TypeError when ip_protocols is None;
    # presumably the CLI marks --ip-protocols as required here -- confirm.
    ip_protocols = list(map(FirewallPolicyRuleNetworkProtocol, ip_protocols))
    nat_rule = NatRule(name=rule_name,
                       description=description,
                       rule_type="NatRule",
                       ip_protocols=ip_protocols,
                       source_addresses=source_addresses,
                       destination_addresses=destination_addresses,
                       destination_ports=destination_ports,
                       translated_address=translated_address,
                       translated_fqdn=translated_fqdn,
                       translated_port=translated_port,
                       source_ip_groups=source_ip_groups)
    # The new collection wraps the single rule built above.
    nat_rule_collection = FirewallPolicyNatRuleCollection(name=rule_collection_name,
                                                          priority=rule_priority,
                                                          rule_collection_type="FirewallPolicyNatRuleCollection",
                                                          action=FirewallPolicyNatRuleCollectionAction(
                                                              type=nat_action
                                                          ),
                                                          rules=[nat_rule])
    rule_collection_group.rule_collections.append(nat_rule_collection)
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
def add_azure_firewall_policy_filter_rule_collection(cmd, resource_group_name, firewall_policy_name,
                                                     rule_collection_group_name, rule_collection_name,
                                                     rule_priority, filter_action=None, rule_name=None,
                                                     rule_type=None, description=None, ip_protocols=None,
                                                     source_addresses=None, destination_addresses=None,
                                                     destination_ports=None,
                                                     protocols=None, fqdn_tags=None, target_fqdns=None,
                                                     source_ip_groups=None, destination_ip_groups=None,
                                                     target_urls=None, enable_tls_inspection=False, web_categories=None):
    """Append a filter rule collection (containing one network or application
    rule) to an existing rule collection group and persist the group.

    rule_type selects the rule flavor: "NetworkRule" builds a NetworkRule,
    anything else falls through to the ApplicationRule branch.
    """
    NetworkRule, FirewallPolicyRuleApplicationProtocol,\
        ApplicationRule, FirewallPolicyFilterRuleCollectionAction, FirewallPolicyFilterRuleCollection =\
        cmd.get_models('NetworkRule', 'FirewallPolicyRuleApplicationProtocol',
                       'ApplicationRule', 'FirewallPolicyFilterRuleCollectionAction',
                       'FirewallPolicyFilterRuleCollection')
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    rule = None
    if rule_type == "NetworkRule":
        rule = NetworkRule(name=rule_name,
                           description=description,
                           rule_type=rule_type,
                           ip_protocols=ip_protocols,
                           source_addresses=source_addresses,
                           destination_addresses=destination_addresses,
                           destination_ports=destination_ports,
                           source_ip_groups=source_ip_groups,
                           destination_ip_groups=destination_ip_groups)
    else:
        def map_application_rule_protocol(item):
            # Each CLI protocol is a dict like {'protocol_type': ..., 'port': ...}.
            return FirewallPolicyRuleApplicationProtocol(protocol_type=item['protocol_type'],
                                                         port=int(item['port']))
        # NOTE(review): map() raises TypeError when protocols is None --
        # presumably --protocols is required for application rules; confirm.
        protocols = list(map(map_application_rule_protocol, protocols))
        rule = ApplicationRule(name=rule_name,
                               description=description,
                               rule_type=rule_type,
                               source_addresses=source_addresses,
                               protocols=protocols,
                               destination_addresses=destination_addresses,
                               fqdn_tags=fqdn_tags,
                               target_fqdns=target_fqdns,
                               target_urls=target_urls,
                               source_ip_groups=source_ip_groups,
                               terminate_tls=enable_tls_inspection,
                               web_categories=web_categories)
    # NOTE(review): the type string here is "FirewallPolicyFilterRule" while
    # the NAT variant above uses "FirewallPolicyNatRuleCollection"; the later
    # add-rule checks compare against "FirewallPolicyFilterRule", so they are
    # internally consistent, but confirm this matches the service contract.
    filter_rule_collection = FirewallPolicyFilterRuleCollection(name=rule_collection_name,
                                                                priority=rule_priority,
                                                                rule_collection_type="FirewallPolicyFilterRule",
                                                                action=FirewallPolicyFilterRuleCollectionAction(
                                                                    type=filter_action
                                                                ),
                                                                rules=[rule])
    rule_collection_group.rule_collections.append(filter_rule_collection)
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
def remove_azure_firewall_policy_rule_collection(cmd, resource_group_name, firewall_policy_name,
                                                 rule_collection_group_name, rule_collection_name):
    """Delete every rule collection named *rule_collection_name* from a rule
    collection group and persist the group.

    Silently no-ops (still issuing the update) when no collection matches.
    """
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    # BUG FIX: the previous code called list.remove() while iterating the
    # same list, which skips the element following each removal.
    rule_collection_group.rule_collections = [
        collection for collection in rule_collection_group.rule_collections
        if collection.name != rule_collection_name
    ]
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
def list_azure_firewall_policy_rule_collection(cmd, resource_group_name,
                                               firewall_policy_name, rule_collection_group_name):
    """Return the rule collections inside one rule collection group."""
    groups_client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    group = groups_client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    return group.rule_collections
def add_azure_firewall_policy_filter_rule(cmd, resource_group_name, firewall_policy_name,
                                          rule_collection_group_name,
                                          rule_collection_name, rule_name, rule_type,
                                          description=None, ip_protocols=None, source_addresses=None,
                                          destination_addresses=None, destination_ports=None,
                                          protocols=None, fqdn_tags=None, target_fqdns=None,
                                          source_ip_groups=None, destination_ip_groups=None,
                                          translated_address=None, translated_port=None,
                                          target_urls=None, enable_tls_inspection=False, web_categories=None):
    """Append one rule to an existing rule collection.

    Raises CLIError when the collection cannot be found, when *rule_type* is
    incompatible with the collection type, or when *rule_type* is not one of
    NetworkRule / ApplicationRule / NatRule.
    """
    (NetworkRule,
     FirewallPolicyRuleApplicationProtocol,
     ApplicationRule,
     NatRule) = cmd.get_models('NetworkRule', 'FirewallPolicyRuleApplicationProtocol',
                               'ApplicationRule', 'NatRule')
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    # Last matching collection wins (preserves historical behavior).
    target_rule_collection = None
    for rule_collection in rule_collection_group.rule_collections:
        if rule_collection.name == rule_collection_name:
            target_rule_collection = rule_collection
    if target_rule_collection is None:
        raise CLIError("Cannot find corresponding rule.")
    if target_rule_collection.rule_collection_type == "FirewallPolicyFilterRule" and rule_type == 'NatRule':
        raise CLIError("FirewallPolicyFilterRule doesn't support Nat rule.")
    if target_rule_collection.rule_collection_type == "FirewallPolicyNatRule" and rule_type in ['NetworkRule',
                                                                                               'ApplicationRule']:
        raise CLIError("FirewallPolicyNatRule supports neither Network rule nor Application rule.")
    if rule_type == "NetworkRule":
        rule = NetworkRule(name=rule_name, description=description, rule_type=rule_type,
                           ip_protocols=ip_protocols, source_addresses=source_addresses,
                           destination_addresses=destination_addresses,
                           destination_ports=destination_ports,
                           source_ip_groups=source_ip_groups,
                           destination_ip_groups=destination_ip_groups)
    elif rule_type == 'ApplicationRule':
        def map_application_rule_protocol(item):
            # Each CLI protocol is a dict like {'protocol_type': ..., 'port': ...}.
            return FirewallPolicyRuleApplicationProtocol(protocol_type=item['protocol_type'],
                                                         port=int(item['port']))
        protocols = list(map(map_application_rule_protocol, protocols))
        rule = ApplicationRule(name=rule_name, description=description, rule_type=rule_type,
                               source_addresses=source_addresses, protocols=protocols,
                               destination_addresses=destination_addresses,
                               fqdn_tags=fqdn_tags, target_fqdns=target_fqdns,
                               target_urls=target_urls, source_ip_groups=source_ip_groups,
                               terminate_tls=enable_tls_inspection,
                               web_categories=web_categories)
    elif rule_type == 'NatRule':
        rule = NatRule(name=rule_name, description=description, rule_type="NatRule",
                       ip_protocols=ip_protocols, source_addresses=source_addresses,
                       destination_addresses=destination_addresses,
                       destination_ports=destination_ports,
                       translated_address=translated_address, translated_port=translated_port,
                       source_ip_groups=source_ip_groups)
    else:
        # BUG FIX: an unrecognized rule_type previously fell through every
        # branch and appended None to the collection; fail fast instead.
        raise CLIError("Unsupported rule type: {}.".format(rule_type))
    target_rule_collection.rules.append(rule)
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
def remove_azure_firewall_policy_filter_rule(cmd, resource_group_name, firewall_policy_name,
                                             rule_collection_group_name,
                                             rule_collection_name, rule_name):
    """Delete every rule named *rule_name* from a rule collection and persist
    the rule collection group.

    Raises CLIError when the collection itself cannot be found; a missing
    rule name is silently ignored (historical behavior).
    """
    client = network_client_factory(cmd.cli_ctx).firewall_policy_rule_collection_groups
    rule_collection_group = client.get(resource_group_name, firewall_policy_name, rule_collection_group_name)
    target_rule_collection = None
    for rule_collection in rule_collection_group.rule_collections:
        if rule_collection.name == rule_collection_name:
            target_rule_collection = rule_collection
    if target_rule_collection is None:
        raise CLIError("Cannot find corresponding rule collection.")
    # BUG FIX: the previous code called list.remove() while iterating the
    # same list, which skips the element following each removal.
    target_rule_collection.rules = [r for r in target_rule_collection.rules
                                    if r.name != rule_name]
    return client.begin_create_or_update(resource_group_name, firewall_policy_name,
                                         rule_collection_group_name, rule_collection_group)
# pylint: disable=too-many-locals
def update_azure_firewall_policy_filter_rule(cmd, instance, rule_collection_name, rule_name,
                                             description=None, ip_protocols=None, source_addresses=None,
                                             destination_addresses=None, destination_ports=None,
                                             protocols=None, fqdn_tags=None, target_fqdns=None,
                                             source_ip_groups=None, destination_ip_groups=None,
                                             translated_address=None, translated_port=None,
                                             target_urls=None, enable_tls_inspection=None, web_categories=None):
    """Update one rule inside a rule collection group *instance* in place.

    Every optional argument that is None is carried over from the existing
    rule; only supplied values overwrite it. Raises UserFault when the
    collection or rule cannot be found, ServiceError on an unknown rule_type.
    Returns the mutated instance for the generic-update machinery.
    """
    (NetworkRule,
     FirewallPolicyRuleApplicationProtocol,
     ApplicationRule,
     NatRule) = cmd.get_models('NetworkRule', 'FirewallPolicyRuleApplicationProtocol',
                               'ApplicationRule', 'NatRule')
    target_rule_collection = None
    for rule_collection in instance.rule_collections:
        if rule_collection.name == rule_collection_name:
            target_rule_collection = rule_collection
    if target_rule_collection is None:
        raise UserFault("Cannot find corresponding rule, please check parameters")
    for i, rule in enumerate(target_rule_collection.rules):
        if rule_name != rule.name:
            continue
        if rule.rule_type == "NetworkRule":
            new_rule = NetworkRule(name=rule_name,
                                   description=(description or rule.description),
                                   rule_type=rule.rule_type,
                                   ip_protocols=(ip_protocols or rule.ip_protocols),
                                   source_addresses=(source_addresses or rule.source_addresses),
                                   destination_addresses=(destination_addresses or rule.destination_addresses),
                                   destination_ports=(destination_ports or rule.destination_ports),
                                   source_ip_groups=(source_ip_groups or rule.source_ip_groups),
                                   destination_ip_groups=(destination_ip_groups or rule.destination_ip_groups))
        elif rule.rule_type == 'ApplicationRule':
            if protocols is not None:
                # BUG FIX: the protocol mapping previously ran unconditionally,
                # so updating an ApplicationRule without --protocols crashed
                # with "TypeError: 'NoneType' object is not iterable".
                protocols = [FirewallPolicyRuleApplicationProtocol(protocol_type=item['protocol_type'],
                                                                   port=int(item['port']))
                             for item in protocols]
            new_rule = ApplicationRule(name=rule_name,
                                       description=(description or rule.description),
                                       rule_type=rule.rule_type,
                                       source_addresses=(source_addresses or rule.source_addresses),
                                       protocols=(protocols or rule.protocols),
                                       destination_addresses=(destination_addresses or rule.destination_addresses),
                                       fqdn_tags=(fqdn_tags or rule.fqdn_tags),
                                       target_fqdns=(target_fqdns or rule.target_fqdns),
                                       target_urls=(target_urls or rule.target_urls),
                                       source_ip_groups=(source_ip_groups or rule.source_ip_groups),
                                       terminate_tls=(enable_tls_inspection or rule.terminate_tls),
                                       web_categories=(web_categories or rule.web_categories))
        elif rule.rule_type == 'NatRule':
            new_rule = NatRule(name=rule_name,
                               description=(description or rule.description),
                               rule_type=rule.rule_type,
                               ip_protocols=(ip_protocols or rule.ip_protocols),
                               source_addresses=(source_addresses or rule.source_addresses),
                               destination_addresses=(destination_addresses or rule.destination_addresses),
                               destination_ports=(destination_ports or rule.destination_ports),
                               translated_address=(translated_address or rule.translated_address),
                               translated_port=(translated_port or rule.translated_port),
                               source_ip_groups=(source_ip_groups or rule.source_ip_groups))
        else:
            raise ServiceError(f'Undefined rule_type : {rule.rule_type}')
        # Deep-copy so the replacement shares no state with the model classes.
        target_rule_collection.rules[i] = copy.deepcopy(new_rule)
        return instance
    raise UserFault(f'{rule_name} does not exist!!!')
# endregion
| true | true |
1c35f8af4ba9ea456444a61bfa22f1df740f3718 | 8,365 | py | Python | wagtail/wagtailimages/tests.py | ollywillans/wagtail | 5f332a1ca8907172c0a5b4a6a820e17dabbb403b | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailimages/tests.py | ollywillans/wagtail | 5f332a1ca8907172c0a5b4a6a820e17dabbb403b | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailimages/tests.py | ollywillans/wagtail | 5f332a1ca8907172c0a5b4a6a820e17dabbb403b | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from django import template
from django.contrib.auth.models import User, Group, Permission
from django.core.urlresolvers import reverse
from wagtail.wagtailcore.models import Site
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.templatetags import image_tags
def get_test_image_file():
    """Return the repo-relative path of the PNG fixture used by these tests."""
    fixture_dir = 'wagtail/wagtailimages/static/wagtailimages/images'
    return fixture_dir + '/test.png'
# Resolve the project's concrete image model once; all test classes below use it.
Image = get_image_model()
class TestImage(TestCase):
    """Orientation helpers on the Image model, using the bundled PNG fixture."""
    def setUp(self):
        # Create an image for running tests on
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )
    def test_is_portrait(self):
        # The fixture is expected to be landscape-oriented, so not portrait.
        self.assertFalse(self.image.is_portrait())
    def test_is_landscape(self):
        self.assertTrue(self.image.is_landscape())
class TestImagePermissions(TestCase):
    """Image.is_editable_by_user for the four relevant account kinds."""
    def setUp(self):
        # Create some user accounts for testing permissions
        self.user = User.objects.create_user(username='user', email='user@email.com', password='password')
        self.owner = User.objects.create_user(username='owner', email='owner@email.com', password='password')
        self.editor = User.objects.create_user(username='editor', email='editor@email.com', password='password')
        self.editor.groups.add(Group.objects.get(name='Editors'))
        self.administrator = User.objects.create_superuser(username='administrator', email='administrator@email.com', password='password')
        # Owner user must have the add_image permission (the old comment said
        # add_document, but the code grants add_image)
        self.owner.user_permissions.add(Permission.objects.get(codename='add_image'))
        # Create an image for running tests on, uploaded by the owner account
        self.image = Image.objects.create(
            title="Test image",
            uploaded_by_user=self.owner,
            file=get_test_image_file(),
        )
    def test_administrator_can_edit(self):
        self.assertTrue(self.image.is_editable_by_user(self.administrator))
    def test_editor_can_edit(self):
        self.assertTrue(self.image.is_editable_by_user(self.editor))
    def test_owner_can_edit(self):
        # The uploader may edit their own image without Editors membership
        self.assertTrue(self.image.is_editable_by_user(self.owner))
    def test_user_cant_edit(self):
        self.assertFalse(self.image.is_editable_by_user(self.user))
class TestRenditions(TestCase):
    """Image.get_rendition for the width / max / min filter specs."""
    def setUp(self):
        # Create an image for running tests on
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )
    def test_minification(self):
        rendition = self.image.get_rendition('width-400')
        # Check size: width pinned at 400, 4:3 aspect ratio preserved
        self.assertEqual(rendition.width, 400)
        self.assertEqual(rendition.height, 300)
    def test_resize_to_max(self):
        rendition = self.image.get_rendition('max-100x100')
        # Check size: both dimensions fit inside the 100x100 box
        self.assertEqual(rendition.width, 100)
        self.assertEqual(rendition.height, 75)
    def test_resize_to_min(self):
        rendition = self.image.get_rendition('min-120x120')
        # Check size: both dimensions cover the 120x120 box
        self.assertEqual(rendition.width, 160)
        self.assertEqual(rendition.height, 120)
    def test_cache(self):
        # Get two renditions with the same filter
        first_rendition = self.image.get_rendition('width-400')
        second_rendition = self.image.get_rendition('width-400')
        # Check that they are the same rendition.
        # NOTE(review): assertEqual checks model equality, not Python object
        # identity; use assertIs if "same object" is the intended contract.
        self.assertEqual(first_rendition, second_rendition)
class TestImageTag(TestCase):
    """Rendering of the {% image %} template tag."""

    def setUp(self):
        # Create an image for running tests on
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def render_image_tag(self, image, filter_spec):
        """Render {% image %} with the given filter spec and return the HTML."""
        temp = template.Template('{% load image_tags %}{% image image_obj ' + filter_spec + '%}')
        context = template.Context({'image_obj': image})
        return temp.render(context)

    def test_image_tag(self):
        result = self.render_image_tag(self.image, 'width-400')
        # Check that all the required HTML attributes are set.
        # assertIn reports the actual rendered output on failure, unlike the
        # old assertTrue('...' in result) pattern.
        self.assertIn('width="400"', result)
        self.assertIn('height="300"', result)
        self.assertIn('alt="Test image"', result)
## ===== ADMIN VIEWS =====
def get_default_host():
    """Return the host part (after the scheme) of the default Site's root URL."""
    default_site = Site.objects.filter(is_default_site=True).first()
    return default_site.root_url.split('://')[1]
def login(client):
    """Create a superuser account and authenticate *client* as it."""
    # Create a user
    User.objects.create_superuser(username='test', email='test@email.com', password='password')
    # Login
    client.login(username='test', password='password')
class TestImageIndexView(TestCase):
    """Smoke-test the image index (listing) admin view."""

    def setUp(self):
        login(self.client)

    def get(self, params=None):
        # BUG FIX: default was a shared mutable {} (bugbear B006); use None.
        return self.client.get(reverse('wagtailimages_index'), params or {},
                               HTTP_HOST=get_default_host())

    def test_status_code(self):
        self.assertEqual(self.get().status_code, 200)

    def test_search(self):
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")

    def test_pagination(self):
        # Out-of-range and non-numeric page numbers must not error out
        for page in ['0', '1', '-1', '9999', 'Not a page']:
            response = self.get({'p': page})
            self.assertEqual(response.status_code, 200)

    def test_ordering(self):
        for ordering in ['title', '-created_at']:
            response = self.get({'ordering': ordering})
            self.assertEqual(response.status_code, 200)
class TestImageAddView(TestCase):
    """Smoke-test the 'add image' admin view."""

    def setUp(self):
        login(self.client)

    def get(self, params=None):
        # BUG FIX: default was a shared mutable {} (bugbear B006); use None.
        return self.client.get(reverse('wagtailimages_add_image'), params or {},
                               HTTP_HOST=get_default_host())

    def test_status_code(self):
        self.assertEqual(self.get().status_code, 200)
class TestImageEditView(TestCase):
    """Smoke-test the 'edit image' admin view."""

    def setUp(self):
        login(self.client)
        # Create an image to edit
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def get(self, params=None):
        # BUG FIX: default was a shared mutable {} (bugbear B006); use None.
        return self.client.get(reverse('wagtailimages_edit_image', args=(self.image.id,)),
                               params or {}, HTTP_HOST=get_default_host())

    def test_status_code(self):
        self.assertEqual(self.get().status_code, 200)
class TestImageDeleteView(TestCase):
    """Smoke-test the 'delete image' admin view."""

    def setUp(self):
        login(self.client)
        # Create an image to delete
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def get(self, params=None):
        # BUG FIX: default was a shared mutable {} (bugbear B006); use None.
        return self.client.get(reverse('wagtailimages_delete_image', args=(self.image.id,)),
                               params or {}, HTTP_HOST=get_default_host())

    def test_status_code(self):
        self.assertEqual(self.get().status_code, 200)
class TestImageChooserView(TestCase):
    """Smoke-test the image chooser modal view."""

    def setUp(self):
        login(self.client)

    def get(self, params=None):
        # BUG FIX: default was a shared mutable {} (bugbear B006); use None.
        return self.client.get(reverse('wagtailimages_chooser'), params or {},
                               HTTP_HOST=get_default_host())

    def test_status_code(self):
        self.assertEqual(self.get().status_code, 200)

    def test_search(self):
        response = self.get({'q': "Hello"})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], "Hello")

    def test_pagination(self):
        # Out-of-range and non-numeric page numbers must not error out
        for page in ['0', '1', '-1', '9999', 'Not a page']:
            response = self.get({'p': page})
            self.assertEqual(response.status_code, 200)
class TestImageChooserChosenView(TestCase):
    """Smoke-test the chooser's 'image chosen' response view."""

    def setUp(self):
        login(self.client)
        # Create an image to choose
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def get(self, params=None):
        # BUG FIX: default was a shared mutable {} (bugbear B006); use None.
        return self.client.get(reverse('wagtailimages_image_chosen', args=(self.image.id,)),
                               params or {}, HTTP_HOST=get_default_host())

    def test_status_code(self):
        self.assertEqual(self.get().status_code, 200)
class TestImageChooserUploadView(TestCase):
    """Smoke-test the chooser's inline upload view."""

    def setUp(self):
        login(self.client)

    def get(self, params=None):
        # BUG FIX: default was a shared mutable {} (bugbear B006); use None.
        return self.client.get(reverse('wagtailimages_chooser_upload'), params or {},
                               HTTP_HOST=get_default_host())

    def test_status_code(self):
        self.assertEqual(self.get().status_code, 200)
| 32.297297 | 138 | 0.663957 | from django.test import TestCase
from django import template
from django.contrib.auth.models import User, Group, Permission
from django.core.urlresolvers import reverse
from wagtail.wagtailcore.models import Site
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.templatetags import image_tags
def get_test_image_file():
return 'wagtail/wagtailimages/static/wagtailimages/images/test.png'
Image = get_image_model()
class TestImage(TestCase):
def setUp(self):
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def test_is_portrait(self):
self.assertFalse(self.image.is_portrait())
def test_is_landscape(self):
self.assertTrue(self.image.is_landscape())
class TestImagePermissions(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='user', email='user@email.com', password='password')
self.owner = User.objects.create_user(username='owner', email='owner@email.com', password='password')
self.editor = User.objects.create_user(username='editor', email='editor@email.com', password='password')
self.editor.groups.add(Group.objects.get(name='Editors'))
self.administrator = User.objects.create_superuser(username='administrator', email='administrator@email.com', password='password')
self.owner.user_permissions.add(Permission.objects.get(codename='add_image'))
self.image = Image.objects.create(
title="Test image",
uploaded_by_user=self.owner,
file=get_test_image_file(),
)
def test_administrator_can_edit(self):
self.assertTrue(self.image.is_editable_by_user(self.administrator))
def test_editor_can_edit(self):
self.assertTrue(self.image.is_editable_by_user(self.editor))
def test_owner_can_edit(self):
self.assertTrue(self.image.is_editable_by_user(self.owner))
def test_user_cant_edit(self):
self.assertFalse(self.image.is_editable_by_user(self.user))
class TestRenditions(TestCase):
def setUp(self):
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def test_minification(self):
rendition = self.image.get_rendition('width-400')
self.assertEqual(rendition.width, 400)
self.assertEqual(rendition.height, 300)
def test_resize_to_max(self):
rendition = self.image.get_rendition('max-100x100')
self.assertEqual(rendition.width, 100)
self.assertEqual(rendition.height, 75)
def test_resize_to_min(self):
rendition = self.image.get_rendition('min-120x120')
self.assertEqual(rendition.width, 160)
self.assertEqual(rendition.height, 120)
def test_cache(self):
first_rendition = self.image.get_rendition('width-400')
second_rendition = self.image.get_rendition('width-400')
self.assertEqual(first_rendition, second_rendition)
class TestImageTag(TestCase):
def setUp(self):
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def render_image_tag(self, image, filter_spec):
temp = template.Template('{% load image_tags %}{% image image_obj ' + filter_spec + '%}')
context = template.Context({'image_obj': image})
return temp.render(context)
def test_image_tag(self):
result = self.render_image_tag(self.image, 'width-400')
self.assertTrue('width="400"' in result)
self.assertTrue('height="300"' in result)
self.assertTrue('alt="Test image"' in result)
return Site.objects.filter(is_default_site=True).first().root_url.split('://')[1]
def login(client):
User.objects.create_superuser(username='test', email='test@email.com', password='password')
client.login(username='test', password='password')
class TestImageIndexView(TestCase):
def setUp(self):
login(self.client)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_index'), params, HTTP_HOST=get_default_host())
def test_status_code(self):
self.assertEqual(self.get().status_code, 200)
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
def test_ordering(self):
orderings = ['title', '-created_at']
for ordering in orderings:
response = self.get({'ordering': ordering})
self.assertEqual(response.status_code, 200)
class TestImageAddView(TestCase):
def setUp(self):
login(self.client)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_add_image'), params, HTTP_HOST=get_default_host())
def test_status_code(self):
self.assertEqual(self.get().status_code, 200)
class TestImageEditView(TestCase):
def setUp(self):
login(self.client)
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_edit_image', args=(self.image.id,)), params, HTTP_HOST=get_default_host())
def test_status_code(self):
self.assertEqual(self.get().status_code, 200)
class TestImageDeleteView(TestCase):
def setUp(self):
login(self.client)
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_delete_image', args=(self.image.id,)), params, HTTP_HOST=get_default_host())
def test_status_code(self):
self.assertEqual(self.get().status_code, 200)
class TestImageChooserView(TestCase):
def setUp(self):
login(self.client)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_chooser'), params, HTTP_HOST=get_default_host())
def test_status_code(self):
self.assertEqual(self.get().status_code, 200)
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestImageChooserChosenView(TestCase):
def setUp(self):
login(self.client)
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_image_chosen', args=(self.image.id,)), params, HTTP_HOST=get_default_host())
def test_status_code(self):
self.assertEqual(self.get().status_code, 200)
class TestImageChooserUploadView(TestCase):
def setUp(self):
login(self.client)
def get(self, params={}):
return self.client.get(reverse('wagtailimages_chooser_upload'), params, HTTP_HOST=get_default_host())
def test_status_code(self):
self.assertEqual(self.get().status_code, 200)
| true | true |
1c35f943fd50eb5f610f8e4b95f819e880e64d1e | 2,049 | py | Python | main/migrations/0003_auto_20201119_2033.py | vestial/vision-video-analyzer | f5c5f9c0f0522008e86641648fd1591507ca8f6b | [
"MIT"
] | 1 | 2020-10-30T00:49:21.000Z | 2020-10-30T00:49:21.000Z | main/migrations/0003_auto_20201119_2033.py | vestial/vision-video-analyzer | f5c5f9c0f0522008e86641648fd1591507ca8f6b | [
"MIT"
] | null | null | null | main/migrations/0003_auto_20201119_2033.py | vestial/vision-video-analyzer | f5c5f9c0f0522008e86641648fd1591507ca8f6b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-19 20:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.2): relax six Video metadata columns.

    bit_depth, bit_rate, frame_rate, resolution, sample_rate and
    shutter_speed each become CharField(blank=True, null=True,
    max_length=64) with the string default 'Unknown'.
    """
    dependencies = [
        ('main', '0002_auto_20201119_2021'),
    ]
    operations = [
        migrations.AlterField(
            model_name='video',
            name='bit_depth',
            field=models.CharField(blank=True,
                                   default='Unknown',
                                   max_length=64,
                                   null=True),
        ),
        migrations.AlterField(
            model_name='video',
            name='bit_rate',
            field=models.CharField(blank=True,
                                   default='Unknown',
                                   max_length=64,
                                   null=True),
        ),
        migrations.AlterField(
            model_name='video',
            name='frame_rate',
            field=models.CharField(blank=True,
                                   default='Unknown',
                                   max_length=64,
                                   null=True),
        ),
        migrations.AlterField(
            model_name='video',
            name='resolution',
            field=models.CharField(blank=True,
                                   default='Unknown',
                                   max_length=64,
                                   null=True),
        ),
        migrations.AlterField(
            model_name='video',
            name='sample_rate',
            field=models.CharField(blank=True,
                                   default='Unknown',
                                   max_length=64,
                                   null=True),
        ),
        migrations.AlterField(
            model_name='video',
            name='shutter_speed',
            field=models.CharField(blank=True,
                                   default='Unknown',
                                   max_length=64,
                                   null=True),
        ),
    ]
| 33.048387 | 53 | 0.401659 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20201119_2021'),
]
operations = [
migrations.AlterField(
model_name='video',
name='bit_depth',
field=models.CharField(blank=True,
default='Unknown',
max_length=64,
null=True),
),
migrations.AlterField(
model_name='video',
name='bit_rate',
field=models.CharField(blank=True,
default='Unknown',
max_length=64,
null=True),
),
migrations.AlterField(
model_name='video',
name='frame_rate',
field=models.CharField(blank=True,
default='Unknown',
max_length=64,
null=True),
),
migrations.AlterField(
model_name='video',
name='resolution',
field=models.CharField(blank=True,
default='Unknown',
max_length=64,
null=True),
),
migrations.AlterField(
model_name='video',
name='sample_rate',
field=models.CharField(blank=True,
default='Unknown',
max_length=64,
null=True),
),
migrations.AlterField(
model_name='video',
name='shutter_speed',
field=models.CharField(blank=True,
default='Unknown',
max_length=64,
null=True),
),
]
| true | true |
1c35f95481c364cc4083287ba3cea9fbdc4a937f | 1,794 | py | Python | bw2io/importers/ecospold2_biosphere.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | bw2io/importers/ecospold2_biosphere.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | bw2io/importers/ecospold2_biosphere.py | mfastudillo/brightway2-io | dc383ddb6003a46e78259aeb7f87b9d80965d689 | [
"BSD-3-Clause"
] | null | null | null | from .base_lci import LCIImporter
from ..strategies import drop_unspecified_subcategories, normalize_units, ensure_categories_are_tuples
from bw2data.utils import recursive_str_to_unicode
from lxml import objectify
import os
import json
# Top-level compartments whose flows are classified as type "emission".
# Any other top-level compartment (e.g. 'natural resource') is used verbatim
# as the flow type — see extract_flow_data() below.
EMISSIONS_CATEGORIES = {
    "air": "emission",
    "soil": "emission",
    "water": "emission",
}
class Ecospold2BiosphereImporter(LCIImporter):
    """Importer for the ecoinvent elementary (biosphere) flow list.

    Parses the bundled ``ecoinvent elementary flows 3.7.xml`` file and appends
    flows from earlier releases stored in ``previous elementary flows.json``.
    """

    format = "Ecoinvent XML"

    def __init__(self, name="biosphere3"):
        """Extract all flows into ``self.data`` for the database *name*."""
        self.db_name = name
        self.data = self.extract()
        self.strategies = [
            normalize_units,
            drop_unspecified_subcategories,
            ensure_categories_are_tuples,
        ]

    def extract(self):
        """Return the list of elementary-flow datasets for ``self.db_name``."""

        def extract_flow_data(o):
            # Convert one XML elementary-exchange node into a dataset dict.
            ds = {
                "categories": (
                    o.compartment.compartment.text,
                    o.compartment.subcompartment.text,
                ),
                "code": o.get("id"),
                "CAS number": o.get("casNumber"),
                "name": o.name.text,
                "database": self.db_name,
                "exchanges": [],
                "unit": o.unitName.text,
            }
            # Air/soil/water flows are emissions; any other top-level
            # compartment name is reused directly as the flow type.
            ds["type"] = EMISSIONS_CATEGORIES.get(
                ds["categories"][0], ds["categories"][0]
            )
            return ds

        lci_dirpath = os.path.join(os.path.dirname(__file__), "..", "data", "lci")
        fp = os.path.join(lci_dirpath, "ecoinvent elementary flows 3.7.xml")
        # Use context managers so both file handles are closed deterministically
        # (the previous implementation leaked them).
        with open(fp, encoding="utf-8") as xml_file:
            root = objectify.parse(xml_file).getroot()
        flow_data = recursive_str_to_unicode(
            [extract_flow_data(ds) for ds in root.iterchildren()]
        )
        previous = os.path.join(lci_dirpath, "previous elementary flows.json")
        with open(previous, encoding="utf-8") as json_file:
            return flow_data + json.load(json_file)
| 32.035714 | 102 | 0.580825 | from .base_lci import LCIImporter
from ..strategies import drop_unspecified_subcategories, normalize_units, ensure_categories_are_tuples
from bw2data.utils import recursive_str_to_unicode
from lxml import objectify
import os
import json
# Top-level compartments whose flows are classified as type "emission".
# Any other top-level compartment (e.g. 'natural resource') is used verbatim
# as the flow type — see extract_flow_data() below.
EMISSIONS_CATEGORIES = {
    "air": "emission",
    "soil": "emission",
    "water": "emission",
}
class Ecospold2BiosphereImporter(LCIImporter):
    """Importer for the ecoinvent elementary (biosphere) flow list.

    Parses the bundled ``ecoinvent elementary flows 3.7.xml`` file and appends
    flows from earlier releases stored in ``previous elementary flows.json``.
    """

    format = "Ecoinvent XML"

    def __init__(self, name="biosphere3"):
        """Extract all flows into ``self.data`` for the database *name*."""
        self.db_name = name
        self.data = self.extract()
        self.strategies = [
            normalize_units,
            drop_unspecified_subcategories,
            ensure_categories_are_tuples,
        ]

    def extract(self):
        """Return the list of elementary-flow datasets for ``self.db_name``."""

        def extract_flow_data(o):
            # Convert one XML elementary-exchange node into a dataset dict.
            ds = {
                "categories": (
                    o.compartment.compartment.text,
                    o.compartment.subcompartment.text,
                ),
                "code": o.get("id"),
                "CAS number": o.get("casNumber"),
                "name": o.name.text,
                "database": self.db_name,
                "exchanges": [],
                "unit": o.unitName.text,
            }
            # Air/soil/water flows are emissions; any other top-level
            # compartment name is reused directly as the flow type.
            ds["type"] = EMISSIONS_CATEGORIES.get(
                ds["categories"][0], ds["categories"][0]
            )
            return ds

        lci_dirpath = os.path.join(os.path.dirname(__file__), "..", "data", "lci")
        fp = os.path.join(lci_dirpath, "ecoinvent elementary flows 3.7.xml")
        # Use context managers so both file handles are closed deterministically
        # (the previous implementation leaked them).
        with open(fp, encoding="utf-8") as xml_file:
            root = objectify.parse(xml_file).getroot()
        flow_data = recursive_str_to_unicode(
            [extract_flow_data(ds) for ds in root.iterchildren()]
        )
        previous = os.path.join(lci_dirpath, "previous elementary flows.json")
        with open(previous, encoding="utf-8") as json_file:
            return flow_data + json.load(json_file)
| true | true |
1c35fa017b3576d8736302353378d401f2b55622 | 18,486 | py | Python | src/azure-cli/azure/cli/command_modules/servicefabric/_validators.py | zackliu/azure-cli | 680f8339ac010a89d4063566fabc5991abc8a4c2 | [
"MIT"
] | 2 | 2021-03-24T21:06:25.000Z | 2021-03-24T21:07:59.000Z | src/azure-cli/azure/cli/command_modules/servicefabric/_validators.py | zackliu/azure-cli | 680f8339ac010a89d4063566fabc5991abc8a4c2 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/servicefabric/_validators.py | zackliu/azure-cli | 680f8339ac010a89d4063566fabc5991abc8a4c2 | [
"MIT"
] | 9 | 2020-02-12T22:53:00.000Z | 2021-06-09T18:59:41.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrestazure.azure_exceptions import CloudError
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.azclierror import ValidationError
from azure.cli.command_modules.servicefabric._sf_utils import _get_resource_group_by_name
from azure.mgmt.servicefabricmanagedclusters.models import (PartitionScheme, ServiceKind)
from ._client_factory import servicefabric_client_factory, servicefabric_managed_client_factory
logger = get_logger(__name__)
def validate_create_service(namespace):
    """Validate arguments for 'az sf service create'.

    Stateless services require --instance-count; stateful services require
    --target-replica-set-size and --min-replica-set-size. The service name
    must be prefixed with the application name (e.g. 'app~service').
    """
    if namespace.state == 'stateless':
        if namespace.target_replica_set_size or namespace.min_replica_set_size:
            raise CLIError("--target-replica-set-size and --min-replica-set-size should only be used with "
                           "--state stateful")
        if not namespace.instance_count:
            raise CLIError("--instance-count is required")
    else:  # stateful
        if namespace.instance_count:
            raise CLIError("Unexpected parameter --instance-count should only be used with --state stateless")
        if not namespace.target_replica_set_size or not namespace.min_replica_set_size:
            raise CLIError("--target-replica-set-size and --min-replica-set-size are required")

    # Service Fabric uses hierarchical naming: '<application>~<service>'.
    if not namespace.service_name.startswith(namespace.application_name):
        raise CLIError("Invalid service name, the application name must be a prefix of the service name, "
                       "for example: '{}~{}'".format(namespace.application_name, namespace.service_name))
def validate_update_application(cmd, namespace):
    """Validate arguments for 'az sf application update'.

    Ensures the application exists, that any requested type version is
    registered in the cluster, and coerces numeric string arguments to int.
    """
    client = servicefabric_client_factory(cmd.cli_ctx)
    app = _safe_get_resource(client.applications.get,
                             (namespace.resource_group_name, namespace.cluster_name, namespace.application_name))
    if app is None:
        raise CLIError("Application '{}' Not Found.".format(namespace.application_name))

    if namespace.application_type_version is not None:
        # Upgrading to the currently deployed version is a no-op; reject early.
        if app.type_version == namespace.application_type_version:
            raise CLIError("The application '{}' is already running with type version '{}'."
                           .format(app.name, app.type_version))
        type_version = _safe_get_resource(client.application_type_versions.get,
                                          (namespace.resource_group_name,
                                           namespace.cluster_name,
                                           app.type_name,
                                           namespace.application_type_version))
        if type_version is None:
            raise CLIError("Application type version {}:{} not found. "
                           "Create the type version before running this command."
                           .format(app.type_name, namespace.application_type_version))

    # argparse delivers these as strings; convert so the SDK gets integers.
    if namespace.upgrade_replica_set_check_timeout:
        namespace.upgrade_replica_set_check_timeout = int(namespace.upgrade_replica_set_check_timeout)
    if namespace.health_check_stable_duration:
        namespace.health_check_stable_duration = int(namespace.health_check_stable_duration)
    if namespace.health_check_retry_timeout:
        namespace.health_check_retry_timeout = int(namespace.health_check_retry_timeout)
    if namespace.health_check_wait_duration:
        namespace.health_check_wait_duration = int(namespace.health_check_wait_duration)
    if namespace.upgrade_timeout:
        namespace.upgrade_timeout = int(namespace.upgrade_timeout)
    if namespace.upgrade_domain_timeout:
        namespace.upgrade_domain_timeout = int(namespace.upgrade_domain_timeout)

    if namespace.minimum_nodes:
        namespace.minimum_nodes = int(namespace.minimum_nodes)
        if namespace.minimum_nodes < 0:
            raise CLIError("minimum_nodes should be a non-negative integer.")
    if namespace.maximum_nodes:
        namespace.maximum_nodes = int(namespace.maximum_nodes)
        if namespace.maximum_nodes < 0:
            raise CLIError("maximum_nodes should be a non-negative integer.")
def validate_create_application(cmd, namespace):
    """Validate arguments for 'az sf application create'.

    Without --package-url, the referenced application type version must
    already exist in the cluster. Node-count arguments are coerced to
    non-negative integers.
    """
    client = servicefabric_client_factory(cmd.cli_ctx)
    if namespace.package_url is None:
        existing_version = _safe_get_resource(
            client.application_type_versions.get,
            (namespace.resource_group_name,
             namespace.cluster_name,
             namespace.application_type_name,
             namespace.application_type_version))
        if existing_version is None:
            raise CLIError("Application type version {}:{} not found. "
                           "Create the type version before running this command or use --package-url to create it."
                           .format(namespace.application_type_name, namespace.application_type_version))

    # Coerce and range-check both node-count arguments in declaration order.
    for attr in ('minimum_nodes', 'maximum_nodes'):
        raw_value = getattr(namespace, attr)
        if raw_value:
            count = int(raw_value)
            setattr(namespace, attr, count)
            if count < 0:
                raise CLIError("{} should be a non-negative integer.".format(attr))
# Managed Clusters
def validate_create_managed_cluster(cmd, namespace):
    """Validate arguments for 'az sf managed-cluster create'.

    --location is required when the resource group does not exist yet, and
    --client-cert-common-name is required whenever --client-cert-issuer-thumbprint
    is provided.
    """
    rg = _get_resource_group_by_name(cmd.cli_ctx, namespace.resource_group_name)
    if rg is None and namespace.location is None:
        raise CLIError("Resource group {} doesn't exist and location is not provided. "
                       "Either create the resource group before running this command or provide the location parameter."
                       .format(namespace.resource_group_name))
    if namespace.client_cert_issuer_thumbprint is not None:
        if namespace.client_cert_common_name is None:
            raise CLIError("--client-cert-issuer-thumbprint should be used with --client-cert-common-name.")
def validate_create_managed_service(namespace):
    """Validate arguments for 'az sf managed-service create'.

    Checks the replica/instance arguments required by the service state and
    the arguments required by the chosen partition scheme, converting the
    numeric ones from string to int in place.
    """
    if namespace.service_type is None:
        raise CLIError("--service-type is required")

    if namespace.state.lower() == ServiceKind.STATELESS.lower():
        if namespace.target_replica_set_size or namespace.min_replica_set_size:
            raise ValidationError("--target-replica-set-size and --min-replica-set-size should only be used with "
                                  "--state stateful")
        if not namespace.instance_count:
            raise ValidationError("--instance-count is required")
        namespace.instance_count = int(namespace.instance_count)
    elif namespace.state.lower() == ServiceKind.STATEFUL.lower():
        if namespace.instance_count:
            raise ValidationError("Unexpected parameter --instance-count should only be used with --state stateless")
        if not namespace.target_replica_set_size or not namespace.min_replica_set_size:
            raise ValidationError("--target-replica-set-size and --min-replica-set-size are required")
        namespace.target_replica_set_size = int(namespace.target_replica_set_size)
        namespace.min_replica_set_size = int(namespace.min_replica_set_size)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % namespace.state)

    if namespace.partition_scheme is None:
        raise ValidationError("--partition-scheme is required")
    if namespace.partition_scheme.lower() == PartitionScheme.NAMED.lower():
        if namespace.partition_names is None:
            raise ValidationError("--partition-names is required for partition scheme '%s'"
                                  % namespace.partition_scheme)
    elif namespace.partition_scheme.lower() == PartitionScheme.SINGLETON.lower():
        pass  # No parameters needed for singleton
    elif namespace.partition_scheme.lower() == PartitionScheme.UNIFORM_INT64_RANGE.lower():
        if namespace.partition_count is None or namespace.low_key is None or namespace.high_key is None:
            raise ValidationError(
                "--partition-count, --low-key and --high-key are required for partition scheme '%s'"
                % namespace.partition_scheme)
        namespace.partition_count = int(namespace.partition_count)
        namespace.low_key = int(namespace.low_key)
        namespace.high_key = int(namespace.high_key)
    else:
        raise ValidationError(
            "Invalid --partition_scheme '%s': service partition_scheme is not valid." % namespace.partition_scheme)
def validate_update_managed_service(cmd, namespace):
    """Validate arguments for 'az sf managed-service update'.

    Looks up the existing service to learn its kind, then validates and
    converts the replica/instance arguments accordingly.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))
    # Fail with a clear message rather than an AttributeError below
    # (consistent with the other managed-service validators).
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))

    if service.properties.service_kind.lower() == ServiceKind.STATELESS.lower():
        if namespace.target_replica_set_size or namespace.min_replica_set_size:
            raise ValidationError("--target-replica-set-size and --min-replica-set-size should only be used with "
                                  "--state stateful")
        if namespace.instance_count is not None:
            namespace.instance_count = int(namespace.instance_count)
    elif service.properties.service_kind.lower() == ServiceKind.STATEFUL.lower():
        if namespace.instance_count:
            raise ValidationError("Unexpected parameter --instance-count should only be used with --state stateless")
        if namespace.target_replica_set_size is not None:
            namespace.target_replica_set_size = int(namespace.target_replica_set_size)
        if namespace.min_replica_set_size is not None:
            namespace.min_replica_set_size = int(namespace.min_replica_set_size)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % service.properties.service_kind)
def validate_create_managed_service_load_metric(cmd, namespace):
    """Validate arguments for 'az sf managed-service load-metrics create'.

    Stateless services take --default-load; stateful services take
    --primary-default-load and --secondary-default-load. Metric names must
    be unique within the service.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
    # The allowed arguments depend on the service kind of the existing service.
    if service.properties.service_kind.lower() == ServiceKind.STATELESS.lower():
        if namespace.metric_name is None or namespace.weight is None or namespace.default_load is None:
            raise ValidationError("--metric-name, --weight and --default-load are required")
        if namespace.primary_default_load is not None or namespace.secondary_default_load is not None:
            raise ValidationError(
                "--primary-default-load and --secondary-default-load can only be used for stateful services."
            )
        # argparse delivers the load as a string; the SDK expects an int.
        namespace.default_load = int(namespace.default_load)
    elif service.properties.service_kind.lower() == ServiceKind.STATEFUL.lower():
        if namespace.metric_name is None or namespace.weight is None or \
                namespace.primary_default_load is None or namespace.secondary_default_load is None:
            raise ValidationError("--metric-name, --weight, --primary-default-load and "
                                  "--secondary-default-load are required")
        if namespace.default_load is not None:
            raise ValidationError("--default-load can only be used for stateless services.")
        namespace.primary_default_load = int(namespace.primary_default_load)
        namespace.secondary_default_load = int(namespace.secondary_default_load)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % service.properties.service_kind)
    # Reject a metric name that already exists on the service.
    if any(namespace.metric_name == metric.name for metric in service.properties.service_load_metrics):
        raise ValidationError("Duplicate metric names are not allowed: %s." % namespace.metric_name)
def validate_update_managed_service_load_metric(cmd, namespace):
    """Validate arguments for 'az sf managed-service load-metrics update'.

    The allowed default-load arguments depend on whether the existing
    service is stateless or stateful; numeric arguments are converted to
    int in place.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))
    # ValidationError (a CLIError subclass) for consistency with the matching
    # 'create' load-metric validator.
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
    if service.properties.service_kind.lower() == ServiceKind.STATELESS.lower():
        if namespace.primary_default_load is not None or namespace.secondary_default_load is not None:
            raise ValidationError(
                "--primary-default-load and --secondary-default-load can only be used for stateful services."
            )
        if namespace.default_load is not None:
            namespace.default_load = int(namespace.default_load)
    elif service.properties.service_kind.lower() == ServiceKind.STATEFUL.lower():
        if namespace.default_load is not None:
            raise ValidationError("--default-load can only be used for stateless services.")
        if namespace.primary_default_load is not None:
            namespace.primary_default_load = int(namespace.primary_default_load)
        if namespace.secondary_default_load is not None:
            namespace.secondary_default_load = int(namespace.secondary_default_load)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % service.properties.service_kind)
def validate_create_managed_service_correlation(cmd, namespace):
    """Validate arguments for creating a managed-service correlation scheme.

    The target service must exist and must not already have a correlation.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    lookup_args = (namespace.resource_group_name, namespace.cluster_name,
                   namespace.application_name, namespace.service_name)
    service = _safe_get_resource(client.services.get, lookup_args)
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
    if service.properties.correlation_scheme:
        raise ValidationError("There can only be one service correlation per service.")
def validate_update_managed_service_correlation(cmd, namespace):
    """Validate arguments for updating a managed-service correlation scheme.

    Only checks that the target service exists.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    lookup_args = (namespace.resource_group_name, namespace.cluster_name,
                   namespace.application_name, namespace.service_name)
    service = _safe_get_resource(client.services.get, lookup_args)
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
def validate_update_managed_application(cmd, namespace):
    """Validate arguments for 'az sf managed-application update'.

    Ensures the application exists, that any requested type version is
    registered, and converts the upgrade-policy timeouts to int.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    app = _safe_get_resource(client.applications.get,
                             (namespace.resource_group_name, namespace.cluster_name, namespace.application_name))
    if app is None:
        raise CLIError("Application '{}' Not Found.".format(namespace.application_name))

    if namespace.application_type_version is not None:
        if app.version.endswith(namespace.application_type_version):
            raise ValidationError("The application '{}' is already running with type version '{}'."
                                  .format(app.name, app.version))
        # app.version holds the full resource id of the type version; the type
        # name is the third segment from the end.
        app_type_name = app.version.split("/")[-3]
        type_version = _safe_get_resource(client.application_type_versions.get,
                                          (namespace.resource_group_name,
                                           namespace.cluster_name,
                                           app_type_name,
                                           namespace.application_type_version))
        if type_version is None:
            # Use the parsed app_type_name here: managed application models do
            # not expose 'type_name', so the previous 'app.type_name' raised
            # AttributeError instead of the intended error message.
            raise ValidationError("Application type version {}:{} not found. "
                                  "Create the type version before running this command."
                                  .format(app_type_name, namespace.application_type_version))

    # argparse delivers these as strings; convert so the SDK gets integers.
    if namespace.upgrade_replica_set_check_timeout:
        namespace.upgrade_replica_set_check_timeout = int(namespace.upgrade_replica_set_check_timeout)
    if namespace.health_check_stable_duration:
        namespace.health_check_stable_duration = int(namespace.health_check_stable_duration)
    if namespace.health_check_retry_timeout:
        namespace.health_check_retry_timeout = int(namespace.health_check_retry_timeout)
    if namespace.health_check_wait_duration:
        namespace.health_check_wait_duration = int(namespace.health_check_wait_duration)
    if namespace.upgrade_timeout:
        namespace.upgrade_timeout = int(namespace.upgrade_timeout)
    if namespace.upgrade_domain_timeout:
        namespace.upgrade_domain_timeout = int(namespace.upgrade_domain_timeout)
def validate_create_managed_application(cmd, namespace):
    """Validate arguments for 'az sf managed-application create'.

    Without --package-url, the referenced application type version must
    already be registered in the managed cluster.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    if namespace.package_url is not None:
        return
    existing_version = _safe_get_resource(client.application_type_versions.get,
                                          (namespace.resource_group_name,
                                           namespace.cluster_name,
                                           namespace.application_type_name,
                                           namespace.application_type_version))
    if existing_version is None:
        raise ValidationError("Application type version {}:{} not found. "
                              "Create the type version before running this "
                              "command or use --package-url to create it."
                              .format(namespace.application_type_name, namespace.application_type_version))
# Helpers
def _safe_get_resource(getResourceAction, params):
    """Call ``getResourceAction(*params)`` and return the resource, or None
    when it does not exist.

    A CloudError whose code is 'ResourceNotFound' is mapped to None; any
    other CloudError is logged and re-raised.
    """
    try:
        return getResourceAction(*params)
    except CloudError as ex:
        if ex.error.error == 'ResourceNotFound':
            return None
        logger.warning("Unable to get resource, exception: %s", ex)
        raise
| 58.315457 | 120 | 0.679054 |
from msrestazure.azure_exceptions import CloudError
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.azclierror import ValidationError
from azure.cli.command_modules.servicefabric._sf_utils import _get_resource_group_by_name
from azure.mgmt.servicefabricmanagedclusters.models import (PartitionScheme, ServiceKind)
from ._client_factory import servicefabric_client_factory, servicefabric_managed_client_factory
logger = get_logger(__name__)
def validate_create_service(namespace):
    """Validate arguments for 'az sf service create'.

    Stateless services require --instance-count; stateful services require
    --target-replica-set-size and --min-replica-set-size. The service name
    must be prefixed with the application name (e.g. 'app~service').
    """
    if namespace.state == 'stateless':
        if namespace.target_replica_set_size or namespace.min_replica_set_size:
            raise CLIError("--target-replica-set-size and --min-replica-set-size should only be used with "
                           "--state stateful")
        if not namespace.instance_count:
            raise CLIError("--instance-count is required")
    else:  # stateful
        if namespace.instance_count:
            raise CLIError("Unexpected parameter --instance-count should only be used with --state stateless")
        if not namespace.target_replica_set_size or not namespace.min_replica_set_size:
            raise CLIError("--target-replica-set-size and --min-replica-set-size are required")

    # Service Fabric uses hierarchical naming: '<application>~<service>'.
    if not namespace.service_name.startswith(namespace.application_name):
        raise CLIError("Invalid service name, the application name must be a prefix of the service name, "
                       "for example: '{}~{}'".format(namespace.application_name, namespace.service_name))
def validate_update_application(cmd, namespace):
    """Validate arguments for 'az sf application update'.

    Ensures the application exists, that any requested type version is
    registered in the cluster, and coerces numeric string arguments to int.
    """
    client = servicefabric_client_factory(cmd.cli_ctx)
    app = _safe_get_resource(client.applications.get,
                             (namespace.resource_group_name, namespace.cluster_name, namespace.application_name))
    if app is None:
        raise CLIError("Application '{}' Not Found.".format(namespace.application_name))

    if namespace.application_type_version is not None:
        # Upgrading to the currently deployed version is a no-op; reject early.
        if app.type_version == namespace.application_type_version:
            raise CLIError("The application '{}' is already running with type version '{}'."
                           .format(app.name, app.type_version))
        type_version = _safe_get_resource(client.application_type_versions.get,
                                          (namespace.resource_group_name,
                                           namespace.cluster_name,
                                           app.type_name,
                                           namespace.application_type_version))
        if type_version is None:
            raise CLIError("Application type version {}:{} not found. "
                           "Create the type version before running this command."
                           .format(app.type_name, namespace.application_type_version))

    # argparse delivers these as strings; convert so the SDK gets integers.
    if namespace.upgrade_replica_set_check_timeout:
        namespace.upgrade_replica_set_check_timeout = int(namespace.upgrade_replica_set_check_timeout)
    if namespace.health_check_stable_duration:
        namespace.health_check_stable_duration = int(namespace.health_check_stable_duration)
    if namespace.health_check_retry_timeout:
        namespace.health_check_retry_timeout = int(namespace.health_check_retry_timeout)
    if namespace.health_check_wait_duration:
        namespace.health_check_wait_duration = int(namespace.health_check_wait_duration)
    if namespace.upgrade_timeout:
        namespace.upgrade_timeout = int(namespace.upgrade_timeout)
    if namespace.upgrade_domain_timeout:
        namespace.upgrade_domain_timeout = int(namespace.upgrade_domain_timeout)

    if namespace.minimum_nodes:
        namespace.minimum_nodes = int(namespace.minimum_nodes)
        if namespace.minimum_nodes < 0:
            raise CLIError("minimum_nodes should be a non-negative integer.")
    if namespace.maximum_nodes:
        namespace.maximum_nodes = int(namespace.maximum_nodes)
        if namespace.maximum_nodes < 0:
            raise CLIError("maximum_nodes should be a non-negative integer.")
def validate_create_application(cmd, namespace):
    """Validate arguments for 'az sf application create'.

    Without --package-url, the referenced application type version must
    already exist in the cluster. Node-count arguments are coerced to
    non-negative integers.
    """
    client = servicefabric_client_factory(cmd.cli_ctx)
    if namespace.package_url is None:
        existing_version = _safe_get_resource(
            client.application_type_versions.get,
            (namespace.resource_group_name,
             namespace.cluster_name,
             namespace.application_type_name,
             namespace.application_type_version))
        if existing_version is None:
            raise CLIError("Application type version {}:{} not found. "
                           "Create the type version before running this command or use --package-url to create it."
                           .format(namespace.application_type_name, namespace.application_type_version))

    # Coerce and range-check both node-count arguments in declaration order.
    for attr in ('minimum_nodes', 'maximum_nodes'):
        raw_value = getattr(namespace, attr)
        if raw_value:
            count = int(raw_value)
            setattr(namespace, attr, count)
            if count < 0:
                raise CLIError("{} should be a non-negative integer.".format(attr))
def validate_create_managed_cluster(cmd, namespace):
    """Validate arguments for 'az sf managed-cluster create'.

    --location is required when the resource group does not exist yet, and
    --client-cert-common-name is required whenever --client-cert-issuer-thumbprint
    is provided.
    """
    rg = _get_resource_group_by_name(cmd.cli_ctx, namespace.resource_group_name)
    if rg is None and namespace.location is None:
        raise CLIError("Resource group {} doesn't exist and location is not provided. "
                       "Either create the resource group before running this command or provide the location parameter."
                       .format(namespace.resource_group_name))
    if namespace.client_cert_issuer_thumbprint is not None:
        if namespace.client_cert_common_name is None:
            raise CLIError("--client-cert-issuer-thumbprint should be used with --client-cert-common-name.")
def validate_create_managed_service(namespace):
    """Validate arguments for 'az sf managed-service create'.

    Checks the replica/instance arguments required by the service state and
    the arguments required by the chosen partition scheme, converting the
    numeric ones from string to int in place.
    """
    if namespace.service_type is None:
        raise CLIError("--service-type is required")

    if namespace.state.lower() == ServiceKind.STATELESS.lower():
        if namespace.target_replica_set_size or namespace.min_replica_set_size:
            raise ValidationError("--target-replica-set-size and --min-replica-set-size should only be used with "
                                  "--state stateful")
        if not namespace.instance_count:
            raise ValidationError("--instance-count is required")
        namespace.instance_count = int(namespace.instance_count)
    elif namespace.state.lower() == ServiceKind.STATEFUL.lower():
        if namespace.instance_count:
            raise ValidationError("Unexpected parameter --instance-count should only be used with --state stateless")
        if not namespace.target_replica_set_size or not namespace.min_replica_set_size:
            raise ValidationError("--target-replica-set-size and --min-replica-set-size are required")
        namespace.target_replica_set_size = int(namespace.target_replica_set_size)
        namespace.min_replica_set_size = int(namespace.min_replica_set_size)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % namespace.state)

    if namespace.partition_scheme is None:
        raise ValidationError("--partition-scheme is required")
    if namespace.partition_scheme.lower() == PartitionScheme.NAMED.lower():
        if namespace.partition_names is None:
            raise ValidationError("--partition-names is required for partition scheme '%s'"
                                  % namespace.partition_scheme)
    elif namespace.partition_scheme.lower() == PartitionScheme.SINGLETON.lower():
        pass  # No parameters needed for singleton
    elif namespace.partition_scheme.lower() == PartitionScheme.UNIFORM_INT64_RANGE.lower():
        if namespace.partition_count is None or namespace.low_key is None or namespace.high_key is None:
            raise ValidationError(
                "--partition-count, --low-key and --high-key are required for partition scheme '%s'"
                % namespace.partition_scheme)
        namespace.partition_count = int(namespace.partition_count)
        namespace.low_key = int(namespace.low_key)
        namespace.high_key = int(namespace.high_key)
    else:
        raise ValidationError(
            "Invalid --partition_scheme '%s': service partition_scheme is not valid." % namespace.partition_scheme)
def validate_update_managed_service(cmd, namespace):
    """Validate arguments for 'az sf managed-service update'.

    Looks up the existing service to learn its kind, then validates and
    converts the replica/instance arguments accordingly.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))
    # Fail with a clear message rather than an AttributeError below
    # (consistent with the other managed-service validators).
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))

    if service.properties.service_kind.lower() == ServiceKind.STATELESS.lower():
        if namespace.target_replica_set_size or namespace.min_replica_set_size:
            raise ValidationError("--target-replica-set-size and --min-replica-set-size should only be used with "
                                  "--state stateful")
        if namespace.instance_count is not None:
            namespace.instance_count = int(namespace.instance_count)
    elif service.properties.service_kind.lower() == ServiceKind.STATEFUL.lower():
        if namespace.instance_count:
            raise ValidationError("Unexpected parameter --instance-count should only be used with --state stateless")
        if namespace.target_replica_set_size is not None:
            namespace.target_replica_set_size = int(namespace.target_replica_set_size)
        if namespace.min_replica_set_size is not None:
            namespace.min_replica_set_size = int(namespace.min_replica_set_size)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % service.properties.service_kind)
def validate_create_managed_service_load_metric(cmd, namespace):
    """Validate arguments for 'az sf managed-service load-metrics create'.

    Stateless services take --default-load; stateful services take
    --primary-default-load and --secondary-default-load. Metric names must
    be unique within the service.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
    # The allowed arguments depend on the service kind of the existing service.
    if service.properties.service_kind.lower() == ServiceKind.STATELESS.lower():
        if namespace.metric_name is None or namespace.weight is None or namespace.default_load is None:
            raise ValidationError("--metric-name, --weight and --default-load are required")
        if namespace.primary_default_load is not None or namespace.secondary_default_load is not None:
            raise ValidationError(
                "--primary-default-load and --secondary-default-load can only be used for stateful services."
            )
        # argparse delivers the load as a string; the SDK expects an int.
        namespace.default_load = int(namespace.default_load)
    elif service.properties.service_kind.lower() == ServiceKind.STATEFUL.lower():
        if namespace.metric_name is None or namespace.weight is None or \
                namespace.primary_default_load is None or namespace.secondary_default_load is None:
            raise ValidationError("--metric-name, --weight, --primary-default-load and "
                                  "--secondary-default-load are required")
        if namespace.default_load is not None:
            raise ValidationError("--default-load can only be used for stateless services.")
        namespace.primary_default_load = int(namespace.primary_default_load)
        namespace.secondary_default_load = int(namespace.secondary_default_load)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % service.properties.service_kind)
    # Reject a metric name that already exists on the service.
    if any(namespace.metric_name == metric.name for metric in service.properties.service_load_metrics):
        raise ValidationError("Duplicate metric names are not allowed: %s." % namespace.metric_name)
def validate_update_managed_service_load_metric(cmd, namespace):
    """Validate arguments for 'az sf managed-service load-metrics update'.

    The allowed default-load arguments depend on whether the existing
    service is stateless or stateful; numeric arguments are converted to
    int in place.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    service = _safe_get_resource(client.services.get,
                                 (namespace.resource_group_name, namespace.cluster_name,
                                  namespace.application_name, namespace.service_name))
    # ValidationError (a CLIError subclass) for consistency with the matching
    # 'create' load-metric validator.
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
    if service.properties.service_kind.lower() == ServiceKind.STATELESS.lower():
        if namespace.primary_default_load is not None or namespace.secondary_default_load is not None:
            raise ValidationError(
                "--primary-default-load and --secondary-default-load can only be used for stateful services."
            )
        if namespace.default_load is not None:
            namespace.default_load = int(namespace.default_load)
    elif service.properties.service_kind.lower() == ServiceKind.STATEFUL.lower():
        if namespace.default_load is not None:
            raise ValidationError("--default-load can only be used for stateless services.")
        if namespace.primary_default_load is not None:
            namespace.primary_default_load = int(namespace.primary_default_load)
        if namespace.secondary_default_load is not None:
            namespace.secondary_default_load = int(namespace.secondary_default_load)
    else:
        raise ValidationError("Invalid --state '%s': service state is not valid." % service.properties.service_kind)
def validate_create_managed_service_correlation(cmd, namespace):
    """Validate arguments for creating a managed-service correlation scheme.

    The target service must exist and must not already have a correlation.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    lookup_args = (namespace.resource_group_name, namespace.cluster_name,
                   namespace.application_name, namespace.service_name)
    service = _safe_get_resource(client.services.get, lookup_args)
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
    if service.properties.correlation_scheme:
        raise ValidationError("There can only be one service correlation per service.")
def validate_update_managed_service_correlation(cmd, namespace):
    """Validate arguments for updating a managed-service correlation scheme.

    Only checks that the target service exists.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    lookup_args = (namespace.resource_group_name, namespace.cluster_name,
                   namespace.application_name, namespace.service_name)
    service = _safe_get_resource(client.services.get, lookup_args)
    if service is None:
        raise ValidationError("Service '{}' Not Found.".format(namespace.service_name))
def validate_update_managed_application(cmd, namespace):
    """Validate arguments for 'az sf managed-application update'.

    Ensures the application exists, that any requested type version is
    registered, and converts the upgrade-policy timeouts to int.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    app = _safe_get_resource(client.applications.get,
                             (namespace.resource_group_name, namespace.cluster_name, namespace.application_name))
    if app is None:
        raise CLIError("Application '{}' Not Found.".format(namespace.application_name))

    if namespace.application_type_version is not None:
        if app.version.endswith(namespace.application_type_version):
            raise ValidationError("The application '{}' is already running with type version '{}'."
                                  .format(app.name, app.version))
        # app.version holds the full resource id of the type version; the type
        # name is the third segment from the end.
        app_type_name = app.version.split("/")[-3]
        type_version = _safe_get_resource(client.application_type_versions.get,
                                          (namespace.resource_group_name,
                                           namespace.cluster_name,
                                           app_type_name,
                                           namespace.application_type_version))
        if type_version is None:
            # Use the parsed app_type_name here: managed application models do
            # not expose 'type_name', so the previous 'app.type_name' raised
            # AttributeError instead of the intended error message.
            raise ValidationError("Application type version {}:{} not found. "
                                  "Create the type version before running this command."
                                  .format(app_type_name, namespace.application_type_version))

    # argparse delivers these as strings; convert so the SDK gets integers.
    if namespace.upgrade_replica_set_check_timeout:
        namespace.upgrade_replica_set_check_timeout = int(namespace.upgrade_replica_set_check_timeout)
    if namespace.health_check_stable_duration:
        namespace.health_check_stable_duration = int(namespace.health_check_stable_duration)
    if namespace.health_check_retry_timeout:
        namespace.health_check_retry_timeout = int(namespace.health_check_retry_timeout)
    if namespace.health_check_wait_duration:
        namespace.health_check_wait_duration = int(namespace.health_check_wait_duration)
    if namespace.upgrade_timeout:
        namespace.upgrade_timeout = int(namespace.upgrade_timeout)
    if namespace.upgrade_domain_timeout:
        namespace.upgrade_domain_timeout = int(namespace.upgrade_domain_timeout)
def validate_create_managed_application(cmd, namespace):
    """Validate arguments for creating a managed Service Fabric application.

    When no package URL is supplied, the referenced application type version
    must already be provisioned on the cluster.
    """
    client = servicefabric_managed_client_factory(cmd.cli_ctx)
    if namespace.package_url is not None:
        # A package URL means the type version will be created on the fly;
        # nothing to verify here.
        return
    lookup_args = (namespace.resource_group_name,
                   namespace.cluster_name,
                   namespace.application_type_name,
                   namespace.application_type_version)
    if _safe_get_resource(client.application_type_versions.get, lookup_args) is None:
        raise ValidationError("Application type version {}:{} not found. "
                              "Create the type version before running this "
                              "command or use --package-url to create it."
                              .format(namespace.application_type_name, namespace.application_type_version))
# Helpers
def _safe_get_resource(getResourceAction, params):
    """Invoke *getResourceAction* with *params*, mapping 'not found' to None.

    Any other CloudError is logged at warning level and re-raised unchanged.
    """
    try:
        resource = getResourceAction(*params)
    except CloudError as ex:
        if ex.error.error != 'ResourceNotFound':
            logger.warning("Unable to get resource, exception: %s", ex)
            raise
        return None
    return resource
| true | true |
1c35fa2c8dfc7188123cd824fc98041bce1624bb | 2,588 | py | Python | RecoMuon/MuonIdentification/test/runME0MuonReco_Example.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoMuon/MuonIdentification/test/runME0MuonReco_Example.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | RecoMuon/MuonIdentification/test/runME0MuonReco_Example.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
# cmsRun configuration: run ME0 muon reconstruction on an input file that
# already contains the local ME0 reconstruction (rechits/segments).
process = cms.Process("ME0SegmentMatchingLocalTest")
## Standard sequence
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.Geometry.GeometryExtended2015MuonGEMDevReco_cff')
process.load('Configuration.Geometry.GeometryExtended2015MuonGEMDev_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
#process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
## TrackingComponentsRecord required for matchers
process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorOpposite_cfi')
process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi')
## global tag for 2021 upgrade studies
from Configuration.AlCa.GlobalTag import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:upgrade2021', '')
#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:upgradePLS3', '')
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.me0Customs
from SLHCUpgradeSimulations.Configuration.me0Customs import customise
process = customise(process)
# -1: process every event in the input file
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:///somewhere/simevent.root') ##/somewhere/simevent.root" }
)
#process.load('RecoLocalMuon.GEMRecHit.me0RecHits_cfi')
#process.load('RecoLocalMuon.GEMSegments.me0Segments_cfi')
process.load('RecoMuon.MuonIdentification.me0MuonReco_cff')
#process.p = cms.Path(process.me0RecHits*process.me0Segments*process.me0MuonReco)
process.p = cms.Path(process.me0MuonReco)
#process.p = cms.Path(process.me0RecHits*process.me0Segments)
# The placeholder path above is overridden with the actual input file here.
process.PoolSource.fileNames = [
        'file:out_local_reco_me0segment.root'
]
# Keep everything except the intermediate matcher products in the output.
process.o1 = cms.OutputModule("PoolOutputModule",
                              outputCommands = cms.untracked.vstring(
        'keep *',
        'drop *_me0SegmentMatcher_*_*'
        #'drop *',
        ##'keep *_me0SegmentMatching_*_*',
        #'keep *_me0MuonConverting_*_*',
        ),
                              fileName = cms.untracked.string('out_me0Reco.root')
)
process.outpath = cms.EndPath(process.o1)
| 38.626866 | 103 | 0.781298 | import FWCore.ParameterSet.Config as cms
process = cms.Process("ME0SegmentMatchingLocalTest")
iguration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.Geometry.GeometryExtended2015MuonGEMDevReco_cff')
process.load('Configuration.Geometry.GeometryExtended2015MuonGEMDev_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
pagator.SteppingHelixPropagatorOpposite_cfi')
process.load('TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAlong_cfi')
port GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
from SLHCUpgradeSimulations.Configuration.me0Customs import customise
process = customise(process)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:///somewhere/simevent.root') lMuon.GEMRecHit.me0RecHits_cfi')
#process.load('RecoLocalMuon.GEMSegments.me0Segments_cfi')
process.load('RecoMuon.MuonIdentification.me0MuonReco_cff')
#process.p = cms.Path(process.me0RecHits*process.me0Segments*process.me0MuonReco)
process.p = cms.Path(process.me0MuonReco)
#process.p = cms.Path(process.me0RecHits*process.me0Segments)
process.PoolSource.fileNames = [
'file:out_local_reco_me0segment.root'
]
process.o1 = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_me0SegmentMatcher_*_*'
#'drop *',
##'keep *_me0SegmentMatching_*_*',
#'keep *_me0MuonConverting_*_*',
),
fileName = cms.untracked.string('out_me0Reco.root')
)
process.outpath = cms.EndPath(process.o1)
| true | true |
1c35fb14205765059508935cc2aae125add36420 | 2,666 | py | Python | test/jpypetest/test_synchronized.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | test/jpypetest/test_synchronized.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | test/jpypetest/test_synchronized.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | # *****************************************************************************
# Copyright 2017 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import sys
import threading
import time
import jpype
from jpype import synchronized
import common
# Flip to True to shadow jpype.synchronized with a no-op monitor; without
# real locking the worker threads trample the shared counter and the test
# fails, demonstrating that the synchronization is what makes it pass.
fail = False

if fail:
    # Cause a fail
    class _JMonitor(object):
        # No-op stand-in for jpype's Java-object monitor context manager.
        def __init__(self, obj):
            pass

        def __enter__(self):
            pass

        def __exit__(self, exception_type, exception_value, traceback):
            pass

    def synchronized(obj):
        # Shadows the imported jpype.synchronized with the no-op version.
        return _JMonitor(obj)

# Shared state exercised by the worker threads below.
success = True
glob = 0
obj = None
class MyThread(threading.Thread):
    """Worker that increments a shared counter under a Java object monitor.

    Each thread enters the monitor on the shared Java object, bumps the
    global counter, sleeps long enough for the other threads to reach the
    monitor, and records a failure if any of them got inside the critical
    section concurrently.
    """

    def run(self):
        global glob, success, obj
        with synchronized(obj):
            # Increment the global resource
            glob += 1
            # Wait for a while so a locking bug would be observable
            time.sleep(0.1)
            # If any other thread also incremented, mutual exclusion failed
            if glob != 1:
                success = False
            # Decrement the global resource
            glob -= 1
class SynchronizedTestCase(common.JPypeTestCase):
    """Exercises jpype.synchronized as a working monitor and on invalid input."""

    def setUp(self):
        common.JPypeTestCase.setUp(self)
        global glob, success, obj
        obj = jpype.JClass("java.lang.Object")()

    def testSynchronized(self):
        global glob, success
        glob = 0
        success = True
        # Ten workers contend for the same Java-object monitor, released
        # at (roughly) the same time.
        workers = [MyThread() for _ in range(10)]
        for worker in workers:
            worker.start()
        # Wait until every worker has finished.
        for worker in workers:
            worker.join()
        # No worker may have observed another inside the critical section.
        self.assertTrue(success)

    def testSyncronizedFail(self):
        with self.assertRaisesRegex(TypeError, "Java object is required"):
            with jpype.synchronized(object()):
                pass
        with self.assertRaisesRegex(TypeError, "Java primitives cannot be used"):
            with jpype.synchronized(jpype.JInt(1)):
                pass
| 27.204082 | 81 | 0.577644 |
import sys
import threading
import time
import jpype
from jpype import synchronized
import common
fail = False
if fail:
class _JMonitor(object):
def __init__(self, obj):
pass
def __enter__(self):
pass
def __exit__(self, exception_type, exception_value, traceback):
pass
def synchronized(obj):
return _JMonitor(obj)
success = True
glob = 0
obj = None
class MyThread(threading.Thread):
def run(self):
global glob, success, obj
with synchronized(obj):
glob += 1
time.sleep(0.1)
if glob != 1:
success = False
glob -= 1
class SynchronizedTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
global glob, success, obj
obj = jpype.JClass("java.lang.Object")()
def testSynchronized(self):
global glob, success
glob = 0
success = True
tl = []
for i in range(0, 10):
tl.append(MyThread())
for i in tl:
i.start()
for i in tl:
i.join()
self.assertTrue(success)
def testSyncronizedFail(self):
with self.assertRaisesRegex(TypeError, "Java object is required"):
with jpype.synchronized(object()):
pass
with self.assertRaisesRegex(TypeError, "Java primitives cannot be used"):
with jpype.synchronized(jpype.JInt(1)):
pass
| true | true |
1c35fb709b5cef77115f62cb652253e043a8a8ea | 1,192 | py | Python | figures/pipeline/logger.py | groovetch/edx-figures | a69fc1195c05176ac7dae90b337dd77f4bd9679f | [
"MIT"
] | 43 | 2018-05-29T20:01:25.000Z | 2021-12-02T09:43:17.000Z | figures/pipeline/logger.py | groovetch/edx-figures | a69fc1195c05176ac7dae90b337dd77f4bd9679f | [
"MIT"
] | 330 | 2018-05-30T17:06:15.000Z | 2022-03-16T15:52:22.000Z | figures/pipeline/logger.py | groovetch/edx-figures | a69fc1195c05176ac7dae90b337dd77f4bd9679f | [
"MIT"
] | 40 | 2018-10-06T00:15:58.000Z | 2022-02-14T12:44:45.000Z | '''This module provides baseline logging for the Figures pipeline
Initial focus is on tracking exceptions for Course
'''
from __future__ import absolute_import
import logging
import json
from django.core.serializers.json import DjangoJSONEncoder
from figures.models import PipelineError
from figures import helpers as figure_helpers
default_logger = logging.getLogger(__name__)
def log_error_to_db(error_data, error_type, **kwargs):
    """Persist a pipeline error record to the database.

    Recognized keyword arguments are ``user``, ``course_id`` (stringified
    before saving) and ``site``; anything else is ignored.
    """
    record = {
        'error_data': error_data,
        'error_type': error_type or PipelineError.UNSPECIFIED_DATA,
    }
    if 'user' in kwargs:
        record['user'] = kwargs['user']
    if 'course_id' in kwargs:
        record['course_id'] = str(kwargs['course_id'])
    if 'site' in kwargs:
        record['site'] = kwargs['site']
    PipelineError.objects.create(**record)
def log_error(error_data, error_type=None, **kwargs):
    """Log a pipeline error and optionally persist it to the database.

    The record is emitted through ``kwargs['logger']`` (falling back to the
    module logger) as pretty-printed JSON. Persistence happens when either
    the site-wide setting or the ``log_pipeline_errors_to_db`` kwarg says so.
    """
    target_logger = kwargs.get('logger', default_logger)
    serialized = json.dumps(error_data,
                            sort_keys=True,
                            indent=1,
                            cls=DjangoJSONEncoder)
    target_logger.error(serialized)
    persist = (figure_helpers.log_pipeline_errors_to_db()
               or kwargs.get('log_pipeline_errors_to_db', False))
    if persist:
        log_error_to_db(error_data, error_type, **kwargs)
| 28.380952 | 100 | 0.724832 |
from __future__ import absolute_import
import logging
import json
from django.core.serializers.json import DjangoJSONEncoder
from figures.models import PipelineError
from figures import helpers as figure_helpers
default_logger = logging.getLogger(__name__)
def log_error_to_db(error_data, error_type, **kwargs):
data = dict(
error_data=error_data,
error_type=error_type or PipelineError.UNSPECIFIED_DATA,
)
if 'user' in kwargs:
data.update(user=kwargs['user'])
if 'course_id' in kwargs:
data.update(course_id=str(kwargs['course_id']))
if 'site' in kwargs:
data.update(site=kwargs['site'])
PipelineError.objects.create(**data)
def log_error(error_data, error_type=None, **kwargs):
kwargs.get('logger', default_logger).error(json.dumps(
error_data,
sort_keys=True,
indent=1,
cls=DjangoJSONEncoder))
if figure_helpers.log_pipeline_errors_to_db() or kwargs.get('log_pipeline_errors_to_db', False):
log_error_to_db(error_data, error_type, **kwargs)
| true | true |
1c35fbbb7b76a32da0a528c1537d86f517e48518 | 8,335 | py | Python | models/layers/norm.py | milesgray/ImageFunctions | 35e4423b94149b0ba291eafb0cd98260a70d5f31 | [
"Apache-2.0"
] | null | null | null | models/layers/norm.py | milesgray/ImageFunctions | 35e4423b94149b0ba291eafb0cd98260a70d5f31 | [
"Apache-2.0"
] | null | null | null | models/layers/norm.py | milesgray/ImageFunctions | 35e4423b94149b0ba291eafb0cd98260a70d5f31 | [
"Apache-2.0"
] | null | null | null | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from .registry import register
@register("conditional_bn_2d")
class ConditionalBatchNorm2d(nn.Module):
    """BatchNorm2d whose affine scale/shift are looked up per class label.

    The BN layer runs without its own affine parameters; instead, gamma and
    beta come from per-class embedding tables (initialized to the identity
    transform: ones and zeros).
    """

    def __init__(self, num_features, num_classes):
        super().__init__()
        self.num_features = num_features
        # Affine transform is supplied by the embeddings, not by BN itself.
        self.bn = nn.BatchNorm2d(num_features, affine=False)
        self.gamma = nn.Embedding(num_classes, num_features)
        self.beta = nn.Embedding(num_classes, num_features)
        # Identity modulation at initialization.
        torch.nn.init.ones_(self.gamma.weight)
        torch.nn.init.zeros_(self.beta.weight)

    def forward(self, x, y):
        normed = self.bn(x)
        # Broadcast the per-class (num_features,) vectors over H and W.
        scale = self.gamma(y).view(-1, self.num_features, 1, 1)
        shift = self.beta(y).view(-1, self.num_features, 1, 1)
        return scale * normed + shift
@register("layer_norm")
class LayerNorm(nn.Module):
    """LayerNorm for channels-last (N, H, W, C) or channels-first (N, C, H, W) input.

    channels_last delegates to F.layer_norm; channels_first normalizes over
    dim 1 manually using the same biased variance estimate.
    """

    def __init__(self,
                 normalized_shape=None,
                 eps=1e-6,
                 data_format="channels_last"):
        assert normalized_shape is not None
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        if data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.data_format = data_format
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # channels_first: normalize across the channel dimension by hand.
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
def l2normalize(v, eps=1e-12):
    """Return *v* scaled to unit L2 norm; *eps* guards against division by zero."""
    denom = v.norm() + eps
    return v / denom
@register("spectral_norm")
class SpectralNorm(nn.Module):
    """
    Wrapper that reparameterizes a module's weight by its spectral norm.

    Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida
    and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan

    The wrapped module's weight is replaced by three tensors (``<name>_u``,
    ``<name>_v``, ``<name>_bar``); on every forward pass, power iteration
    estimates the largest singular value sigma and the effective weight
    becomes ``w_bar / sigma``.
    """
    def __init__(self, module=None, name='weight', power_iterations=1):
        super().__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        # Only split the weight into (u, v, w_bar) once.
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        # Run power iteration on w (viewed as a 2-D matrix) to refresh the
        # left/right singular-vector estimates u and v, then rebind
        # module.<name> to w_bar normalized by the estimated sigma.
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")

        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))

        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        # sigma approximates the largest singular value of w.
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        # True once (u, v, w_bar) already exist on the wrapped module.
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False

    def _make_params(self):
        # Replace module.<name> with w_bar (trainable) plus the fixed-size
        # singular-vector estimates u and v (not trained by gradient).
        w = getattr(self.module, self.name)

        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]

        u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = nn.Parameter(w.data)

        # Remove the raw weight so setattr in _update_u_v controls it.
        del self.module._parameters[self.name]

        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)

    def forward(self, *args):
        # Refresh the normalized weight, then delegate to the wrapped module.
        self._update_u_v()
        return self.module.forward(*args)
@register("filter_response_norm")
class FilterResponseNorm(nn.Module):
    """Filter Response Normalization.

    Normalizes activations by their mean squared value over the trailing
    (spatial) dimensions -- no mean subtraction -- then applies a learned
    per-channel affine transform.
    """

    def __init__(self, num_features=0, ndim=0, eps=None, learnable_eps=False):
        """
        Args:
            num_features: number of channels (dim 1 of the input).
            ndim: number of trailing spatial dimensions to average over.
            eps: if None is given, use the paper value as default.
                from paper, fixed_eps=1e-6 and learnable_eps_init=1e-4.
            learnable_eps: turn eps to learnable parameter, which is recommended on
                fully-connected or 1x1 activation map.
        """
        super().__init__()
        if eps is None:
            eps = 1e-4 if learnable_eps else 1e-6
        self.num_features = num_features
        self.init_eps = eps
        self.learnable_eps = learnable_eps
        self.ndim = ndim
        # Spatial dims over which nu^2 is averaged: (2, ..., 1 + ndim).
        self.mean_dims = list(range(2, 2 + ndim))

        param_shape = [1, num_features] + [1] * ndim
        self.weight = nn.Parameter(torch.ones(param_shape))
        self.bias = nn.Parameter(torch.zeros(param_shape))
        if learnable_eps:
            self.eps = nn.Parameter(torch.as_tensor(eps))
        else:
            self.register_buffer('eps', torch.as_tensor(eps))

    def forward(self, x):
        # nu^2: mean of squares over the spatial dimensions.
        nu2 = x.pow(2).mean(self.mean_dims, keepdim=True)
        normed = x * torch.rsqrt(nu2 + self.eps.abs())
        # Learned per-channel modulation.
        return normed * self.weight + self.bias

    def extra_repr(self):
        return 'num_features={}, init_eps={}, ndim={}'.format(
            self.num_features, self.init_eps, self.ndim)
# Convenience variants: the 1-D (fully-connected / 1x1) version uses a
# learnable eps per the constructor's guidance; 2-D is the standard
# conv-feature-map version.
FilterResponseNorm1d = partial(FilterResponseNorm, ndim=1, learnable_eps=True)
FilterResponseNorm2d = partial(FilterResponseNorm, ndim=2)
@register("ada_in")
class AdaIN(nn.Module):
    """
    Adaptive Instance normalization: instance-normalize the input, then
    modulate it with a (scale, shift) pair projected from a style code.
    reference: https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120
    """
    def __init__(self, n_channels=3, code=10):
        super().__init__()
        self.norm = nn.InstanceNorm2d(n_channels, affine=False, eps=1e-8)
        # NOTE(review): ScaledLinear is not defined or imported in this chunk;
        # presumably an equalized-lr linear layer from elsewhere in the
        # package -- confirm it is in scope at import time.
        self.A = ScaledLinear(code, n_channels * 2)
        # StyleGAN bias init (disabled):
        # self.A.linear.bias.data = torch.cat([torch.ones(n_channels), torch.zeros(n_channels)])

    def forward(self, x, style):
        """
        x - (N x C x H x W)
        style - (N x (Cx2))
        """
        # Project the style vector (w) to per-channel (scale, shift) pairs and
        # reshape 2D->4D (N, 2, C, 1, 1) to allow channel-wise operations.
        style = self.A(style)
        y = style.view(style.shape[0], 2, style.shape[1]//2).unsqueeze(3).unsqueeze(4)
        x = self.norm(x)
        # out = (scale + 1) * norm(x) + shift  (scale starts near identity)
        return torch.addcmul(y[:, 1], value=1., tensor1=y[:, 0] + 1, tensor2 = x)
@register("ada_pn")
class AdaPN(nn.Module):
    """
    Adaptive pixelwise feature vector normalization: PixelNorm followed by a
    style-conditioned (scale, shift) modulation, analogous to AdaIN.
    reference: https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120
    """
    def __init__(self, n_channels=3, code=10):
        super().__init__()
        # NOTE(review): ScaledLinear is not defined or imported in this chunk;
        # presumably defined elsewhere in the package -- confirm it is in scope.
        self.A = ScaledLinear(code, n_channels * 2)

    def forward(self, x, style, alpha=1e-8):
        """
        x - (N x C x H x W)
        style - (N x (Cx2))
        :param x: input activations volume
        :param alpha: small number for numerical stability (NOTE(review):
            currently unused; the hard-coded 1e-8 below is used instead)
        :return: y => pixel normalized activations
        """
        # Project the style vector (w) to per-channel (scale, shift) pairs and
        # reshape 2D->4D (N, 2, C, 1, 1) to allow channel-wise operations.
        style = self.A(style)
        z = style.view(style.shape[0], 2, style.shape[1]//2).unsqueeze(3).unsqueeze(4)
        # original PixelNorm: divide by RMS over the channel dimension
        y = torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)  # [N1HW]
        y = x / y  # normalize the input x volume
        # addcmul like in AdaIN: (scale + 1) * y + shift
        return torch.addcmul(z[:, 1], value=1., tensor1=z[:, 0] + 1, tensor2=y)
@register("pixel_norm")
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.rsqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 1e-8) | 36.39738 | 150 | 0.604319 | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from .registry import register
@register("conditional_bn_2d")
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes):
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm2d(num_features, affine=False)
self.gamma = nn.Embedding(num_classes, num_features)
self.beta = nn.Embedding(num_classes, num_features)
torch.nn.init.ones_(self.gamma.weight)
torch.nn.init.zeros_(self.beta.weight)
def forward(self, x, y):
out = self.bn(x)
gamma = self.gamma(y)
beta = self.beta(y)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
return out
@register("layer_norm")
class LayerNorm(nn.Module):
def __init__(self,
normalized_shape=None,
eps=1e-6,
data_format="channels_last"):
assert normalized_shape is not None
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x):
if self.data_format == "channels_last":
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
@register("spectral_norm")
class SpectralNorm(nn.Module):
def __init__(self, module=None, name='weight', power_iterations=1):
super().__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = nn.Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + "_u", u)
self.module.register_parameter(self.name + "_v", v)
self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
@register("filter_response_norm")
class FilterResponseNorm(nn.Module):
def __init__(self, num_features=0, ndim=0, eps=None, learnable_eps=False):
super().__init__()
if eps is None:
if learnable_eps:
eps = 1e-4
else:
eps = 1e-6
self.num_features = num_features
self.init_eps = eps
self.learnable_eps = learnable_eps
self.ndim = ndim
self.mean_dims = list(range(2, 2+ndim))
self.weight = nn.Parameter(torch.ones([1, num_features] + [1]*ndim))
self.bias = nn.Parameter(torch.zeros([1, num_features] + [1]*ndim))
if learnable_eps:
self.eps = nn.Parameter(torch.as_tensor(eps))
else:
self.register_buffer('eps', torch.as_tensor(eps))
def forward(self, x):
nu2 = x.pow(2).mean(self.mean_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + self.eps.abs())
x = x * self.weight + self.bias
return x
def extra_repr(self):
return 'num_features={}, init_eps={}, ndim={}'.format(
self.num_features, self.init_eps, self.ndim)
FilterResponseNorm1d = partial(FilterResponseNorm, ndim=1, learnable_eps=True)
FilterResponseNorm2d = partial(FilterResponseNorm, ndim=2)
@register("ada_in")
class AdaIN(nn.Module):
def __init__(self, n_channels=3, code=10):
super().__init__()
self.norm = nn.InstanceNorm2d(n_channels, affine=False, eps=1e-8)
self.A = ScaledLinear(code, n_channels * 2)
def forward(self, x, style):
style = self.A(style)
y = style.view(style.shape[0], 2, style.shape[1]//2).unsqueeze(3).unsqueeze(4)
x = self.norm(x)
return torch.addcmul(y[:, 1], value=1., tensor1=y[:, 0] + 1, tensor2 = x)
@register("ada_pn")
class AdaPN(nn.Module):
def __init__(self, n_channels=3, code=10):
super().__init__()
self.A = ScaledLinear(code, n_channels * 2)
def forward(self, x, style, alpha=1e-8):
style = self.A(style)
z = style.view(style.shape[0], 2, style.shape[1]//2).unsqueeze(3).unsqueeze(4)
y = torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
y = x / y
return torch.addcmul(z[:, 1], value=1., tensor1=z[:, 0] + 1, tensor2=y)
@register("pixel_norm")
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x * torch.rsqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 1e-8) | true | true |
1c35fc9680322e27a761f00327d74d3035163235 | 6,755 | py | Python | Django_mall/Django_mall/apps/users/serializers.py | gottuantuan/Django_shopping | a7d435fe0838bc682c4cd462778e486b5eaade08 | [
"MIT"
] | null | null | null | Django_mall/Django_mall/apps/users/serializers.py | gottuantuan/Django_shopping | a7d435fe0838bc682c4cd462778e486b5eaade08 | [
"MIT"
] | null | null | null | Django_mall/Django_mall/apps/users/serializers.py | gottuantuan/Django_shopping | a7d435fe0838bc682c4cd462778e486b5eaade08 | [
"MIT"
] | null | null | null | from rest_framework import serializers
import re
from rest_framework_jwt.settings import api_settings
from django_redis import get_redis_connection
from goods.models import SKU
from .models import User, Address
from celery_tasks.email.tasks import send_verify_email
class CreateUserSerializer(serializers.ModelSerializer):
    """Serializer for registering (creating) a new user."""
    # Fields that are not columns on the User model: all are input-only
    # except `token`, which is attached to the response after creation.
    password2 = serializers.CharField(label='确认密码', write_only=True)
    sms_code = serializers.CharField(label='短信验证码', write_only=True)
    allow = serializers.CharField(label='同意协议', write_only=True)
    token = serializers.CharField(label='KWT登录状态token', read_only=True)

    class Meta:
        model = User
        # ['id', 'username', 'mobile']: output (read only)
        # ['password', 'password2', 'sms_code', 'allow']: input (write only)
        fields = ['id', 'username', 'mobile', 'password', 'password2', 'sms_code', 'allow', 'token']
        # Additional per-field validation rules.
        extra_kwargs = {
            'username': {
                'min_length': 5,
                'max_length': 20,
                'error_messages': {
                    'min_length': '仅允许5-20个字符的用户名',
                    'max_length': '仅允许5-20个字符的用户名',
                }
            },
            'password': {
                'write_only': True,
                'min_length': 8,
                'max_length': 20,
                'error_messages': {
                    'min_length': '仅允许8-20个字符的密码',
                    'max_length': '仅允许8-20个字符的密码',
                }
            }
        }

    def validate_mobile(self, value):
        """Validate the mobile number format (mainland-China pattern)."""
        if not re.match(r'^1[3-9]\d{9}$', value):
            raise serializers.ValidationError('手机号格式错误')
        return value

    def validate_allow(self, value):
        """Ensure the user accepted the terms of service."""
        if value != 'true':
            raise serializers.ValidationError('请同意用户协议')
        return value

    def validate(self, data):
        # The two password fields must match.
        if data['password'] != data['password2']:
            raise serializers.ValidationError('两次密码不一致')

        # Check the SMS verification code stored in redis for this mobile.
        redis_conn = get_redis_connection('verify_codes')
        mobile = data['mobile']
        real_sms_code = redis_conn.get('sms_%s' % mobile)
        if real_sms_code is None:
            raise serializers.ValidationError('无效的短信验证码')
        if data['sms_code'] != real_sms_code.decode():
            raise serializers.ValidationError('短信验证码错误')

        return data

    def create(self, validated_data):
        """
        Create the user.

        Overridden to strip the write-only helper fields that do not exist on
        the model, hash the password via Django's auth system, and attach a
        JWT token so the client is logged in right after registration.
        """
        # Remove attributes that are not columns on the User model.
        del validated_data['password2']
        del validated_data['sms_code']
        del validated_data['allow']
        user = super().create(validated_data)

        # Hash the password using Django's authentication system.
        user.set_password(validated_data['password'])
        user.save()

        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER

        # Build the payload from the new user; it carries
        # {"username": ..., "user_id": ..., "email": ...}.
        payload = jwt_payload_handler(user)
        # JWT token
        token = jwt_encode_handler(payload)

        # Temporarily bind the token to the model instance for the response.
        user.token = token

        return user
class UserDetailSerializer(serializers.ModelSerializer):
    """Serializer exposing the user's profile details."""
    class Meta:
        model = User
        fields = ('id', 'username', 'mobile', 'email', 'email_active')
class EmailSerializer(serializers.ModelSerializer):
    """Serializer for setting a user's email address and triggering verification."""
    class Meta:
        model = User
        fields = ('id', 'email')
        extra_kwargs = {
            'email': {
                'required': True
            }
        }

    def update(self, instance, validated_data):
        # Persist the new address, then email a verification link to it.
        instance.email = validated_data.get('email')
        instance.save()
        verify_url = instance.generate_verify_email_url()
        # Send the verification email asynchronously via a celery task.
        send_verify_email.delay(instance.email, verify_url)
        return instance
class UserAddressSerializer(serializers.ModelSerializer):
    """
    User address serializer.
    """
    # Read as display names; written via the *_id fields below.
    province = serializers.StringRelatedField(read_only=True)
    city = serializers.StringRelatedField(read_only=True)
    district = serializers.StringRelatedField(read_only=True)
    province_id = serializers.IntegerField(label='省ID', required=True)
    city_id = serializers.IntegerField(label='市ID', required=True)
    district_id = serializers.IntegerField(label='区ID', required=True)

    class Meta:
        model = Address
        exclude = ('user', 'is_deleted', 'create_time', 'update_time')

    def validate_mobile(self, value):
        """
        Validate the mobile number format.
        """
        if not re.match(r'^1[3-9]\d{9}$', value):
            raise serializers.ValidationError('手机号格式错误')
        return value

    def create(self, validated_data):
        """
        Save the address, attaching the requesting user as its owner.
        """
        # The owner comes from the authenticated request, not the payload.
        validated_data['user'] = self.context['request'].user
        return super().create(validated_data)
class AddressTitleSerializer(serializers.ModelSerializer):
    """
    Serializer for updating only an address's title.
    """
    class Meta:
        model = Address
        fields = ('title',)
class UserBrowseHistorySerializer(serializers.Serializer):
    """Serializer that records a user's SKU browse history in redis."""
    sku_id = serializers.IntegerField(label='sku_id', min_value=1)

    # Validate that the sku id refers to an existing SKU.
    def validate_sku_id(self, value):
        '''
        Validate the sku_id field.
        :param value: sku id
        :return: value
        '''
        try:
            SKU.objects.get(id=value)
        except SKU.DoesNotExist:
            raise serializers.ValidationError('无效sku-id')

        return value

    def create(self, validated_data):
        """Called by CreateAPIView via save().

        Stores the browse record in redis as a per-user list, most recent
        first, deduplicated and capped at five entries.
        """
        # Id of the currently logged-in user.
        user_id = self.context['request'].user.id
        sku_id = validated_data.get('sku_id')

        redis_conn = get_redis_connection('history')
        pl = redis_conn.pipeline()
        # Remove any existing occurrence (deduplicate).
        pl.lrem('history_%s' % user_id, 0, sku_id)
        # Push to the front of the list.
        pl.lpush('history_%s' % user_id, sku_id)
        # Keep only the five most recent entries.
        pl.ltrim('history_%s' % user_id, 0, 4)
        # Execute the pipeline.
        pl.execute()

        # Return the validated payload unchanged.
        return validated_data
| 26.912351 | 100 | 0.593634 | from rest_framework import serializers
import re
from rest_framework_jwt.settings import api_settings
from django_redis import get_redis_connection
from goods.models import SKU
from .models import User, Address
from celery_tasks.email.tasks import send_verify_email
class CreateUserSerializer(serializers.ModelSerializer):
password2 = serializers.CharField(label='确认密码', write_only=True)
sms_code = serializers.CharField(label='短信验证码', write_only=True)
allow = serializers.CharField(label='同意协议', write_only=True)
token = serializers.CharField(label='KWT登录状态token', read_only=True)
class Meta:
model = User
fields = ['id', 'username', 'mobile', 'password', 'password2', 'sms_code', 'allow', 'token']
extra_kwargs = {
'username': {
'min_length': 5,
'max_length': 20,
'error_messages': {
'min_length': '仅允许5-20个字符的用户名',
'max_length': '仅允许5-20个字符的用户名',
}
},
'password': {
'write_only': True,
'min_length': 8,
'max_length': 20,
'error_messages': {
'min_length': '仅允许8-20个字符的密码',
'max_length': '仅允许8-20个字符的密码',
}
}
}
def validate_mobile(self, value):
if not re.match(r'^1[3-9]\d{9}$', value):
raise serializers.ValidationError('手机号格式错误')
return value
def validate_allow(self, value):
if value != 'true':
raise serializers.ValidationError('请同意用户协议')
return value
def validate(self, data):
if data['password'] != data['password2']:
raise serializers.ValidationError('两次密码不一致')
redis_conn = get_redis_connection('verify_codes')
mobile = data['mobile']
real_sms_code = redis_conn.get('sms_%s' % mobile)
if real_sms_code is None:
raise serializers.ValidationError('无效的短信验证码')
if data['sms_code'] != real_sms_code.decode():
raise serializers.ValidationError('短信验证码错误')
return data
def create(self, validated_data):
del validated_data['password2']
del validated_data['sms_code']
del validated_data['allow']
user = super().create(validated_data)
user.set_password(validated_data['password'])
user.save()
_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
user.token = token
return user
class UserDetailSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'mobile', 'email', 'email_active')
class EmailSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'email')
extra_kwargs = {
'email': {
'required': True
}
}
def update(self, instance, validated_data):
instance.email = validated_data.get('email')
instance.save()
verify_url = instance.generate_verify_email_url()
send_verify_email.delay(instance.email, verify_url)
return instance
class UserAddressSerializer(serializers.ModelSerializer):
province = serializers.StringRelatedField(read_only=True)
city = serializers.StringRelatedField(read_only=True)
district = serializers.StringRelatedField(read_only=True)
province_id = serializers.IntegerField(label='省ID', required=True)
city_id = serializers.IntegerField(label='市ID', required=True)
district_id = serializers.IntegerField(label='区ID', required=True)
class Meta:
model = Address
exclude = ('user', 'is_deleted', 'create_time', 'update_time')
def validate_mobile(self, value):
if not re.match(r'^1[3-9]\d{9}$', value):
raise serializers.ValidationError('手机号格式错误')
return value
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
return super().create(validated_data)
class AddressTitleSerializer(serializers.ModelSerializer):
class Meta:
model = Address
fields = ('title',)
class UserBrowseHistorySerializer(serializers.Serializer):
sku_id = serializers.IntegerField(label='sku_id', min_value=1)
def validate_sku_id(self, value):
try:
SKU.objects.get(id=value)
except SKU.DoesNotExist:
raise serializers.ValidationError('无效sku-id')
return value
def create(self, validated_data):
user_id = self.context['request'].user.id
sku_id = validated_data.get('sku_id')
redis_conn = get_redis_connection('history')
pl = redis_conn.pipeline()
pl.lrem('history_%s' % user_id, 0, sku_id)
pl.lpush('history_%s' % user_id, sku_id)
pl.ltrim('history_%s' % user_id, 0, 4)
pl.execute()
return validated_data
| true | true |
1c35fd08ec6e78d76324b5a01eaaf31ac343cabd | 2,495 | py | Python | LHconnectivity/version.py | benjamingarzon/LHconnectivity | 1c960e525ed93a056bed3eaa74db28f31212271a | [
"MIT"
] | 7 | 2019-03-11T12:34:21.000Z | 2021-05-07T14:49:23.000Z | LHconnectivity/version.py | benjamingarzon/LHconnectivity | 1c960e525ed93a056bed3eaa74db28f31212271a | [
"MIT"
] | 67 | 2018-08-20T21:49:09.000Z | 2019-12-04T17:52:03.000Z | LHconnectivity/version.py | benjamingarzon/LHconnectivity | 1c960e525ed93a056bed3eaa74db28f31212271a | [
"MIT"
] | 6 | 2018-10-25T17:41:25.000Z | 2020-12-26T18:09:53.000Z | from __future__ import absolute_import, division, print_function
from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
# Version components; pre-release builds carry the 'dev' suffix.
_version_major = 0
_version_minor = 1
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
    _ver.append(_version_micro)
if _version_extra:
    _ver.append(_version_extra)
# e.g. "0.1.dev" with the settings above
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "shablona: a template for small scientific Python projects"
# Long description will go up on the pypi page
long_description = """
Shablona
========
Shablona is a template project for small scientific Python projects.
It contains software implementations of an analysis of some simple data, but
more importantly, it contains infrastructure for testing, documentation,
continuous integration and deployment, which can be easily adapted
to use in other projects.
To get started using these components in your own software, please go to the
repository README_.
.. _README: https://github.com/uwescience/shablona/blob/master/README.md
License
=======
``shablona`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Ariel Rokem, The University of Washington
eScience Institute.
"""
NAME = "shablona"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/uwescience/shablona"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem"
AUTHOR_EMAIL = "arokem@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'shablona': [pjoin('data', '*')]}
REQUIRES = ["numpy"]
| 32.828947 | 77 | 0.734269 | from __future__ import absolute_import, division, print_function
from os.path import join as pjoin
_version_major = 0
_version_minor = 1
_version_micro = ''
_version_extra = 'dev'
n_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
description = "shablona: a template for small scientific Python projects"
long_description = """
Shablona
========
Shablona is a template project for small scientific Python projects.
It contains software implementations of an analysis of some simple data, but
more importantly, it contains infrastructure for testing, documentation,
continuous integration and deployment, which can be easily adapted
to use in other projects.
To get started using these components in your own software, please go to the
repository README_.
.. _README: https://github.com/uwescience/shablona/blob/master/README.md
License
=======
``shablona`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Ariel Rokem, The University of Washington
eScience Institute.
"""
NAME = "shablona"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/uwescience/shablona"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem"
AUTHOR_EMAIL = "arokem@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'shablona': [pjoin('data', '*')]}
REQUIRES = ["numpy"]
| true | true |
1c35fd99530e6e6cbcf496ac93dfdeaceb8567c3 | 245 | py | Python | src/direpack/plot/__init__.py | zedian/direpack | 507a5e21937a6e4032aa0d3e5c4a79e1349343d8 | [
"MIT"
] | 23 | 2020-04-04T20:48:25.000Z | 2022-01-07T19:42:22.000Z | src/direpack/plot/__init__.py | zedian/direpack | 507a5e21937a6e4032aa0d3e5c4a79e1349343d8 | [
"MIT"
] | 8 | 2020-04-26T19:24:01.000Z | 2021-05-20T13:29:31.000Z | src/direpack/plot/__init__.py | zedian/direpack | 507a5e21937a6e4032aa0d3e5c4a79e1349343d8 | [
"MIT"
] | 9 | 2020-04-11T11:25:16.000Z | 2021-04-27T09:38:50.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 22 12:17:17 2018
@author: Sven Serneels, Ponalytics
"""
__name__ = "plot"
__author__ = "Sven Serneels"
__license__ = "MIT"
__version__ = "0.9.0"
__date__ = "2020-04-18"
| 13.611111 | 35 | 0.64898 |
__name__ = "plot"
__author__ = "Sven Serneels"
__license__ = "MIT"
__version__ = "0.9.0"
__date__ = "2020-04-18"
| true | true |
1c35fddf8331755623b314741d4a405baeff9969 | 50,118 | py | Python | discord/client.py | Rayster4/discord.py-1.7.3 | 4a4c60a8fab7bf00eac2e9ffbb5621f68a4c6b6f | [
"MIT"
] | 21 | 2021-03-29T05:49:35.000Z | 2022-03-18T09:02:34.000Z | discord/client.py | Rayster4/discord.py-1.7.3 | 4a4c60a8fab7bf00eac2e9ffbb5621f68a4c6b6f | [
"MIT"
] | 15 | 2021-04-10T11:08:09.000Z | 2022-03-22T07:48:58.000Z | discord/client.py | Rayster4/discord.py-1.7.3 | 4a4c60a8fab7bf00eac2e9ffbb5621f68a4c6b6f | [
"MIT"
] | 31 | 2021-03-29T05:54:57.000Z | 2022-03-22T16:58:57.000Z | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import logging
import signal
import sys
import traceback
import aiohttp
from .user import User, Profile
from .invite import Invite
from .template import Template
from .widget import Widget
from .guild import Guild
from .channel import _channel_factory
from .enums import ChannelType
from .mentions import AllowedMentions
from .errors import *
from .enums import Status, VoiceRegion
from .gateway import *
from .activity import BaseActivity, create_activity
from .voice_client import VoiceClient
from .http import HTTPClient
from .state import ConnectionState
from . import utils
from .object import Object
from .backoff import ExponentialBackoff
from .webhook import Webhook
from .iterators import GuildIterator
from .appinfo import AppInfo
log = logging.getLogger(__name__)
def _cancel_tasks(loop):
    """Cancel every pending task on *loop* and report unhandled exceptions."""
    # asyncio.Task.all_tasks was removed in newer Pythons; fall back to the
    # module-level helper when the classmethod is gone.
    try:
        retrieve = asyncio.Task.all_tasks
    except AttributeError:
        retrieve = asyncio.all_tasks
    pending = {task for task in retrieve(loop=loop) if not task.done()}
    if not pending:
        return
    log.info('Cleaning up after %d tasks.', len(pending))
    for task in pending:
        task.cancel()
    # Wait for every cancellation to actually finish before inspecting them.
    loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
    log.info('All tasks finished cancelling.')
    for task in pending:
        if task.cancelled():
            continue
        if task.exception() is not None:
            loop.call_exception_handler({
                'message': 'Unhandled exception during Client.run shutdown.',
                'exception': task.exception(),
                'task': task
            })
def _cleanup_loop(loop):
    """Cancel outstanding tasks, flush async generators, and close *loop*.
    The ``finally`` guarantees the loop is closed even when task
    cancellation itself raises.
    """
    try:
        _cancel_tasks(loop)
        if sys.version_info >= (3, 6):
            # shutdown_asyncgens() only exists on 3.6+
            loop.run_until_complete(loop.shutdown_asyncgens())
    finally:
        log.info('Closing the event loop.')
        loop.close()
class _ClientEventTask(asyncio.Task):
    """An :class:`asyncio.Task` that remembers which client event spawned it."""

    def __init__(self, original_coro, event_name, coro, *, loop):
        super().__init__(coro, loop=loop)
        self.__event_name = event_name
        self.__original_coro = original_coro

    def __repr__(self):
        pairs = [
            ('state', self._state.lower()),
            ('event', self.__event_name),
            ('coro', repr(self.__original_coro)),
        ]
        if self._exception is not None:
            pairs.append(('exception', repr(self._exception)))
        joined = ' '.join('%s=%s' % pair for pair in pairs)
        return '<ClientEventTask {}>'.format(joined)
class Client:
r"""Represents a client connection that connects to Discord.
This class is used to interact with the Discord WebSocket and API.
A number of options can be passed to the :class:`Client`.
Parameters
-----------
max_messages: Optional[:class:`int`]
The maximum number of messages to store in the internal message cache.
This defaults to ``1000``. Passing in ``None`` disables the message cache.
.. versionchanged:: 1.3
Allow disabling the message cache and change the default size to ``1000``.
loop: Optional[:class:`asyncio.AbstractEventLoop`]
The :class:`asyncio.AbstractEventLoop` to use for asynchronous operations.
Defaults to ``None``, in which case the default event loop is used via
:func:`asyncio.get_event_loop()`.
connector: :class:`aiohttp.BaseConnector`
The connector to use for connection pooling.
proxy: Optional[:class:`str`]
Proxy URL.
proxy_auth: Optional[:class:`aiohttp.BasicAuth`]
An object that represents proxy HTTP Basic Authorization.
shard_id: Optional[:class:`int`]
Integer starting at ``0`` and less than :attr:`.shard_count`.
shard_count: Optional[:class:`int`]
The total number of shards.
intents: :class:`Intents`
The intents that you want to enable for the session. This is a way of
disabling and enabling certain gateway events from triggering and being sent.
If not given, defaults to a regularly constructed :class:`Intents` class.
.. versionadded:: 1.5
member_cache_flags: :class:`MemberCacheFlags`
Allows for finer control over how the library caches members.
If not given, defaults to cache as much as possible with the
currently selected intents.
.. versionadded:: 1.5
fetch_offline_members: :class:`bool`
A deprecated alias of ``chunk_guilds_at_startup``.
chunk_guilds_at_startup: :class:`bool`
Indicates if :func:`.on_ready` should be delayed to chunk all guilds
at start-up if necessary. This operation is incredibly slow for large
amounts of guilds. The default is ``True`` if :attr:`Intents.members`
is ``True``.
.. versionadded:: 1.5
status: Optional[:class:`.Status`]
A status to start your presence with upon logging on to Discord.
activity: Optional[:class:`.BaseActivity`]
An activity to start your presence with upon logging on to Discord.
allowed_mentions: Optional[:class:`AllowedMentions`]
Control how the client handles mentions by default on every message sent.
.. versionadded:: 1.4
heartbeat_timeout: :class:`float`
The maximum numbers of seconds before timing out and restarting the
WebSocket in the case of not receiving a HEARTBEAT_ACK. Useful if
processing the initial packets take too long to the point of disconnecting
you. The default timeout is 60 seconds.
guild_ready_timeout: :class:`float`
The maximum number of seconds to wait for the GUILD_CREATE stream to end before
preparing the member cache and firing READY. The default timeout is 2 seconds.
.. versionadded:: 1.4
guild_subscriptions: :class:`bool`
Whether to dispatch presence or typing events. Defaults to ``True``.
.. versionadded:: 1.3
.. warning::
If this is set to ``False`` then the following features will be disabled:
- No user related updates (:func:`on_user_update` will not dispatch)
- All member related events will be disabled.
- :func:`on_member_update`
- :func:`on_member_join`
- :func:`on_member_remove`
- Typing events will be disabled (:func:`on_typing`).
- If ``fetch_offline_members`` is set to ``False`` then the user cache will not exist.
This makes it difficult or impossible to do many things, for example:
- Computing permissions
- Querying members in a voice channel via :attr:`VoiceChannel.members` will be empty.
- Most forms of receiving :class:`Member` will be
receiving :class:`User` instead, except for message events.
- :attr:`Guild.owner` will usually resolve to ``None``.
- :meth:`Guild.get_member` will usually be unavailable.
- Anything that involves using :class:`Member`.
- :attr:`users` will not be as populated.
- etc.
In short, this makes it so the only member you can reliably query is the
message author. Useful for bots that do not require any state.
assume_unsync_clock: :class:`bool`
Whether to assume the system clock is unsynced. This applies to the ratelimit handling
code. If this is set to ``True``, the default, then the library uses the time to reset
a rate limit bucket given by Discord. If this is ``False`` then your system clock is
used to calculate how long to sleep for. If this is set to ``False`` it is recommended to
sync your system clock to Google's NTP server.
.. versionadded:: 1.3
Attributes
-----------
ws
The websocket gateway the client is currently connected to. Could be ``None``.
loop: :class:`asyncio.AbstractEventLoop`
The event loop that the client uses for HTTP requests and websocket operations.
"""
    def __init__(self, *, loop=None, **options):
        # Gateway websocket; stays None until connect() establishes one.
        self.ws = None
        self.loop = asyncio.get_event_loop() if loop is None else loop
        # event name -> list of (future, condition) pairs registered by wait_for()
        self._listeners = {}
        self.shard_id = options.get('shard_id')
        self.shard_count = options.get('shard_count')
        connector = options.pop('connector', None)
        proxy = options.pop('proxy', None)
        proxy_auth = options.pop('proxy_auth', None)
        unsync_clock = options.pop('assume_unsync_clock', True)
        self.http = HTTPClient(connector, proxy=proxy, proxy_auth=proxy_auth, unsync_clock=unsync_clock, loop=self.loop)
        self._handlers = {
            'ready': self._handle_ready
        }
        self._hooks = {
            'before_identify': self._call_before_identify_hook
        }
        # Remaining options are forwarded to the ConnectionState cache.
        self._connection = self._get_state(**options)
        self._connection.shard_count = self.shard_count
        self._closed = False
        # Set by _handle_ready() once READY is processed.
        self._ready = asyncio.Event()
        self._connection._get_websocket = self._get_websocket
        self._connection._get_client = lambda: self
        if VoiceClient.warn_nacl:
            # Warn only once per process that voice support is unavailable.
            VoiceClient.warn_nacl = False
            log.warning("PyNaCl is not installed, voice will NOT be supported")
# internals
def _get_websocket(self, guild_id=None, *, shard_id=None):
return self.ws
    def _get_state(self, **options):
        # Factory for the ConnectionState cache, wired to this client's
        # dispatcher, internal handlers/hooks, HTTP session and loop.
        return ConnectionState(dispatch=self.dispatch, handlers=self._handlers,
                               hooks=self._hooks, syncer=self._syncer, http=self.http, loop=self.loop, **options)
    async def _syncer(self, guilds):
        # Forward a guild sync request over the active gateway websocket.
        await self.ws.request_sync(guilds)
    def _handle_ready(self):
        # Internal READY handler: sets the event observed by is_ready().
        self._ready.set()
@property
def latency(self):
""":class:`float`: Measures latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.
This could be referred to as the Discord WebSocket protocol latency.
"""
ws = self.ws
return float('nan') if not ws else ws.latency
def is_ws_ratelimited(self):
""":class:`bool`: Whether the websocket is currently rate limited.
This can be useful to know when deciding whether you should query members
using HTTP or via the gateway.
.. versionadded:: 1.6
"""
if self.ws:
return self.ws.is_ratelimited()
return False
@property
def user(self):
"""Optional[:class:`.ClientUser`]: Represents the connected client. ``None`` if not logged in."""
return self._connection.user
@property
def guilds(self):
"""List[:class:`.Guild`]: The guilds that the connected client is a member of."""
return self._connection.guilds
@property
def emojis(self):
"""List[:class:`.Emoji`]: The emojis that the connected client has."""
return self._connection.emojis
@property
def cached_messages(self):
"""Sequence[:class:`.Message`]: Read-only list of messages the connected client has cached.
.. versionadded:: 1.1
"""
return utils.SequenceProxy(self._connection._messages or [])
@property
def private_channels(self):
"""List[:class:`.abc.PrivateChannel`]: The private channels that the connected client is participating on.
.. note::
This returns only up to 128 most recent private channels due to an internal working
on how Discord deals with private channels.
"""
return self._connection.private_channels
@property
def voice_clients(self):
"""List[:class:`.VoiceProtocol`]: Represents a list of voice connections.
These are usually :class:`.VoiceClient` instances.
"""
return self._connection.voice_clients
    def is_ready(self):
        """:class:`bool`: Specifies if the client's internal cache is ready for use."""
        # The event is set by _handle_ready() and cleared by close()/clear().
        return self._ready.is_set()
    async def _run_event(self, coro, event_name, *args, **kwargs):
        # Run a single event handler, routing any failure into on_error.
        try:
            await coro(*args, **kwargs)
        except asyncio.CancelledError:
            # Cancellation during teardown is expected; stay quiet.
            pass
        except Exception:
            try:
                await self.on_error(event_name, *args, **kwargs)
            except asyncio.CancelledError:
                # A cancelled error handler must not crash the dispatcher.
                pass
    def _schedule_event(self, coro, event_name, *args, **kwargs):
        # Wrap the handler with error handling, then run it concurrently.
        wrapped = self._run_event(coro, event_name, *args, **kwargs)
        # Schedules the task (tagged with the event name for nicer reprs)
        return _ClientEventTask(original_coro=coro, event_name=event_name, coro=wrapped, loop=self.loop)
    def dispatch(self, event, *args, **kwargs):
        # Fires the ``on_<event>`` coroutine (if defined on self) and resolves
        # any futures registered by ``wait_for`` listening for *event*.
        log.debug('Dispatching event %s', event)
        method = 'on_' + event
        listeners = self._listeners.get(event)
        if listeners:
            removed = []
            for i, (future, condition) in enumerate(listeners):
                if future.cancelled():
                    # The waiter went away; just mark its slot for removal.
                    removed.append(i)
                    continue
                try:
                    result = condition(*args)
                except Exception as exc:
                    # A broken check is surfaced to the waiter as an exception.
                    future.set_exception(exc)
                    removed.append(i)
                else:
                    if result:
                        # Deliver the payload: None, the lone argument, or the tuple.
                        if len(args) == 0:
                            future.set_result(None)
                        elif len(args) == 1:
                            future.set_result(args[0])
                        else:
                            future.set_result(args)
                        removed.append(i)
            if len(removed) == len(listeners):
                self._listeners.pop(event)
            else:
                # Delete in reverse so earlier indices stay valid.
                for idx in reversed(removed):
                    del listeners[idx]
        try:
            coro = getattr(self, method)
        except AttributeError:
            # No handler defined for this event; nothing to schedule.
            pass
        else:
            self._schedule_event(coro, method, *args, **kwargs)
async def on_error(self, event_method, *args, **kwargs):
"""|coro|
The default error handler provided by the client.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
Check :func:`~discord.on_error` for more details.
"""
print('Ignoring exception in {}'.format(event_method), file=sys.stderr)
traceback.print_exc()
    @utils.deprecated('Guild.chunk')
    async def request_offline_members(self, *guilds):
        r"""|coro|
        Requests previously offline members from the guild to be filled up
        into the :attr:`.Guild.members` cache. This function is usually not
        called. It should only be used if you have the ``fetch_offline_members``
        parameter set to ``False``.
        When the client logs on and connects to the websocket, Discord does
        not provide the library with offline members if the number of members
        in the guild is larger than 250. You can check if a guild is large
        if :attr:`.Guild.large` is ``True``.
        .. warning::
            This method is deprecated. Use :meth:`Guild.chunk` instead.
        Parameters
        -----------
        \*guilds: :class:`.Guild`
            An argument list of guilds to request offline members for.
        Raises
        -------
        :exc:`.InvalidArgument`
            If any guild is unavailable in the collection.
        """
        if any(g.unavailable for g in guilds):
            raise InvalidArgument('An unavailable guild was passed.')
        # Chunk each guild sequentially through the connection state.
        for guild in guilds:
            await self._connection.chunk_guild(guild)
# hooks
    async def _call_before_identify_hook(self, shard_id, *, initial=False):
        # This hook is an internal hook that actually calls the public one.
        # It allows the library to have its own hook without stepping on the
        # toes of those who need to override their own hook.
        await self.before_identify_hook(shard_id, initial=initial)
async def before_identify_hook(self, shard_id, *, initial=False):
"""|coro|
A hook that is called before IDENTIFYing a session. This is useful
if you wish to have more control over the synchronization of multiple
IDENTIFYing clients.
The default implementation sleeps for 5 seconds.
.. versionadded:: 1.4
Parameters
------------
shard_id: :class:`int`
The shard ID that requested being IDENTIFY'd
initial: :class:`bool`
Whether this IDENTIFY is the first initial IDENTIFY.
"""
if not initial:
await asyncio.sleep(5.0)
# login state management
    async def login(self, token, *, bot=True):
        """|coro|
        Logs in the client with the specified credentials.
        This function can be used in two different ways.
        .. warning::
            Logging on with a user token is against the Discord
            `Terms of Service <https://support.discord.com/hc/en-us/articles/115002192352>`_
            and doing so might potentially get your account banned.
            Use this at your own risk.
        Parameters
        -----------
        token: :class:`str`
            The authentication token. Do not prefix this token with
            anything as the library will do it for you.
        bot: :class:`bool`
            Keyword argument that specifies if the account logging on is a bot
            token or not.
            .. deprecated:: 1.7
        Raises
        ------
        :exc:`.LoginFailure`
            The wrong credentials are passed.
        :exc:`.HTTPException`
            An unknown HTTP related error occurred,
            usually when it isn't 200 or the known incorrect credentials
            passing status code.
        """
        log.info('logging in using static token')
        # strip() guards against stray whitespace/newlines pasted with the token
        await self.http.static_login(token.strip(), bot=bot)
        self._connection.is_bot = bot
    @utils.deprecated('Client.close')
    async def logout(self):
        """|coro|
        Logs out of Discord and closes all connections.
        .. deprecated:: 1.7
        .. note::
            This is just an alias to :meth:`close`. If you want
            to do extraneous cleanup when subclassing, it is suggested
            to override :meth:`close` instead.
        """
        # Kept only for backwards compatibility.
        await self.close()
    async def connect(self, *, reconnect=True):
        """|coro|
        Creates a websocket connection and lets the websocket listen
        to messages from Discord. This is a loop that runs the entire
        event system and miscellaneous aspects of the library. Control
        is not resumed until the WebSocket connection is terminated.
        Parameters
        -----------
        reconnect: :class:`bool`
            If we should attempt reconnecting, either due to internet
            failure or a specific failure on Discord's part. Certain
            disconnects that lead to bad state will not be handled (such as
            invalid sharding payloads or bad tokens).
        Raises
        -------
        :exc:`.GatewayNotFound`
            If the gateway to connect to Discord is not found. Usually if this
            is thrown then there is a Discord API outage.
        :exc:`.ConnectionClosed`
            The websocket connection has been terminated.
        """
        backoff = ExponentialBackoff()
        ws_params = {
            'initial': True,
            'shard_id': self.shard_id,
        }
        while not self.is_closed():
            try:
                # Establish (or re-establish) the gateway connection, then
                # pump events until something goes wrong.
                coro = DiscordWebSocket.from_client(self, **ws_params)
                self.ws = await asyncio.wait_for(coro, timeout=60.0)
                ws_params['initial'] = False
                while True:
                    await self.ws.poll_event()
            except ReconnectWebSocket as e:
                # The gateway explicitly asked us to RESUME or re-IDENTIFY.
                log.info('Got a request to %s the websocket.', e.op)
                self.dispatch('disconnect')
                ws_params.update(sequence=self.ws.sequence, resume=e.resume, session=self.ws.session_id)
                continue
            except (OSError,
                    HTTPException,
                    GatewayNotFound,
                    ConnectionClosed,
                    aiohttp.ClientError,
                    asyncio.TimeoutError) as exc:
                self.dispatch('disconnect')
                if not reconnect:
                    await self.close()
                    if isinstance(exc, ConnectionClosed) and exc.code == 1000:
                        # clean close, don't re-raise this
                        return
                    raise
                if self.is_closed():
                    return
                # If we get connection reset by peer then try to RESUME
                if isinstance(exc, OSError) and exc.errno in (54, 10054):
                    ws_params.update(sequence=self.ws.sequence, initial=False, resume=True, session=self.ws.session_id)
                    continue
                # We should only get this when an unhandled close code happens,
                # such as a clean disconnect (1000) or a bad state (bad token, no sharding, etc)
                # sometimes, discord sends us 1000 for unknown reasons so we should reconnect
                # regardless and rely on is_closed instead
                if isinstance(exc, ConnectionClosed):
                    if exc.code == 4014:
                        raise PrivilegedIntentsRequired(exc.shard_id) from None
                    if exc.code != 1000:
                        await self.close()
                        raise
                # Exponential backoff before trying again.
                retry = backoff.delay()
                log.exception("Attempting a reconnect in %.2fs", retry)
                await asyncio.sleep(retry)
                # Always try to RESUME the connection
                # If the connection is not RESUME-able then the gateway will invalidate the session.
                # This is apparently what the official Discord client does.
                ws_params.update(sequence=self.ws.sequence, resume=True, session=self.ws.session_id)
    async def close(self):
        """|coro|
        Closes the connection to Discord.
        """
        if self._closed:
            # Closing twice is a no-op.
            return
        await self.http.close()
        self._closed = True
        # Best-effort voice teardown; a failing disconnect must not
        # abort close().
        for voice in self.voice_clients:
            try:
                await voice.disconnect()
            except Exception:
                # if an error happens during disconnects, disregard it.
                pass
        if self.ws is not None and self.ws.open:
            # 1000 = normal closure per the WebSocket spec.
            await self.ws.close(code=1000)
        self._ready.clear()
def clear(self):
"""Clears the internal state of the bot.
After this, the bot can be considered "re-opened", i.e. :meth:`is_closed`
and :meth:`is_ready` both return ``False`` along with the bot's internal
cache cleared.
"""
self._closed = False
self._ready.clear()
self._connection.clear()
self.http.recreate()
async def start(self, *args, **kwargs):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
Raises
-------
TypeError
An unexpected keyword argument was received.
"""
bot = kwargs.pop('bot', True)
reconnect = kwargs.pop('reconnect', True)
if kwargs:
raise TypeError("unexpected keyword argument(s) %s" % list(kwargs.keys()))
await self.login(*args, bot=bot)
await self.connect(reconnect=reconnect)
    def run(self, *args, **kwargs):
        """A blocking call that abstracts away the event loop
        initialisation from you.
        If you want more control over the event loop then this
        function should not be used. Use :meth:`start` coroutine
        or :meth:`connect` + :meth:`login`.
        Roughly Equivalent to: ::
            try:
                loop.run_until_complete(start(*args, **kwargs))
            except KeyboardInterrupt:
                loop.run_until_complete(close())
                # cancel all tasks lingering
            finally:
                loop.close()
        .. warning::
            This function must be the last function to call due to the fact that it
            is blocking. That means that registration of events or anything being
            called after this function call will not execute until it returns.
        """
        loop = self.loop
        try:
            # Stop the loop cleanly on SIGINT/SIGTERM where the platform
            # supports signal handlers (not implemented on Windows loops).
            loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
            loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
        except NotImplementedError:
            pass
        async def runner():
            # Guarantees close() runs even if start() raises.
            try:
                await self.start(*args, **kwargs)
            finally:
                if not self.is_closed():
                    await self.close()
        def stop_loop_on_completion(f):
            loop.stop()
        future = asyncio.ensure_future(runner(), loop=loop)
        future.add_done_callback(stop_loop_on_completion)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            log.info('Received signal to terminate bot and event loop.')
        finally:
            future.remove_done_callback(stop_loop_on_completion)
            log.info('Cleaning up tasks.')
            _cleanup_loop(loop)
        if not future.cancelled():
            try:
                return future.result()
            except KeyboardInterrupt:
                # I am unsure why this gets raised here but suppress it anyway
                return None
# properties
    def is_closed(self):
        """:class:`bool`: Indicates if the websocket connection is closed."""
        # Flag is set by close() and reset by clear().
        return self._closed
@property
def activity(self):
"""Optional[:class:`.BaseActivity`]: The activity being used upon
logging in.
"""
return create_activity(self._connection._activity)
@activity.setter
def activity(self, value):
if value is None:
self._connection._activity = None
elif isinstance(value, BaseActivity):
self._connection._activity = value.to_dict()
else:
raise TypeError('activity must derive from BaseActivity.')
@property
def allowed_mentions(self):
"""Optional[:class:`~discord.AllowedMentions`]: The allowed mention configuration.
.. versionadded:: 1.4
"""
return self._connection.allowed_mentions
@allowed_mentions.setter
def allowed_mentions(self, value):
if value is None or isinstance(value, AllowedMentions):
self._connection.allowed_mentions = value
else:
raise TypeError('allowed_mentions must be AllowedMentions not {0.__class__!r}'.format(value))
@property
def intents(self):
""":class:`~discord.Intents`: The intents configured for this connection.
.. versionadded:: 1.5
"""
return self._connection.intents
# helpers/getters
@property
def users(self):
"""List[:class:`~discord.User`]: Returns a list of all the users the bot can see."""
return list(self._connection._users.values())
    def get_channel(self, id):
        """Returns a channel with the given ID.

        This is a cache lookup; no API request is made (compare :meth:`fetch_channel`).

        Parameters
        -----------
        id: :class:`int`
            The ID to search for.

        Returns
        --------
        Optional[Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]]
            The returned channel or ``None`` if not found.
        """
        return self._connection.get_channel(id)
    def get_guild(self, id):
        """Returns a guild with the given ID.

        This is a cache lookup; no API request is made (compare :meth:`fetch_guild`).

        Parameters
        -----------
        id: :class:`int`
            The ID to search for.

        Returns
        --------
        Optional[:class:`.Guild`]
            The guild or ``None`` if not found.
        """
        return self._connection._get_guild(id)
    def get_user(self, id):
        """Returns a user with the given ID.

        This is a cache lookup; no API request is made (compare :meth:`fetch_user`).

        Parameters
        -----------
        id: :class:`int`
            The ID to search for.

        Returns
        --------
        Optional[:class:`~discord.User`]
            The user or ``None`` if not found.
        """
        return self._connection.get_user(id)
    def get_emoji(self, id):
        """Returns an emoji with the given ID.

        The emoji is looked up in the internal cache.

        Parameters
        -----------
        id: :class:`int`
            The ID to search for.

        Returns
        --------
        Optional[:class:`.Emoji`]
            The custom emoji or ``None`` if not found.
        """
        return self._connection.get_emoji(id)
def get_all_channels(self):
"""A generator that retrieves every :class:`.abc.GuildChannel` the client can 'access'.
This is equivalent to: ::
for guild in client.guilds:
for channel in guild.channels:
yield channel
.. note::
Just because you receive a :class:`.abc.GuildChannel` does not mean that
you can communicate in said channel. :meth:`.abc.GuildChannel.permissions_for` should
be used for that.
Yields
------
:class:`.abc.GuildChannel`
A channel the client can 'access'.
"""
for guild in self.guilds:
for channel in guild.channels:
yield channel
def get_all_members(self):
"""Returns a generator with every :class:`.Member` the client can see.
This is equivalent to: ::
for guild in client.guilds:
for member in guild.members:
yield member
Yields
------
:class:`.Member`
A member the client can see.
"""
for guild in self.guilds:
for member in guild.members:
yield member
# listeners/waiters
    async def wait_until_ready(self):
        """|coro|

        Waits until the client's internal cache is all ready.

        The ready flag is set by the internal ``ready`` handler and cleared
        again by :meth:`close`.
        """
        await self._ready.wait()
def wait_for(self, event, *, check=None, timeout=None):
"""|coro|
Waits for a WebSocket event to be dispatched.
This could be used to wait for a user to reply to a message,
or to react to a message, or to edit a message in a self-contained
way.
The ``timeout`` parameter is passed onto :func:`asyncio.wait_for`. By default,
it does not timeout. Note that this does propagate the
:exc:`asyncio.TimeoutError` for you in case of timeout and is provided for
ease of use.
In case the event returns multiple arguments, a :class:`tuple` containing those
arguments is returned instead. Please check the
:ref:`documentation <discord-api-events>` for a list of events and their
parameters.
This function returns the **first event that meets the requirements**.
Examples
---------
Waiting for a user reply: ::
@client.event
async def on_message(message):
if message.content.startswith('$greet'):
channel = message.channel
await channel.send('Say hello!')
def check(m):
return m.content == 'hello' and m.channel == channel
msg = await client.wait_for('message', check=check)
await channel.send('Hello {.author}!'.format(msg))
Waiting for a thumbs up reaction from the message author: ::
@client.event
async def on_message(message):
if message.content.startswith('$thumb'):
channel = message.channel
await channel.send('Send me that \N{THUMBS UP SIGN} reaction, mate')
def check(reaction, user):
return user == message.author and str(reaction.emoji) == '\N{THUMBS UP SIGN}'
try:
reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check)
except asyncio.TimeoutError:
await channel.send('\N{THUMBS DOWN SIGN}')
else:
await channel.send('\N{THUMBS UP SIGN}')
Parameters
------------
event: :class:`str`
The event name, similar to the :ref:`event reference <discord-api-events>`,
but without the ``on_`` prefix, to wait for.
check: Optional[Callable[..., :class:`bool`]]
A predicate to check what to wait for. The arguments must meet the
parameters of the event being waited for.
timeout: Optional[:class:`float`]
The number of seconds to wait before timing out and raising
:exc:`asyncio.TimeoutError`.
Raises
-------
asyncio.TimeoutError
If a timeout is provided and it was reached.
Returns
--------
Any
Returns no arguments, a single argument, or a :class:`tuple` of multiple
arguments that mirrors the parameters passed in the
:ref:`event reference <discord-api-events>`.
"""
future = self.loop.create_future()
if check is None:
def _check(*args):
return True
check = _check
ev = event.lower()
try:
listeners = self._listeners[ev]
except KeyError:
listeners = []
self._listeners[ev] = listeners
listeners.append((future, check))
return asyncio.wait_for(future, timeout)
# event registration
def event(self, coro):
"""A decorator that registers an event to listen to.
You can find more info about the events on the :ref:`documentation below <discord-api-events>`.
The events must be a :ref:`coroutine <coroutine>`, if not, :exc:`TypeError` is raised.
Example
---------
.. code-block:: python3
@client.event
async def on_ready():
print('Ready!')
Raises
--------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('event registered must be a coroutine function')
setattr(self, coro.__name__, coro)
log.debug('%s has successfully been registered as an event', coro.__name__)
return coro
async def change_presence(self, *, activity=None, status=None, afk=False):
"""|coro|
Changes the client's presence.
Example
---------
.. code-block:: python3
game = discord.Game("with the API")
await client.change_presence(status=discord.Status.idle, activity=game)
Parameters
----------
activity: Optional[:class:`.BaseActivity`]
The activity being done. ``None`` if no currently active activity is done.
status: Optional[:class:`.Status`]
Indicates what status to change to. If ``None``, then
:attr:`.Status.online` is used.
afk: Optional[:class:`bool`]
Indicates if you are going AFK. This allows the discord
client to know how to handle push notifications better
for you in case you are actually idle and not lying.
Raises
------
:exc:`.InvalidArgument`
If the ``activity`` parameter is not the proper type.
"""
if status is None:
status = 'online'
status_enum = Status.online
elif status is Status.offline:
status = 'invisible'
status_enum = Status.offline
else:
status_enum = status
status = str(status)
await self.ws.change_presence(activity=activity, status=status, afk=afk)
for guild in self._connection.guilds:
me = guild.me
if me is None:
continue
if activity is not None:
me.activities = (activity,)
else:
me.activities = ()
me.status = status_enum
# Guild stuff
    def fetch_guilds(self, *, limit=100, before=None, after=None):
        """Retrieves an :class:`.AsyncIterator` that enables receiving your guilds.

        .. note::

            Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`,
            :attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`.

        .. note::

            This method is an API call. For general usage, consider :attr:`guilds` instead.

        Examples
        ---------

        Usage ::

            async for guild in client.fetch_guilds(limit=150):
                print(guild.name)

        Flattening into a list ::

            guilds = await client.fetch_guilds(limit=150).flatten()
            # guilds is now a list of Guild...

        All parameters are optional.

        Parameters
        -----------
        limit: Optional[:class:`int`]
            The number of guilds to retrieve.
            If ``None``, it retrieves every guild you have access to. Note, however,
            that this would make it a slow operation.
            Defaults to ``100``.
        before: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
            Retrieves guilds before this date or object.
            If a date is provided it must be a timezone-naive datetime representing UTC time.
        after: Union[:class:`.abc.Snowflake`, :class:`datetime.datetime`]
            Retrieves guilds after this date or object.
            If a date is provided it must be a timezone-naive datetime representing UTC time.

        Raises
        ------
        :exc:`.HTTPException`
            Getting the guilds failed.

        Yields
        --------
        :class:`.Guild`
            The guild with the guild data parsed.
        """
        return GuildIterator(self, limit=limit, before=before, after=after)
    async def fetch_template(self, code):
        """|coro|

        Gets a :class:`.Template` from a discord.new URL or code.

        Parameters
        -----------
        code: Union[:class:`.Template`, :class:`str`]
            The Discord Template Code or URL (must be a discord.new URL).

        Raises
        -------
        :exc:`.NotFound`
            The template is invalid.
        :exc:`.HTTPException`
            Getting the template failed.

        Returns
        --------
        :class:`.Template`
            The template from the URL/code.
        """
        code = utils.resolve_template(code)
        data = await self.http.get_template(code)
        return Template(data=data, state=self._connection)
    async def fetch_guild(self, guild_id):
        """|coro|

        Retrieves a :class:`.Guild` from an ID.

        .. note::

            Using this, you will **not** receive :attr:`.Guild.channels`, :attr:`.Guild.members`,
            :attr:`.Member.activity` and :attr:`.Member.voice` per :class:`.Member`.

        .. note::

            This method is an API call. For general usage, consider :meth:`get_guild` instead.

        Parameters
        -----------
        guild_id: :class:`int`
            The guild's ID to fetch from.

        Raises
        ------
        :exc:`.Forbidden`
            You do not have access to the guild.
        :exc:`.HTTPException`
            Getting the guild failed.

        Returns
        --------
        :class:`.Guild`
            The guild from the ID.
        """
        data = await self.http.get_guild(guild_id)
        return Guild(data=data, state=self._connection)
async def create_guild(self, name, region=None, icon=None, *, code=None):
"""|coro|
Creates a :class:`.Guild`.
Bot accounts in more than 10 guilds are not allowed to create guilds.
Parameters
----------
name: :class:`str`
The name of the guild.
region: :class:`.VoiceRegion`
The region for the voice communication server.
Defaults to :attr:`.VoiceRegion.us_west`.
icon: :class:`bytes`
The :term:`py:bytes-like object` representing the icon. See :meth:`.ClientUser.edit`
for more details on what is expected.
code: Optional[:class:`str`]
The code for a template to create the guild with.
.. versionadded:: 1.4
Raises
------
:exc:`.HTTPException`
Guild creation failed.
:exc:`.InvalidArgument`
Invalid icon image format given. Must be PNG or JPG.
Returns
-------
:class:`.Guild`
The guild created. This is not the same guild that is
added to cache.
"""
if icon is not None:
icon = utils._bytes_to_base64_data(icon)
region = region or VoiceRegion.us_west
region_value = region.value
if code:
data = await self.http.create_from_template(code, name, region_value, icon)
else:
data = await self.http.create_guild(name, region_value, icon)
return Guild(data=data, state=self._connection)
# Invite management
    async def fetch_invite(self, url, *, with_counts=True):
        """|coro|

        Gets an :class:`.Invite` from a discord.gg URL or ID.

        .. note::

            If the invite is for a guild you have not joined, the guild and channel
            attributes of the returned :class:`.Invite` will be :class:`.PartialInviteGuild` and
            :class:`.PartialInviteChannel` respectively.

        Parameters
        -----------
        url: Union[:class:`.Invite`, :class:`str`]
            The Discord invite ID or URL (must be a discord.gg URL).
        with_counts: :class:`bool`
            Whether to include count information in the invite. This fills the
            :attr:`.Invite.approximate_member_count` and :attr:`.Invite.approximate_presence_count`
            fields.

        Raises
        -------
        :exc:`.NotFound`
            The invite has expired or is invalid.
        :exc:`.HTTPException`
            Getting the invite failed.

        Returns
        --------
        :class:`.Invite`
            The invite from the URL/ID.
        """
        invite_id = utils.resolve_invite(url)
        data = await self.http.get_invite(invite_id, with_counts=with_counts)
        return Invite.from_incomplete(state=self._connection, data=data)
    async def delete_invite(self, invite):
        """|coro|

        Revokes an :class:`.Invite`, URL, or ID to an invite.

        You must have the :attr:`~.Permissions.manage_channels` permission in
        the associated guild to do this.

        Parameters
        ----------
        invite: Union[:class:`.Invite`, :class:`str`]
            The invite to revoke.

        Raises
        -------
        :exc:`.Forbidden`
            You do not have permissions to revoke invites.
        :exc:`.NotFound`
            The invite is invalid or expired.
        :exc:`.HTTPException`
            Revoking the invite failed.
        """
        invite_id = utils.resolve_invite(invite)
        await self.http.delete_invite(invite_id)
# Miscellaneous stuff
    async def fetch_widget(self, guild_id):
        """|coro|

        Gets a :class:`.Widget` from a guild ID.

        .. note::

            The guild must have the widget enabled to get this information.

        Parameters
        -----------
        guild_id: :class:`int`
            The ID of the guild.

        Raises
        -------
        :exc:`.Forbidden`
            The widget for this guild is disabled.
        :exc:`.HTTPException`
            Retrieving the widget failed.

        Returns
        --------
        :class:`.Widget`
            The guild's widget.
        """
        data = await self.http.get_widget(guild_id)
        return Widget(state=self._connection, data=data)
async def application_info(self):
"""|coro|
Retrieves the bot's application information.
Raises
-------
:exc:`.HTTPException`
Retrieving the information failed somehow.
Returns
--------
:class:`.AppInfo`
The bot's application information.
"""
data = await self.http.application_info()
if 'rpc_origins' not in data:
data['rpc_origins'] = None
return AppInfo(self._connection, data)
    async def fetch_user(self, user_id):
        """|coro|

        Retrieves a :class:`~discord.User` based on their ID. This can only
        be used by bot accounts. You do not have to share any guilds
        with the user to get this information, however many operations
        do require that you do.

        .. note::

            This method is an API call. If you have :attr:`Intents.members` and member cache enabled, consider :meth:`get_user` instead.

        Parameters
        -----------
        user_id: :class:`int`
            The user's ID to fetch from.

        Raises
        -------
        :exc:`.NotFound`
            A user with this ID does not exist.
        :exc:`.HTTPException`
            Fetching the user failed.

        Returns
        --------
        :class:`~discord.User`
            The user you requested.
        """
        data = await self.http.get_user(user_id)
        return User(state=self._connection, data=data)
@utils.deprecated()
async def fetch_user_profile(self, user_id):
"""|coro|
Gets an arbitrary user's profile.
.. deprecated:: 1.7
.. note::
This can only be used by non-bot accounts.
Parameters
------------
user_id: :class:`int`
The ID of the user to fetch their profile for.
Raises
-------
:exc:`.Forbidden`
Not allowed to fetch profiles.
:exc:`.HTTPException`
Fetching the profile failed.
Returns
--------
:class:`.Profile`
The profile of the user.
"""
state = self._connection
data = await self.http.get_user_profile(user_id)
def transform(d):
return state._get_guild(int(d['id']))
since = data.get('premium_since')
mutual_guilds = list(filter(None, map(transform, data.get('mutual_guilds', []))))
user = data['user']
return Profile(flags=user.get('flags', 0),
premium_since=utils.parse_time(since),
mutual_guilds=mutual_guilds,
user=User(data=user, state=state),
connected_accounts=data['connected_accounts'])
async def fetch_channel(self, channel_id):
"""|coro|
Retrieves a :class:`.abc.GuildChannel` or :class:`.abc.PrivateChannel` with the specified ID.
.. note::
This method is an API call. For general usage, consider :meth:`get_channel` instead.
.. versionadded:: 1.2
Raises
-------
:exc:`.InvalidData`
An unknown channel type was received from Discord.
:exc:`.HTTPException`
Retrieving the channel failed.
:exc:`.NotFound`
Invalid Channel ID.
:exc:`.Forbidden`
You do not have permission to fetch this channel.
Returns
--------
Union[:class:`.abc.GuildChannel`, :class:`.abc.PrivateChannel`]
The channel from the ID.
"""
data = await self.http.get_channel(channel_id)
factory, ch_type = _channel_factory(data['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data))
if ch_type in (ChannelType.group, ChannelType.private):
channel = factory(me=self.user, data=data, state=self._connection)
else:
guild_id = int(data['guild_id'])
guild = self.get_guild(guild_id) or Object(id=guild_id)
channel = factory(guild=guild, state=self._connection, data=data)
return channel
    async def fetch_webhook(self, webhook_id):
        """|coro|

        Retrieves a :class:`.Webhook` with the specified ID.

        Raises
        --------
        :exc:`.HTTPException`
            Retrieving the webhook failed.
        :exc:`.NotFound`
            Invalid webhook ID.
        :exc:`.Forbidden`
            You do not have permission to fetch this webhook.

        Returns
        ---------
        :class:`.Webhook`
            The webhook you requested.
        """
        data = await self.http.get_webhook(webhook_id)
        return Webhook.from_state(data, state=self._connection)
| 33.523746 | 136 | 0.58494 |
import asyncio
import logging
import signal
import sys
import traceback
import aiohttp
from .user import User, Profile
from .invite import Invite
from .template import Template
from .widget import Widget
from .guild import Guild
from .channel import _channel_factory
from .enums import ChannelType
from .mentions import AllowedMentions
from .errors import *
from .enums import Status, VoiceRegion
from .gateway import *
from .activity import BaseActivity, create_activity
from .voice_client import VoiceClient
from .http import HTTPClient
from .state import ConnectionState
from . import utils
from .object import Object
from .backoff import ExponentialBackoff
from .webhook import Webhook
from .iterators import GuildIterator
from .appinfo import AppInfo
log = logging.getLogger(__name__)
def _cancel_tasks(loop):
    """Cancel every pending task on ``loop`` and surface their unhandled exceptions.

    Runs the loop until all cancellations have completed, then forwards any
    exception from a non-cancelled task to the loop's exception handler.
    """
    try:
        # Python 3.6 and earlier expose the retriever as a Task classmethod.
        task_retriever = asyncio.Task.all_tasks
    except AttributeError:
        # Python 3.7+ moved it to the module level as asyncio.all_tasks.
        task_retriever = asyncio.all_tasks
    # NOTE(review): the ``loop=`` argument to all_tasks was removed in
    # Python 3.10 -- confirm the supported interpreter range.
    tasks = {t for t in task_retriever(loop=loop) if not t.done()}
    if not tasks:
        return
    log.info('Cleaning up after %d tasks.', len(tasks))
    for task in tasks:
        task.cancel()
    # Wait for every cancellation to finish; return_exceptions keeps gather alive.
    loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
    log.info('All tasks finished cancelling.')
    for task in tasks:
        if task.cancelled():
            continue
        if task.exception() is not None:
            loop.call_exception_handler({
                'message': 'Unhandled exception during Client.run shutdown.',
                'exception': task.exception(),
                'task': task
            })
def _cleanup_loop(loop):
    """Cancel outstanding tasks, shut down async generators, and close ``loop``.

    The loop is always closed, even if task cancellation raises.
    """
    try:
        _cancel_tasks(loop)
        if sys.version_info >= (3, 6):
            # shutdown_asyncgens only exists on Python 3.6+.
            loop.run_until_complete(loop.shutdown_asyncgens())
    finally:
        log.info('Closing the event loop.')
        loop.close()
class _ClientEventTask(asyncio.Task):
    """An :class:`asyncio.Task` that remembers which client event spawned it.

    Only adds a more informative ``repr`` on top of the regular task behavior.
    """

    def __init__(self, original_coro, event_name, coro, *, loop):
        super().__init__(coro, loop=loop)
        self.__event_name = event_name
        self.__original_coro = original_coro

    def __repr__(self):
        parts = [
            'state=%s' % self._state.lower(),
            'event=%s' % self.__event_name,
            'coro=%r' % self.__original_coro,
        ]
        if self._exception is not None:
            parts.append('exception=%r' % self._exception)
        return '<ClientEventTask {}>'.format(' '.join(parts))
class Client:
def __init__(self, *, loop=None, **options):
self.ws = None
self.loop = asyncio.get_event_loop() if loop is None else loop
self._listeners = {}
self.shard_id = options.get('shard_id')
self.shard_count = options.get('shard_count')
connector = options.pop('connector', None)
proxy = options.pop('proxy', None)
proxy_auth = options.pop('proxy_auth', None)
unsync_clock = options.pop('assume_unsync_clock', True)
self.http = HTTPClient(connector, proxy=proxy, proxy_auth=proxy_auth, unsync_clock=unsync_clock, loop=self.loop)
self._handlers = {
'ready': self._handle_ready
}
self._hooks = {
'before_identify': self._call_before_identify_hook
}
self._connection = self._get_state(**options)
self._connection.shard_count = self.shard_count
self._closed = False
self._ready = asyncio.Event()
self._connection._get_websocket = self._get_websocket
self._connection._get_client = lambda: self
if VoiceClient.warn_nacl:
VoiceClient.warn_nacl = False
log.warning("PyNaCl is not installed, voice will NOT be supported")
def _get_websocket(self, guild_id=None, *, shard_id=None):
return self.ws
def _get_state(self, **options):
return ConnectionState(dispatch=self.dispatch, handlers=self._handlers,
hooks=self._hooks, syncer=self._syncer, http=self.http, loop=self.loop, **options)
async def _syncer(self, guilds):
await self.ws.request_sync(guilds)
def _handle_ready(self):
self._ready.set()
@property
def latency(self):
ws = self.ws
return float('nan') if not ws else ws.latency
def is_ws_ratelimited(self):
if self.ws:
return self.ws.is_ratelimited()
return False
@property
def user(self):
return self._connection.user
@property
def guilds(self):
return self._connection.guilds
@property
def emojis(self):
return self._connection.emojis
@property
def cached_messages(self):
return utils.SequenceProxy(self._connection._messages or [])
@property
def private_channels(self):
return self._connection.private_channels
@property
def voice_clients(self):
return self._connection.voice_clients
def is_ready(self):
return self._ready.is_set()
async def _run_event(self, coro, event_name, *args, **kwargs):
try:
await coro(*args, **kwargs)
except asyncio.CancelledError:
pass
except Exception:
try:
await self.on_error(event_name, *args, **kwargs)
except asyncio.CancelledError:
pass
def _schedule_event(self, coro, event_name, *args, **kwargs):
wrapped = self._run_event(coro, event_name, *args, **kwargs)
return _ClientEventTask(original_coro=coro, event_name=event_name, coro=wrapped, loop=self.loop)
def dispatch(self, event, *args, **kwargs):
log.debug('Dispatching event %s', event)
method = 'on_' + event
listeners = self._listeners.get(event)
if listeners:
removed = []
for i, (future, condition) in enumerate(listeners):
if future.cancelled():
removed.append(i)
continue
try:
result = condition(*args)
except Exception as exc:
future.set_exception(exc)
removed.append(i)
else:
if result:
if len(args) == 0:
future.set_result(None)
elif len(args) == 1:
future.set_result(args[0])
else:
future.set_result(args)
removed.append(i)
if len(removed) == len(listeners):
self._listeners.pop(event)
else:
for idx in reversed(removed):
del listeners[idx]
try:
coro = getattr(self, method)
except AttributeError:
pass
else:
self._schedule_event(coro, method, *args, **kwargs)
async def on_error(self, event_method, *args, **kwargs):
print('Ignoring exception in {}'.format(event_method), file=sys.stderr)
traceback.print_exc()
@utils.deprecated('Guild.chunk')
async def request_offline_members(self, *guilds):
if any(g.unavailable for g in guilds):
raise InvalidArgument('An unavailable guild was passed.')
for guild in guilds:
await self._connection.chunk_guild(guild)
async def _call_before_identify_hook(self, shard_id, *, initial=False):
await self.before_identify_hook(shard_id, initial=initial)
async def before_identify_hook(self, shard_id, *, initial=False):
if not initial:
await asyncio.sleep(5.0)
async def login(self, token, *, bot=True):
log.info('logging in using static token')
await self.http.static_login(token.strip(), bot=bot)
self._connection.is_bot = bot
@utils.deprecated('Client.close')
async def logout(self):
await self.close()
async def connect(self, *, reconnect=True):
backoff = ExponentialBackoff()
ws_params = {
'initial': True,
'shard_id': self.shard_id,
}
while not self.is_closed():
try:
coro = DiscordWebSocket.from_client(self, **ws_params)
self.ws = await asyncio.wait_for(coro, timeout=60.0)
ws_params['initial'] = False
while True:
await self.ws.poll_event()
except ReconnectWebSocket as e:
log.info('Got a request to %s the websocket.', e.op)
self.dispatch('disconnect')
ws_params.update(sequence=self.ws.sequence, resume=e.resume, session=self.ws.session_id)
continue
except (OSError,
HTTPException,
GatewayNotFound,
ConnectionClosed,
aiohttp.ClientError,
asyncio.TimeoutError) as exc:
self.dispatch('disconnect')
if not reconnect:
await self.close()
if isinstance(exc, ConnectionClosed) and exc.code == 1000:
return
raise
if self.is_closed():
return
# If we get connection reset by peer then try to RESUME
if isinstance(exc, OSError) and exc.errno in (54, 10054):
ws_params.update(sequence=self.ws.sequence, initial=False, resume=True, session=self.ws.session_id)
continue
# We should only get this when an unhandled close code happens,
# such as a clean disconnect (1000) or a bad state (bad token, no sharding, etc)
# sometimes, discord sends us 1000 for unknown reasons so we should reconnect
# regardless and rely on is_closed instead
if isinstance(exc, ConnectionClosed):
if exc.code == 4014:
raise PrivilegedIntentsRequired(exc.shard_id) from None
if exc.code != 1000:
await self.close()
raise
retry = backoff.delay()
log.exception("Attempting a reconnect in %.2fs", retry)
await asyncio.sleep(retry)
# Always try to RESUME the connection
# If the connection is not RESUME-able then the gateway will invalidate the session.
# This is apparently what the official Discord client does.
ws_params.update(sequence=self.ws.sequence, resume=True, session=self.ws.session_id)
async def close(self):
if self._closed:
return
await self.http.close()
self._closed = True
for voice in self.voice_clients:
try:
await voice.disconnect()
except Exception:
# if an error happens during disconnects, disregard it.
pass
if self.ws is not None and self.ws.open:
await self.ws.close(code=1000)
self._ready.clear()
def clear(self):
self._closed = False
self._ready.clear()
self._connection.clear()
self.http.recreate()
async def start(self, *args, **kwargs):
bot = kwargs.pop('bot', True)
reconnect = kwargs.pop('reconnect', True)
if kwargs:
raise TypeError("unexpected keyword argument(s) %s" % list(kwargs.keys()))
await self.login(*args, bot=bot)
await self.connect(reconnect=reconnect)
def run(self, *args, **kwargs):
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
async def runner():
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def stop_loop_on_completion(f):
loop.stop()
future = asyncio.ensure_future(runner(), loop=loop)
future.add_done_callback(stop_loop_on_completion)
try:
loop.run_forever()
except KeyboardInterrupt:
log.info('Received signal to terminate bot and event loop.')
finally:
future.remove_done_callback(stop_loop_on_completion)
log.info('Cleaning up tasks.')
_cleanup_loop(loop)
if not future.cancelled():
try:
return future.result()
except KeyboardInterrupt:
# I am unsure why this gets raised here but suppress it anyway
return None
# properties
def is_closed(self):
return self._closed
@property
def activity(self):
return create_activity(self._connection._activity)
@activity.setter
def activity(self, value):
if value is None:
self._connection._activity = None
elif isinstance(value, BaseActivity):
self._connection._activity = value.to_dict()
else:
raise TypeError('activity must derive from BaseActivity.')
@property
def allowed_mentions(self):
return self._connection.allowed_mentions
@allowed_mentions.setter
def allowed_mentions(self, value):
if value is None or isinstance(value, AllowedMentions):
self._connection.allowed_mentions = value
else:
raise TypeError('allowed_mentions must be AllowedMentions not {0.__class__!r}'.format(value))
@property
def intents(self):
return self._connection.intents
# helpers/getters
@property
def users(self):
return list(self._connection._users.values())
def get_channel(self, id):
return self._connection.get_channel(id)
def get_guild(self, id):
return self._connection._get_guild(id)
def get_user(self, id):
return self._connection.get_user(id)
def get_emoji(self, id):
return self._connection.get_emoji(id)
def get_all_channels(self):
for guild in self.guilds:
for channel in guild.channels:
yield channel
def get_all_members(self):
for guild in self.guilds:
for member in guild.members:
yield member
# listeners/waiters
async def wait_until_ready(self):
await self._ready.wait()
def wait_for(self, event, *, check=None, timeout=None):
future = self.loop.create_future()
if check is None:
def _check(*args):
return True
check = _check
ev = event.lower()
try:
listeners = self._listeners[ev]
except KeyError:
listeners = []
self._listeners[ev] = listeners
listeners.append((future, check))
return asyncio.wait_for(future, timeout)
# event registration
def event(self, coro):
if not asyncio.iscoroutinefunction(coro):
raise TypeError('event registered must be a coroutine function')
setattr(self, coro.__name__, coro)
log.debug('%s has successfully been registered as an event', coro.__name__)
return coro
async def change_presence(self, *, activity=None, status=None, afk=False):
if status is None:
status = 'online'
status_enum = Status.online
elif status is Status.offline:
status = 'invisible'
status_enum = Status.offline
else:
status_enum = status
status = str(status)
await self.ws.change_presence(activity=activity, status=status, afk=afk)
for guild in self._connection.guilds:
me = guild.me
if me is None:
continue
if activity is not None:
me.activities = (activity,)
else:
me.activities = ()
me.status = status_enum
# Guild stuff
def fetch_guilds(self, *, limit=100, before=None, after=None):
return GuildIterator(self, limit=limit, before=before, after=after)
async def fetch_template(self, code):
code = utils.resolve_template(code)
data = await self.http.get_template(code)
return Template(data=data, state=self._connection)
async def fetch_guild(self, guild_id):
data = await self.http.get_guild(guild_id)
return Guild(data=data, state=self._connection)
async def create_guild(self, name, region=None, icon=None, *, code=None):
if icon is not None:
icon = utils._bytes_to_base64_data(icon)
region = region or VoiceRegion.us_west
region_value = region.value
if code:
data = await self.http.create_from_template(code, name, region_value, icon)
else:
data = await self.http.create_guild(name, region_value, icon)
return Guild(data=data, state=self._connection)
# Invite management
async def fetch_invite(self, url, *, with_counts=True):
invite_id = utils.resolve_invite(url)
data = await self.http.get_invite(invite_id, with_counts=with_counts)
return Invite.from_incomplete(state=self._connection, data=data)
async def delete_invite(self, invite):
invite_id = utils.resolve_invite(invite)
await self.http.delete_invite(invite_id)
# Miscellaneous stuff
async def fetch_widget(self, guild_id):
data = await self.http.get_widget(guild_id)
return Widget(state=self._connection, data=data)
async def application_info(self):
data = await self.http.application_info()
if 'rpc_origins' not in data:
data['rpc_origins'] = None
return AppInfo(self._connection, data)
async def fetch_user(self, user_id):
data = await self.http.get_user(user_id)
return User(state=self._connection, data=data)
@utils.deprecated()
async def fetch_user_profile(self, user_id):
state = self._connection
data = await self.http.get_user_profile(user_id)
def transform(d):
return state._get_guild(int(d['id']))
since = data.get('premium_since')
mutual_guilds = list(filter(None, map(transform, data.get('mutual_guilds', []))))
user = data['user']
return Profile(flags=user.get('flags', 0),
premium_since=utils.parse_time(since),
mutual_guilds=mutual_guilds,
user=User(data=user, state=state),
connected_accounts=data['connected_accounts'])
async def fetch_channel(self, channel_id):
data = await self.http.get_channel(channel_id)
factory, ch_type = _channel_factory(data['type'])
if factory is None:
raise InvalidData('Unknown channel type {type} for channel ID {id}.'.format_map(data))
if ch_type in (ChannelType.group, ChannelType.private):
channel = factory(me=self.user, data=data, state=self._connection)
else:
guild_id = int(data['guild_id'])
guild = self.get_guild(guild_id) or Object(id=guild_id)
channel = factory(guild=guild, state=self._connection, data=data)
return channel
async def fetch_webhook(self, webhook_id):
data = await self.http.get_webhook(webhook_id)
return Webhook.from_state(data, state=self._connection)
| true | true |
1c35fe437d43e2a44d93fc07afd57891e291470b | 21,143 | py | Python | resources/lib/kodi/library.py | Sopor/plugin.video.netflix-1 | ecefb537cdffa368e104864b313fbcc010b44b68 | [
"MIT"
] | null | null | null | resources/lib/kodi/library.py | Sopor/plugin.video.netflix-1 | ecefb537cdffa368e104864b313fbcc010b44b68 | [
"MIT"
] | null | null | null | resources/lib/kodi/library.py | Sopor/plugin.video.netflix-1 | ecefb537cdffa368e104864b313fbcc010b44b68 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Copyright (C) 2020 Stefano Gottardo
Kodi library integration
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import os
from datetime import datetime
from future.utils import iteritems
import xbmc
import resources.lib.utils.api_requests as api
import resources.lib.common as common
import resources.lib.kodi.nfo as nfo
import resources.lib.kodi.ui as ui
from resources.lib.database.db_utils import VidLibProp
from resources.lib.globals import G
from resources.lib.kodi.library_tasks import LibraryTasks
from resources.lib.kodi.library_utils import (request_kodi_library_update, get_library_path,
FOLDER_NAME_MOVIES, FOLDER_NAME_SHOWS,
is_auto_update_library_running, request_kodi_library_scan_decorator,
get_library_subfolders, delay_anti_ban)
from resources.lib.utils.logging import LOG, measure_exec_time_decorator
try: # Python 2
unicode
except NameError: # Python 3
unicode = str # pylint: disable=redefined-builtin
# Reasons that led to the creation of a class for the library operations:
# - Time-consuming update functionality like "full sync of kodi library", "auto update", "export" (large tv show)
# from context menu or settings, can not be performed within of the service side or will cause IPC timeouts
# and could block IPC access for other actions at same time.
# - The scheduled update operation for the library must be performed within the service, with the goal of:
# - Avoid tons of IPC calls that cause the continuous display of the loading screens while using Kodi
# to do other things at same time
# - Avoid use the IPC can improve the time for completion and so improve a bit the cpu use
# - Allows you to check when Kodi will be closed and avoid the force close of the add-on
# - A class allows you to choice to retrieve the data from Netflix by using IPC or directly nfsession.
# The time needed to initialize the class at each operation (about 30ms) is a small cost compared to the advantages.
def get_library_cls():
    """
    Build a Library instance configured for use outside the add-on service.
    FUNCTION NOT TO BE USED IN ADD-ON SERVICE INSTANCE
    """
    # Wire the Netflix data accessors through IPC; the profiles-info callback
    # is only needed by the service side, so it is left as None here.
    metadata_getter = api.get_metadata
    mylist_getter = api.get_mylist_videoids_profile_switch
    return Library(metadata_getter, mylist_getter, None)
class Library(LibraryTasks):
    """Kodi library integration.

    Performs the long-running export/update/remove operations against the
    Kodi library. The Netflix data accessors are injected as callables so the
    same class works both inside the service (direct session access) and
    outside it (IPC wrappers) — see get_library_cls().
    """

    def __init__(self, func_get_metadata, func_get_mylist_videoids_profile_switch, func_req_profiles_info):
        LibraryTasks.__init__(self)
        # External functions (injected; may be IPC proxies or direct calls).
        # NOTE(review): func_req_profiles_info can be None (see get_library_cls);
        # it is only invoked when auto_update_library(update_profiles=True).
        self.ext_func_get_metadata = func_get_metadata
        self.ext_func_get_mylist_videoids_profile_switch = func_get_mylist_videoids_profile_switch
        self.ext_func_req_profiles_info = func_req_profiles_info

    @request_kodi_library_scan_decorator
    def export_to_library(self, videoid, show_prg_dialog=True):
        """
        Export an item to the Kodi library
        :param videoid: the videoid
        :param show_prg_dialog: if True show progress dialog, otherwise, a background progress bar
        """
        LOG.info('Start exporting {} to the library', videoid)
        nfo_settings = nfo.NFOSettings()
        nfo_settings.show_export_dialog(videoid.mediatype)
        self.execute_library_task_gui(videoid,
                                      self.export_item,
                                      title=common.get_local_string(30018),
                                      nfo_settings=nfo_settings,
                                      show_prg_dialog=show_prg_dialog)

    @request_kodi_library_scan_decorator
    def export_to_library_new_episodes(self, videoid, show_prg_dialog=True):
        """
        Export new episodes for a tv show by it's videoid
        :param videoid: The videoid of the tv show to process
        :param show_prg_dialog: if True show progress dialog, otherwise, a background progress bar
        """
        LOG.info('Start exporting new episodes for {}', videoid)
        # Only tv shows can receive new episodes; anything else is a no-op.
        if videoid.mediatype != common.VideoId.SHOW:
            LOG.warn('{} is not a tv show, the operation is cancelled', videoid)
            return
        nfo_settings = nfo.NFOSettings()
        nfo_settings.show_export_dialog(videoid.mediatype)
        self.execute_library_task_gui(videoid,
                                      self.export_new_item,
                                      title=common.get_local_string(30198),
                                      nfo_settings=nfo_settings,
                                      show_prg_dialog=show_prg_dialog)

    @request_kodi_library_scan_decorator
    def update_library(self, videoid, show_prg_dialog=True):
        """
        Update items in the Kodi library
        :param videoid: the videoid
        :param show_prg_dialog: if True show progress dialog, otherwise, a background progress bar
        """
        LOG.info('Start updating {} in the library', videoid)
        nfo_settings = nfo.NFOSettings()
        nfo_settings.show_export_dialog(videoid.mediatype)
        # The update is implemented as a remove followed by a fresh export.
        self.execute_library_task_gui(videoid,
                                      self.remove_item,
                                      title=common.get_local_string(30061),
                                      nfo_settings=nfo_settings,
                                      show_prg_dialog=show_prg_dialog)
        self.execute_library_task_gui(videoid,
                                      self.export_item,
                                      title=common.get_local_string(30061),
                                      nfo_settings=nfo_settings,
                                      show_prg_dialog=show_prg_dialog)

    def remove_from_library(self, videoid, show_prg_dialog=True):
        """
        Remove an item from the Kodi library
        :param videoid: the videoid
        :param show_prg_dialog: if True show progress dialog, otherwise, a background progress bar
        """
        LOG.info('Start removing {} from library', videoid)
        # Remove from the Kodi database first, then delete the exported files.
        common.remove_videoid_from_kodi_library(videoid)
        self.execute_library_task_gui(videoid,
                                      self.remove_item,
                                      title=common.get_local_string(30030),
                                      show_prg_dialog=show_prg_dialog)

    def sync_library_with_mylist(self):
        """
        Perform a full sync of Kodi library with Netflix "My List",
        by deleting everything that was previously exported
        """
        LOG.info('Performing sync of Kodi library with My list')
        # Clear all the library
        self.clear_library()
        # Start the sync
        self.auto_update_library(True, show_nfo_dialog=True, clear_on_cancel=True)

    @measure_exec_time_decorator(is_immediate=True)
    def clear_library(self, show_prg_dialog=True):
        """
        Delete all exported items to the library
        :param show_prg_dialog: if True, will be show a progress dialog window
        """
        LOG.info('Start deleting exported library items')
        # This will clear all the add-on library data, to prevents possible inconsistencies when for some reason
        # such as improper use of the add-on, unexpected error or other has broken the library database data or files
        with ui.ProgressDialog(show_prg_dialog, common.get_local_string(30245), max_value=3) as progress_dlg:
            progress_dlg.perform_step()
            progress_dlg.set_wait_message()
            G.SHARED_DB.purge_library()
            for folder_name in [FOLDER_NAME_MOVIES, FOLDER_NAME_SHOWS]:
                progress_dlg.perform_step()
                progress_dlg.set_wait_message()
                section_root_dir = common.join_folders_paths(get_library_path(), folder_name)
                common.delete_folder_contents(section_root_dir, delete_subfolders=True)
        # Clean the Kodi library database
        common.clean_library(show_prg_dialog)

    def auto_update_library(self, sync_with_mylist, show_prg_dialog=True, show_nfo_dialog=False, clear_on_cancel=False,
                            update_profiles=False):
        """
        Perform an auto update of the exported items in to Kodi library.
        - The main purpose is check if there are new seasons/episodes.
        - In the case "Sync Kodi library with My list" feature is enabled, will be also synchronized with My List.
        :param sync_with_mylist: if True, sync the Kodi library with Netflix My List
        :param show_prg_dialog: if True, will be show a progress dialog window and the errors will be notified to user
        :param show_nfo_dialog: if True, ask to user if want export NFO files (override custom NFO actions for videoid)
        :param clear_on_cancel: if True, when the user cancel the operations will be cleared the entire library
        :param update_profiles: if True, before perform sync_with_mylist will be updated the profiles
        """
        # Guard against concurrent runs (flag persisted in the shared DB).
        if is_auto_update_library_running(show_prg_dialog):
            return
        LOG.info('Start auto-updating of Kodi library {}', '(with sync of My List)' if sync_with_mylist else '')
        G.SHARED_DB.set_value('library_auto_update_is_running', True)
        G.SHARED_DB.set_value('library_auto_update_start_time', datetime.now())
        try:
            # Get the full list of the exported tvshows/movies as id (VideoId.value)
            exp_tvshows_videoids_values = G.SHARED_DB.get_tvshows_id_list()
            exp_movies_videoids_values = G.SHARED_DB.get_movies_id_list()
            # Get the exported tv shows (to be updated) as dict (key=videoid, value=type of task)
            videoids_tasks = {
                common.VideoId.from_path([common.VideoId.SHOW, videoid_value]): self.export_new_item
                for videoid_value in G.SHARED_DB.get_tvshows_id_list(VidLibProp['exclude_update'], False)
            }
            if sync_with_mylist and update_profiles:
                # Before do the sync with My list try to update the profiles in the database,
                # to do a sanity check of the features that are linked to the profiles
                self.ext_func_req_profiles_info(update_database=True)  # pylint: disable=not-callable
                sync_with_mylist = G.ADDON.getSettingBool('lib_sync_mylist')
            # If enabled sync the Kodi library with Netflix My List
            if sync_with_mylist:
                self._sync_my_list_ops(videoids_tasks, exp_tvshows_videoids_values, exp_movies_videoids_values)
            # Show a warning message when there are more than 100 titles to be updated, making too many metadata
            # requests may cause blocking of http communication from the server or temporary ban of the account
            if show_prg_dialog:
                total_titles_upd = sum(task != self.remove_item for task in videoids_tasks.values())
                if total_titles_upd >= 100 and not ui.ask_for_confirmation(
                        common.get_local_string(30122),
                        common.get_local_string(30059).format(total_titles_upd)):
                    return
            # Start the update operations
            ret = self._update_library(videoids_tasks, exp_tvshows_videoids_values, show_prg_dialog, show_nfo_dialog,
                                       clear_on_cancel)
            if not ret:
                return
            request_kodi_library_update(scan=True, clean=True)
            # Save date for completed operation to compute next update schedule (used in library_updater.py)
            G.SHARED_DB.set_value('library_auto_update_last_start', datetime.now())
            LOG.info('Auto update of the Kodi library completed')
            if not G.ADDON.getSettingBool('lib_auto_upd_disable_notification'):
                ui.show_notification(common.get_local_string(30220), time=5000)
        except Exception as exc:  # pylint: disable=broad-except
            import traceback
            LOG.error('An error has occurred in the library auto update: {}', exc)
            LOG.error(G.py2_decode(traceback.format_exc(), 'latin-1'))
        finally:
            # Always release the "running" flag, even on error or cancellation.
            G.SHARED_DB.set_value('library_auto_update_is_running', False)

    def _sync_my_list_ops(self, videoids_tasks, exp_tvshows_videoids_values, exp_movies_videoids_values):
        """Add remove/export tasks so the library mirrors the profile's My List."""
        # Get videoids from the My list (of the chosen profile)
        # pylint: disable=not-callable
        mylist_video_id_list, mylist_video_id_list_type = self.ext_func_get_mylist_videoids_profile_switch()
        # Check if tv shows have been removed from the My List
        for videoid_value in exp_tvshows_videoids_values:
            if unicode(videoid_value) in mylist_video_id_list:
                continue
            # The tv show no more exist in My List so remove it from library
            videoid = common.VideoId.from_path([common.VideoId.SHOW, videoid_value])
            videoids_tasks.update({videoid: self.remove_item})
        # Check if movies have been removed from the My List
        for videoid_value in exp_movies_videoids_values:
            if unicode(videoid_value) in mylist_video_id_list:
                continue
            # The movie no more exist in My List so remove it from library
            videoid = common.VideoId.from_path([common.VideoId.MOVIE, videoid_value])
            videoids_tasks.update({videoid: self.remove_item})
        # Add to library the missing tv shows / movies of My List
        for index, videoid_value in enumerate(mylist_video_id_list):
            if (int(videoid_value) not in exp_tvshows_videoids_values and
                    int(videoid_value) not in exp_movies_videoids_values):
                is_movie = mylist_video_id_list_type[index] == 'movie'
                videoid = common.VideoId(**{('movieid' if is_movie else 'tvshowid'): videoid_value})
                videoids_tasks.update({videoid: self.export_item if is_movie else self.export_new_item})

    def _update_library(self, videoids_tasks, exp_tvshows_videoids_values, show_prg_dialog, show_nfo_dialog,
                        clear_on_cancel):
        """Run the queued tasks; return False when interrupted by user or Kodi."""
        # If set ask to user if want to export NFO files (override user custom NFO settings for videoids)
        nfo_settings_override = None
        if show_nfo_dialog:
            nfo_settings_override = nfo.NFOSettings()
            nfo_settings_override.show_export_dialog()
        # Get the exported tvshows, but to be excluded from the updates
        excluded_videoids_values = G.SHARED_DB.get_tvshows_id_list(VidLibProp['exclude_update'], True)
        # Start the update operations
        with ui.ProgressDialog(show_prg_dialog, max_value=len(videoids_tasks)) as progress_bar:
            for videoid, task_handler in iteritems(videoids_tasks):
                # Check if current videoid is excluded from updates
                if int(videoid.value) in excluded_videoids_values:
                    continue
                # Get the NFO settings for the current videoid
                if not nfo_settings_override and int(videoid.value) in exp_tvshows_videoids_values:
                    # User custom NFO setting
                    # it is possible that the user has chosen not to export NFO files for a specific tv show
                    nfo_export = G.SHARED_DB.get_tvshow_property(videoid.value,
                                                                 VidLibProp['nfo_export'], False)
                    nfo_settings = nfo.NFOSettings(nfo_export)
                else:
                    nfo_settings = nfo_settings_override or nfo.NFOSettings()
                # Execute the task
                for index, total_tasks, title in self.execute_library_task(videoid,
                                                                           task_handler,
                                                                           nfo_settings=nfo_settings,
                                                                           notify_errors=show_prg_dialog):
                    label_partial_op = ' ({}/{})'.format(index + 1, total_tasks) if total_tasks > 1 else ''
                    progress_bar.set_message(title + label_partial_op)
                    if progress_bar.is_cancelled():
                        LOG.warn('Auto update of the Kodi library interrupted by User')
                        if clear_on_cancel:
                            self.clear_library(True)
                        return False
                    if self.monitor.abortRequested():
                        LOG.warn('Auto update of the Kodi library interrupted by Kodi')
                        return False
                progress_bar.perform_step()
                progress_bar.set_wait_message()
                # Throttle metadata requests to avoid server-side blocking/ban.
                delay_anti_ban()
        common.clean_library(show_prg_dialog)
        return True

    def import_library(self):
        """
        Imports an already existing exported STRM library into the add-on library database,
        allows you to restore an existing library, by avoiding to recreate it from scratch.
        This operations also update the missing tv shows seasons and episodes, and automatically
        converts old STRM format type from add-on version 0.13.x or before 1.7.0 to new format.
        """
        # If set ask to user if want to export NFO files
        nfo_settings = nfo.NFOSettings()
        nfo_settings.show_export_dialog()
        LOG.info('Start importing Kodi library')
        remove_folders = []  # List of failed imports paths to be optionally removed
        remove_titles = []  # List of failed imports titles to be optionally removed
        # Start importing STRM files
        folders = get_library_subfolders(FOLDER_NAME_MOVIES) + get_library_subfolders(FOLDER_NAME_SHOWS)
        with ui.ProgressDialog(True, max_value=len(folders)) as progress_bar:
            for folder_path in folders:
                folder_name = os.path.basename(G.py2_decode(xbmc.translatePath(folder_path)))
                progress_bar.set_message(folder_name)
                try:
                    videoid = self.import_videoid_from_existing_strm(folder_path, folder_name)
                    if videoid is None:
                        # Failed to import, add folder to remove list
                        remove_folders.append(folder_path)
                        remove_titles.append(folder_name)
                        continue
                    # Successfully imported, Execute the task
                    for index, total_tasks, title in self.execute_library_task(videoid,
                                                                               self.export_item,
                                                                               nfo_settings=nfo_settings,
                                                                               notify_errors=True):
                        label_partial_op = ' ({}/{})'.format(index + 1, total_tasks) if total_tasks > 1 else ''
                        progress_bar.set_message(title + label_partial_op)
                        if progress_bar.is_cancelled():
                            LOG.warn('Import library interrupted by User')
                            return
                        if self.monitor.abortRequested():
                            LOG.warn('Import library interrupted by Kodi')
                            return
                except ImportWarning:
                    # Ignore it, something was wrong in STRM file (see _import_videoid in library_jobs.py)
                    pass
                progress_bar.perform_step()
                progress_bar.set_wait_message()
                delay_anti_ban()
        ret = self._import_library_remove(remove_titles, remove_folders)
        request_kodi_library_update(scan=True, clean=ret)

    def _import_library_remove(self, remove_titles, remove_folders):
        """Optionally delete the folders whose import failed; return True if deleted."""
        if not remove_folders:
            return False
        # If there are STRM files that it was not possible to import them,
        # we will ask to user if you want to delete them
        tot_folders = len(remove_folders)
        if tot_folders > 50:
            # Keep the confirmation message readable by truncating the list.
            remove_titles = remove_titles[:50] + ['...']
        message = common.get_local_string(30246).format(tot_folders) + '[CR][CR]' + ', '.join(remove_titles)
        if not ui.ask_for_confirmation(common.get_local_string(30140), message):
            return False
        # Delete all folders
        LOG.info('Start deleting folders')
        with ui.ProgressDialog(True, max_value=tot_folders) as progress_bar:
            for file_path in remove_folders:
                progress_bar.set_message('{}/{}'.format(progress_bar.value, tot_folders))
                LOG.debug('Deleting folder: {}', file_path)
                common.delete_folder(file_path)
                progress_bar.perform_step()
        return True
| 56.381333 | 119 | 0.636002 |
from __future__ import absolute_import, division, unicode_literals
import os
from datetime import datetime
from future.utils import iteritems
import xbmc
import resources.lib.utils.api_requests as api
import resources.lib.common as common
import resources.lib.kodi.nfo as nfo
import resources.lib.kodi.ui as ui
from resources.lib.database.db_utils import VidLibProp
from resources.lib.globals import G
from resources.lib.kodi.library_tasks import LibraryTasks
from resources.lib.kodi.library_utils import (request_kodi_library_update, get_library_path,
FOLDER_NAME_MOVIES, FOLDER_NAME_SHOWS,
is_auto_update_library_running, request_kodi_library_scan_decorator,
get_library_subfolders, delay_anti_ban)
from resources.lib.utils.logging import LOG, measure_exec_time_decorator
try:
unicode
except NameError:
unicode = str
def get_library_cls():
return Library(api.get_metadata, api.get_mylist_videoids_profile_switch, None)
class Library(LibraryTasks):
def __init__(self, func_get_metadata, func_get_mylist_videoids_profile_switch, func_req_profiles_info):
LibraryTasks.__init__(self)
self.ext_func_get_metadata = func_get_metadata
self.ext_func_get_mylist_videoids_profile_switch = func_get_mylist_videoids_profile_switch
self.ext_func_req_profiles_info = func_req_profiles_info
@request_kodi_library_scan_decorator
def export_to_library(self, videoid, show_prg_dialog=True):
LOG.info('Start exporting {} to the library', videoid)
nfo_settings = nfo.NFOSettings()
nfo_settings.show_export_dialog(videoid.mediatype)
self.execute_library_task_gui(videoid,
self.export_item,
title=common.get_local_string(30018),
nfo_settings=nfo_settings,
show_prg_dialog=show_prg_dialog)
@request_kodi_library_scan_decorator
def export_to_library_new_episodes(self, videoid, show_prg_dialog=True):
LOG.info('Start exporting new episodes for {}', videoid)
if videoid.mediatype != common.VideoId.SHOW:
LOG.warn('{} is not a tv show, the operation is cancelled', videoid)
return
nfo_settings = nfo.NFOSettings()
nfo_settings.show_export_dialog(videoid.mediatype)
self.execute_library_task_gui(videoid,
self.export_new_item,
title=common.get_local_string(30198),
nfo_settings=nfo_settings,
show_prg_dialog=show_prg_dialog)
@request_kodi_library_scan_decorator
def update_library(self, videoid, show_prg_dialog=True):
LOG.info('Start updating {} in the library', videoid)
nfo_settings = nfo.NFOSettings()
nfo_settings.show_export_dialog(videoid.mediatype)
self.execute_library_task_gui(videoid,
self.remove_item,
title=common.get_local_string(30061),
nfo_settings=nfo_settings,
show_prg_dialog=show_prg_dialog)
self.execute_library_task_gui(videoid,
self.export_item,
title=common.get_local_string(30061),
nfo_settings=nfo_settings,
show_prg_dialog=show_prg_dialog)
def remove_from_library(self, videoid, show_prg_dialog=True):
LOG.info('Start removing {} from library', videoid)
common.remove_videoid_from_kodi_library(videoid)
self.execute_library_task_gui(videoid,
self.remove_item,
title=common.get_local_string(30030),
show_prg_dialog=show_prg_dialog)
def sync_library_with_mylist(self):
LOG.info('Performing sync of Kodi library with My list')
self.clear_library()
self.auto_update_library(True, show_nfo_dialog=True, clear_on_cancel=True)
@measure_exec_time_decorator(is_immediate=True)
def clear_library(self, show_prg_dialog=True):
LOG.info('Start deleting exported library items')
with ui.ProgressDialog(show_prg_dialog, common.get_local_string(30245), max_value=3) as progress_dlg:
progress_dlg.perform_step()
progress_dlg.set_wait_message()
G.SHARED_DB.purge_library()
for folder_name in [FOLDER_NAME_MOVIES, FOLDER_NAME_SHOWS]:
progress_dlg.perform_step()
progress_dlg.set_wait_message()
section_root_dir = common.join_folders_paths(get_library_path(), folder_name)
common.delete_folder_contents(section_root_dir, delete_subfolders=True)
common.clean_library(show_prg_dialog)
def auto_update_library(self, sync_with_mylist, show_prg_dialog=True, show_nfo_dialog=False, clear_on_cancel=False,
update_profiles=False):
if is_auto_update_library_running(show_prg_dialog):
return
LOG.info('Start auto-updating of Kodi library {}', '(with sync of My List)' if sync_with_mylist else '')
G.SHARED_DB.set_value('library_auto_update_is_running', True)
G.SHARED_DB.set_value('library_auto_update_start_time', datetime.now())
try:
exp_tvshows_videoids_values = G.SHARED_DB.get_tvshows_id_list()
exp_movies_videoids_values = G.SHARED_DB.get_movies_id_list()
videoids_tasks = {
common.VideoId.from_path([common.VideoId.SHOW, videoid_value]): self.export_new_item
for videoid_value in G.SHARED_DB.get_tvshows_id_list(VidLibProp['exclude_update'], False)
}
if sync_with_mylist and update_profiles:
self.ext_func_req_profiles_info(update_database=True)
sync_with_mylist = G.ADDON.getSettingBool('lib_sync_mylist')
if sync_with_mylist:
self._sync_my_list_ops(videoids_tasks, exp_tvshows_videoids_values, exp_movies_videoids_values)
if show_prg_dialog:
total_titles_upd = sum(task != self.remove_item for task in videoids_tasks.values())
if total_titles_upd >= 100 and not ui.ask_for_confirmation(
common.get_local_string(30122),
common.get_local_string(30059).format(total_titles_upd)):
return
ret = self._update_library(videoids_tasks, exp_tvshows_videoids_values, show_prg_dialog, show_nfo_dialog,
clear_on_cancel)
if not ret:
return
request_kodi_library_update(scan=True, clean=True)
G.SHARED_DB.set_value('library_auto_update_last_start', datetime.now())
LOG.info('Auto update of the Kodi library completed')
if not G.ADDON.getSettingBool('lib_auto_upd_disable_notification'):
ui.show_notification(common.get_local_string(30220), time=5000)
except Exception as exc:
import traceback
LOG.error('An error has occurred in the library auto update: {}', exc)
LOG.error(G.py2_decode(traceback.format_exc(), 'latin-1'))
finally:
G.SHARED_DB.set_value('library_auto_update_is_running', False)
def _sync_my_list_ops(self, videoids_tasks, exp_tvshows_videoids_values, exp_movies_videoids_values):
mylist_video_id_list, mylist_video_id_list_type = self.ext_func_get_mylist_videoids_profile_switch()
for videoid_value in exp_tvshows_videoids_values:
if unicode(videoid_value) in mylist_video_id_list:
continue
videoid = common.VideoId.from_path([common.VideoId.SHOW, videoid_value])
videoids_tasks.update({videoid: self.remove_item})
for videoid_value in exp_movies_videoids_values:
if unicode(videoid_value) in mylist_video_id_list:
continue
videoid = common.VideoId.from_path([common.VideoId.MOVIE, videoid_value])
videoids_tasks.update({videoid: self.remove_item})
for index, videoid_value in enumerate(mylist_video_id_list):
if (int(videoid_value) not in exp_tvshows_videoids_values and
int(videoid_value) not in exp_movies_videoids_values):
is_movie = mylist_video_id_list_type[index] == 'movie'
videoid = common.VideoId(**{('movieid' if is_movie else 'tvshowid'): videoid_value})
videoids_tasks.update({videoid: self.export_item if is_movie else self.export_new_item})
def _update_library(self, videoids_tasks, exp_tvshows_videoids_values, show_prg_dialog, show_nfo_dialog,
clear_on_cancel):
nfo_settings_override = None
if show_nfo_dialog:
nfo_settings_override = nfo.NFOSettings()
nfo_settings_override.show_export_dialog()
excluded_videoids_values = G.SHARED_DB.get_tvshows_id_list(VidLibProp['exclude_update'], True)
with ui.ProgressDialog(show_prg_dialog, max_value=len(videoids_tasks)) as progress_bar:
for videoid, task_handler in iteritems(videoids_tasks):
if int(videoid.value) in excluded_videoids_values:
continue
if not nfo_settings_override and int(videoid.value) in exp_tvshows_videoids_values:
nfo_export = G.SHARED_DB.get_tvshow_property(videoid.value,
VidLibProp['nfo_export'], False)
nfo_settings = nfo.NFOSettings(nfo_export)
else:
nfo_settings = nfo_settings_override or nfo.NFOSettings()
for index, total_tasks, title in self.execute_library_task(videoid,
task_handler,
nfo_settings=nfo_settings,
notify_errors=show_prg_dialog):
label_partial_op = ' ({}/{})'.format(index + 1, total_tasks) if total_tasks > 1 else ''
progress_bar.set_message(title + label_partial_op)
if progress_bar.is_cancelled():
LOG.warn('Auto update of the Kodi library interrupted by User')
if clear_on_cancel:
self.clear_library(True)
return False
if self.monitor.abortRequested():
LOG.warn('Auto update of the Kodi library interrupted by Kodi')
return False
progress_bar.perform_step()
progress_bar.set_wait_message()
delay_anti_ban()
common.clean_library(show_prg_dialog)
return True
def import_library(self):
nfo_settings = nfo.NFOSettings()
nfo_settings.show_export_dialog()
LOG.info('Start importing Kodi library')
remove_folders = []
remove_titles = []
folders = get_library_subfolders(FOLDER_NAME_MOVIES) + get_library_subfolders(FOLDER_NAME_SHOWS)
with ui.ProgressDialog(True, max_value=len(folders)) as progress_bar:
for folder_path in folders:
folder_name = os.path.basename(G.py2_decode(xbmc.translatePath(folder_path)))
progress_bar.set_message(folder_name)
try:
videoid = self.import_videoid_from_existing_strm(folder_path, folder_name)
if videoid is None:
remove_folders.append(folder_path)
remove_titles.append(folder_name)
continue
for index, total_tasks, title in self.execute_library_task(videoid,
self.export_item,
nfo_settings=nfo_settings,
notify_errors=True):
label_partial_op = ' ({}/{})'.format(index + 1, total_tasks) if total_tasks > 1 else ''
progress_bar.set_message(title + label_partial_op)
if progress_bar.is_cancelled():
LOG.warn('Import library interrupted by User')
return
if self.monitor.abortRequested():
LOG.warn('Import library interrupted by Kodi')
return
except ImportWarning:
pass
progress_bar.perform_step()
progress_bar.set_wait_message()
delay_anti_ban()
ret = self._import_library_remove(remove_titles, remove_folders)
request_kodi_library_update(scan=True, clean=ret)
def _import_library_remove(self, remove_titles, remove_folders):
if not remove_folders:
return False
tot_folders = len(remove_folders)
if tot_folders > 50:
remove_titles = remove_titles[:50] + ['...']
message = common.get_local_string(30246).format(tot_folders) + '[CR][CR]' + ', '.join(remove_titles)
if not ui.ask_for_confirmation(common.get_local_string(30140), message):
return False
LOG.info('Start deleting folders')
with ui.ProgressDialog(True, max_value=tot_folders) as progress_bar:
for file_path in remove_folders:
progress_bar.set_message('{}/{}'.format(progress_bar.value, tot_folders))
LOG.debug('Deleting folder: {}', file_path)
common.delete_folder(file_path)
progress_bar.perform_step()
return True
| true | true |
1c35ff148136ebc9186e9f8c778add8a0271665c | 1,826 | py | Python | sdk/managementgroups/azure-mgmt-managementgroups/azure/mgmt/managementgroups/models/create_management_group_details.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/managementgroups/azure-mgmt-managementgroups/azure/mgmt/managementgroups/models/create_management_group_details.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/managementgroups/azure-mgmt-managementgroups/azure/mgmt/managementgroups/models/create_management_group_details.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CreateManagementGroupDetails(Model):
    """The details of a management group used during creation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar version: The version number of the object.
    :vartype version: float
    :ivar updated_time: The date and time when this object was last updated.
    :vartype updated_time: datetime
    :ivar updated_by: The identity of the principal or process that updated
     the object.
    :vartype updated_by: str
    :param parent: Parent.
    :type parent: ~azure.mgmt.managementgroups.models.CreateParentGroupInfo
    """

    # Server-populated fields: rejected on input, only present in responses.
    _validation = {
        'version': {'readonly': True},
        'updated_time': {'readonly': True},
        'updated_by': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'version': {'key': 'version', 'type': 'float'},
        'updated_time': {'key': 'updatedTime', 'type': 'iso-8601'},
        'updated_by': {'key': 'updatedBy', 'type': 'str'},
        'parent': {'key': 'parent', 'type': 'CreateParentGroupInfo'},
    }

    def __init__(self, **kwargs):
        super(CreateManagementGroupDetails, self).__init__(**kwargs)
        # Read-only fields always start unset; the server fills them in.
        for readonly_attr in ('version', 'updated_time', 'updated_by'):
            setattr(self, readonly_attr, None)
        self.parent = kwargs.get('parent', None)
| 35.803922 | 76 | 0.613363 |
from msrest.serialization import Model
class CreateManagementGroupDetails(Model):
_validation = {
'version': {'readonly': True},
'updated_time': {'readonly': True},
'updated_by': {'readonly': True},
}
_attribute_map = {
'version': {'key': 'version', 'type': 'float'},
'updated_time': {'key': 'updatedTime', 'type': 'iso-8601'},
'updated_by': {'key': 'updatedBy', 'type': 'str'},
'parent': {'key': 'parent', 'type': 'CreateParentGroupInfo'},
}
def __init__(self, **kwargs):
super(CreateManagementGroupDetails, self).__init__(**kwargs)
self.version = None
self.updated_time = None
self.updated_by = None
self.parent = kwargs.get('parent', None)
| true | true |
1c35ff5324a7f4991b6d4fcb399afdf907af9100 | 4,077 | py | Python | development/tests/unit/lib/dynamodb/scans_test.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 1 | 2020-09-23T21:40:16.000Z | 2020-09-23T21:40:16.000Z | development/tests/unit/lib/dynamodb/scans_test.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | null | null | null | development/tests/unit/lib/dynamodb/scans_test.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 3 | 2020-07-11T19:18:12.000Z | 2021-08-14T17:43:06.000Z | import json
from lib.dynamodb import scans_table
class TestScanHandler:
    """Tests for scans_table.add_error error-accumulation behavior."""

    def test_add_error(self):
        """Exercise add_error across four scenarios against a fresh scan record:
        first error, appended error, fatal error, and a step-function error
        whose JSON 'Cause' payload must be parsed into a dict.
        """
        scan_id = scans_table.create_new_scan_id()
        function_name = 'functionName'
        error1 = 'error1'
        error2 = 'error2'
        error3 = 'error3'
        # Shaped like an AWS Step Functions error: 'Cause' is a JSON string.
        error4 = {
            'Cause': json.dumps({
                'errorMessage': 'An error occurred (AccessDenied)',
                'errorType':'SampleError'
            }),
            'Error': 'SampleError'
        }
        scan_key = {'scan': scans_table.SCAN, 'scanId': scan_id}
        # Seed a scan record in the in-progress state.
        scans_table.put_item(
            Item={
                'scan': scans_table.SCAN,
                'processState': scans_table.IN_PROGRESS,
                'scanId': scan_id,
            }
        )

        # test adding error when none previously exist
        expected_result = {
            'scan': scans_table.SCAN,
            'processState': scans_table.IN_PROGRESS,
            'scanId': scan_id,
            'errors': [
                {
                    'functionName': function_name,
                    'error': error1
                }
            ]
        }
        scans_table.add_error(scan_id, function_name, error1)
        result = scans_table.get_item(Key=scan_key)['Item']
        # ttl is time-derived, so just check it exists and drop it before comparing.
        assert result.pop('ttl')
        assert result == expected_result

        # test adding error when error previously exists
        expected_result = {
            'scan': scans_table.SCAN,
            'processState': scans_table.IN_PROGRESS,
            'scanId': scan_id,
            'errors': [
                {
                    'functionName': function_name,
                    'error': error1
                },
                {
                    'functionName': function_name,
                    'error': error2
                }
            ]
        }
        scans_table.add_error(scan_id, function_name, error2)
        result = scans_table.get_item(Key=scan_key)['Item']
        assert result.pop('ttl')
        assert result == expected_result

        # test adding fatal error
        # A fatal error flips the scan's processState to ERRORED and is stored
        # under 'fatalError' instead of being appended to 'errors'.
        expected_result = {
            'scan': scans_table.SCAN,
            'processState': scans_table.ERRORED,
            'scanId': scan_id,
            'errors': [
                {
                    'functionName': function_name,
                    'error': error1
                },
                {
                    'functionName': function_name,
                    'error': error2
                }
            ],
            'fatalError' :{
                'functionName': function_name,
                'error': error3
            }
        }
        scans_table.add_error(scan_id, function_name, error3, is_fatal=True)
        result = scans_table.get_item(Key=scan_key)['Item']
        assert result.pop('ttl')
        assert result == expected_result

        # test JSON parsing a step function error
        # The 'Cause' JSON string must be stored decoded (as a nested dict).
        expected_result = {
            'scan': scans_table.SCAN,
            'processState': scans_table.ERRORED,
            'scanId': scan_id,
            'errors': [
                {
                    'error': 'error1',
                    'functionName': 'functionName'
                },
                {
                    'error': 'error2',
                    'functionName': 'functionName'
                },
                {
                    'error': {
                        'Error': 'SampleError',
                        'Cause': {
                            'errorMessage': 'An error occurred (AccessDenied)',
                            'errorType': 'SampleError'
                        }},
                    'functionName': 'functionName'}
            ],
            'fatalError':
                {
                    'error': 'error3',
                    'functionName': 'functionName'
                }
        }
        scans_table.add_error(scan_id, function_name, error4, is_fatal=False)
        result = scans_table.get_item(Key=scan_key)['Item']
        assert result.pop('ttl')
        assert result == expected_result
| 31.361538 | 79 | 0.459652 | import json
from lib.dynamodb import scans_table
class TestScanHandler:
    """Behaviour tests for scans_table.add_error."""

    def test_add_error(self):
        """add_error appends non-fatal errors, records a fatal error while
        flipping processState to ERRORED, and JSON-decodes step-function
        'Cause' payloads into dicts."""
        scan_id = scans_table.create_new_scan_id()
        fn_name = 'functionName'
        # Shaped like an AWS Step Functions error: 'Cause' is a JSON string.
        step_fn_error = {
            'Cause': json.dumps({
                'errorMessage': 'An error occurred (AccessDenied)',
                'errorType': 'SampleError'
            }),
            'Error': 'SampleError'
        }
        key = {'scan': scans_table.SCAN, 'scanId': scan_id}
        # Seed a scan record with no errors yet.
        scans_table.put_item(Item={
            'scan': scans_table.SCAN,
            'processState': scans_table.IN_PROGRESS,
            'scanId': scan_id,
        })

        def fetch():
            # add_error also sets a (truthy) ttl; drop it before comparing.
            item = scans_table.get_item(Key=key)['Item']
            assert item.pop('ttl')
            return item

        # First error: the 'errors' list is created.
        scans_table.add_error(scan_id, fn_name, 'error1')
        expected = {
            'scan': scans_table.SCAN,
            'processState': scans_table.IN_PROGRESS,
            'scanId': scan_id,
            'errors': [{'functionName': fn_name, 'error': 'error1'}],
        }
        assert fetch() == expected

        # Second error: appended to the existing list.
        scans_table.add_error(scan_id, fn_name, 'error2')
        expected['errors'].append({'functionName': fn_name, 'error': 'error2'})
        assert fetch() == expected

        # Fatal error: stored separately and state becomes ERRORED.
        scans_table.add_error(scan_id, fn_name, 'error3', is_fatal=True)
        expected['processState'] = scans_table.ERRORED
        expected['fatalError'] = {'functionName': fn_name, 'error': 'error3'}
        assert fetch() == expected

        # Step-function error: the JSON 'Cause' string is parsed to a dict.
        scans_table.add_error(scan_id, fn_name, step_fn_error, is_fatal=False)
        expected['errors'].append({
            'functionName': fn_name,
            'error': {
                'Error': 'SampleError',
                'Cause': {
                    'errorMessage': 'An error occurred (AccessDenied)',
                    'errorType': 'SampleError'
                },
            },
        })
        assert fetch() == expected
| true | true |
1c35ff829758d033554b6548d08a829209cdca03 | 103,582 | py | Python | tests/test_misc.py | lukechilds/lightning | 9bb2b4a9098d6d3e4c19bf9ec00c7c9794cb858f | [
"MIT"
] | null | null | null | tests/test_misc.py | lukechilds/lightning | 9bb2b4a9098d6d3e4c19bf9ec00c7c9794cb858f | [
"MIT"
] | null | null | null | tests/test_misc.py | lukechilds/lightning | 9bb2b4a9098d6d3e4c19bf9ec00c7c9794cb858f | [
"MIT"
] | null | null | null | from bitcoin.rpc import RawProxy
from decimal import Decimal
from fixtures import * # noqa: F401,F403
from fixtures import LightningNode, TEST_NETWORK
from flaky import flaky # noqa: F401
from pyln.client import RpcError
from threading import Event
from pyln.testing.utils import (
DEVELOPER, TIMEOUT, VALGRIND, DEPRECATED_APIS, sync_blockheight, only_one,
wait_for, TailableProc, env
)
from utils import (
check_coin_moves, account_balance
)
from ephemeral_port_reserve import reserve
from utils import EXPERIMENTAL_FEATURES
import json
import os
import pytest
import re
import shutil
import signal
import socket
import subprocess
import time
import unittest
@unittest.skipIf(not DEVELOPER, "needs --dev-disconnect")
def test_stop_pending_fundchannel(node_factory, executor):
    """Stop the daemon while waiting for an accept_channel

    This used to crash the node, since we were calling unreserve_utxo while
    freeing the daemon, but that needs a DB transaction to be open.
    """
    l1, l2 = node_factory.get_nodes(2)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # We want l2 to stop replying altogether, not disconnect
    os.kill(l2.daemon.proc.pid, signal.SIGSTOP)
    # The fundchannel call will not terminate so run it in a future
    executor.submit(l1.fundchannel, l2, 10**6)
    # Wait until l1 has actually sent open_channel before stopping it.
    l1.daemon.wait_for_log('peer_out WIRE_OPEN_CHANNEL')
    # l1 shuts down while the fundchannel is still pending; must not crash.
    l1.rpc.stop()
    # Now allow l2 a clean shutdown
    os.kill(l2.daemon.proc.pid, signal.SIGCONT)
    l2.rpc.stop()
def test_names(node_factory):
    """Each node's startup log announces its pubkey, alias and color.

    Aliases/colors are deterministic, derived from the node keys.
    """
    # Note:
    # private keys:
    # l1: 41bfd2660762506c9933ade59f1debf7e6495b10c14a92dbcd2d623da2507d3d01,
    # l2: c4a813f81ffdca1da6864db81795ad2d320add274452cafa1fb2ac2d07d062bd01
    # l3: dae24b3853e1443a176daba5544ee04f7db33ebe38e70bdfdb1da34e89512c1001
    expected = [
        ('0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'JUNIORBEAM', '0266e4'),
        ('022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59', 'SILENTARTIST', '022d22'),
        ('035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d', 'HOPPINGFIRE', '035d2b'),
        ('0382ce59ebf18be7d84677c2e35f23294b9992ceca95491fcf8a56c6cb2d9de199', 'JUNIORFELONY', '0382ce'),
        ('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'SOMBERFIRE', '032cf1'),
        ('0265b6ab5ec860cd257865d61ef0bbf5b3339c36cbda8b26b74e7f1dca490b6518', 'LOUDPHOTO', '0265b6')
    ]
    nodes = node_factory.get_nodes(len(expected))
    for node, (pubkey, alias, color) in zip(nodes, expected):
        pattern = r'public key {}, alias {}.* \(color #{}\)'.format(pubkey, alias, color)
        assert node.daemon.is_in_log(pattern)
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "This migration is based on a sqlite3 snapshot")
def test_db_upgrade(node_factory):
    """Both a fresh db and a synthetic version-1 db record one upgrade row
    tagged with the running lightningd version."""
    l1 = node_factory.get_node()
    l1.stop()
    version = subprocess.check_output(['lightningd/lightningd',
                                       '--version']).decode('utf-8').splitlines()[0]
    # A fresh node records a single upgrade from the sentinel version -1.
    upgrades = l1.db_query("SELECT * from db_upgrades;")
    assert len(upgrades) == 1
    assert(upgrades[0]['upgrade_from'] == -1)
    assert(upgrades[0]['lightning_version'] == version)
    # Try resetting to earlier db state.
    os.unlink(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
    l1.db_manip("CREATE TABLE version (version INTEGER);")
    l1.db_manip("INSERT INTO version VALUES (1);")
    l1.start()
    # Restarting against the synthetic v1 db records an upgrade from 1.
    upgrades = l1.db_query("SELECT * from db_upgrades;")
    assert len(upgrades) == 1
    assert(upgrades[0]['upgrade_from'] == 1)
    assert(upgrades[0]['lightning_version'] == version)
def test_bitcoin_failure(node_factory, bitcoind):
    """lightningd retries failing bitcoin-cli calls, recovers when they
    start working again, and refuses to run against -blocksonly."""
    l1 = node_factory.get_node()
    # Make sure we're not failing it between getblockhash and getblock.
    sync_blockheight(bitcoind, [l1])

    def crash_bitcoincli(r):
        return {'error': 'go away'}

    # This is not a JSON-RPC response by purpose
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', crash_bitcoincli)
    l1.daemon.rpcproxy.mock_rpc('getblockhash', crash_bitcoincli)
    # This should cause both estimatefee and getblockhash fail
    l1.daemon.wait_for_logs(['Unable to estimate .* fee',
                             'getblockhash .* exited with status 1'])
    # And they should retry!
    l1.daemon.wait_for_logs(['Unable to estimate .* fee',
                             'getblockhash .* exited with status 1'])
    # Restore, then it should recover and get blockheight.
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', None)
    l1.daemon.rpcproxy.mock_rpc('getblockhash', None)
    bitcoind.generate_block(5)
    sync_blockheight(bitcoind, [l1])
    # We refuse to start if bitcoind is in `blocksonly`
    l1.stop()
    bitcoind.stop()
    bitcoind.cmd_line += ["-blocksonly"]
    bitcoind.start()
    # Startup must fail, complaining on stderr about transaction relay.
    l2 = node_factory.get_node(start=False, expect_fail=True)
    with pytest.raises(ValueError):
        l2.start(stderr=subprocess.PIPE)
    assert l2.daemon.is_in_stderr(r".*deactivating transaction relay is not"
                                  " supported.") is not None
def test_bitcoin_ibd(node_factory, bitcoind):
    """Test that we recognize bitcoin in initial block download mode"""
    info = bitcoind.rpc.getblockchaininfo()
    info['initialblockdownload'] = True
    l1 = node_factory.get_node(start=False)
    # Mock getblockchaininfo so bitcoind appears to be in IBD.
    l1.daemon.rpcproxy.mock_rpc('getblockchaininfo', info)
    l1.start(wait_for_bitcoind_sync=False)
    # This happens before the Starting message start() waits for.
    assert l1.daemon.is_in_log('Waiting for initial block download')
    assert 'warning_bitcoind_sync' in l1.rpc.getinfo()
    # "Finish" IBD by removing the mock; the warning field must clear.
    l1.daemon.rpcproxy.mock_rpc('getblockchaininfo', None)
    l1.daemon.wait_for_log('Bitcoin backend now synced')
    assert 'warning_bitcoind_sync' not in l1.rpc.getinfo()
def test_lightningd_still_loading(node_factory, bitcoind, executor):
    """Test that we recognize we haven't got all blocks from bitcoind"""
    mock_release = Event()

    # This is slow enough that we're going to notice.
    def mock_getblock(r):
        conf_file = os.path.join(bitcoind.bitcoin_dir, 'bitcoin.conf')
        brpc = RawProxy(btc_conf_file=conf_file)
        if r['params'][0] == slow_blockid:
            # Stall the final block until the test releases us.
            mock_release.wait(TIMEOUT)
        return {
            "result": brpc._call(r['method'], *r['params']),
            "error": None,
            "id": r['id']
        }

    # Start it, establish channel, get extra funds.
    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'may_reconnect': True,
                                                  'wait_for_bitcoind_sync': False},
                                                 {'may_reconnect': True,
                                                  'wait_for_bitcoind_sync': False},
                                                 {}])
    node_factory.join_nodes([l1, l2])
    # Balance l1<->l2 channel
    l1.pay(l2, 10**9 // 2)
    l1.stop()
    # Now make sure l2 is behind.
    bitcoind.generate_block(2)
    # Make sure l2/l3 are synced
    sync_blockheight(bitcoind, [l2, l3])
    # Make it slow grabbing the final block.
    slow_blockid = bitcoind.rpc.getblockhash(bitcoind.rpc.getblockcount())
    l1.daemon.rpcproxy.mock_rpc('getblock', mock_getblock)
    l1.start(wait_for_bitcoind_sync=False)
    # It will warn about being out-of-sync.
    assert 'warning_bitcoind_sync' not in l1.rpc.getinfo()
    assert 'warning_lightningd_sync' in l1.rpc.getinfo()
    # Make sure it's connected to l2 (otherwise we get TEMPORARY_CHANNEL_FAILURE)
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    # Payments will fail. FIXME: More informative msg?
    with pytest.raises(RpcError, match=r'TEMPORARY_NODE_FAILURE'):
        l1.pay(l2, 1000)
    # Can't fund a new channel.
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    with pytest.raises(RpcError, match=r'304'):
        if l1.config('experimental-dual-fund'):
            psbt = l1.rpc.fundpsbt('10000sat', '253perkw', 250)['psbt']
            l1.rpc.openchannel_init(l3.info['id'], '10000sat', psbt)
        else:
            l1.rpc.fundchannel_start(l3.info['id'], '10000sat')
    # Attempting to fund an extremely large transaction should fail
    # with a 'unsynced' error
    with pytest.raises(RpcError, match=r'304'):
        l1.rpc.txprepare([{l1.rpc.newaddr()['bech32']: '200000000sat'}])
    # This will work, but will be delayed until synced.
    fut = executor.submit(l2.pay, l1, 1000)
    l1.daemon.wait_for_log("Deferring incoming commit until we sync")
    # Release the mock.
    mock_release.set()
    fut.result()
    assert 'warning_lightningd_sync' not in l1.rpc.getinfo()
    # Now we get insufficient funds error
    with pytest.raises(RpcError, match=r'301'):
        l1.rpc.txprepare([{l1.rpc.newaddr()['bech32']: '200000000sat'}])
    # This will now work normally.
    l1.pay(l2, 1000)
def test_ping(node_factory):
    """Ping/pong works before a channel exists (gossip connection) and,
    once a channel is funded, via channeld as well."""
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    def ping_tests(l1, l2):
        # 0-byte pong gives just type + length field.
        ret = l1.rpc.ping(l2.info['id'], 0, 0)
        assert ret['totlen'] == 4
        # 1000-byte ping, 0-byte pong.
        ret = l1.rpc.ping(l2.info['id'], 1000, 0)
        assert ret['totlen'] == 4
        # 1000 byte pong.
        ret = l1.rpc.ping(l2.info['id'], 1000, 1000)
        assert ret['totlen'] == 1004
        # Maximum length pong.
        ret = l1.rpc.ping(l2.info['id'], 1000, 65531)
        assert ret['totlen'] == 65535
        # Overlength -> no reply.
        for s in range(65532, 65536):
            ret = l1.rpc.ping(l2.info['id'], 1000, s)
            assert ret['totlen'] == 0
        # 65535 - type(2 bytes) - num_pong_bytes(2 bytes) - byteslen(2 bytes)
        # = 65529 max.
        with pytest.raises(RpcError, match=r'oversize ping'):
            l1.rpc.ping(l2.info['id'], 65530, 1)

    # Test gossip pinging.
    ping_tests(l1, l2)
    if DEVELOPER:
        l1.daemon.wait_for_log(r'Got pong 1000 bytes \({}\.\.\.\)'
                               .format(l2.info['version']), timeout=1)
    l1.fundchannel(l2, 10**5)
    # channeld pinging
    ping_tests(l1, l2)
    if DEVELOPER:
        l1.daemon.wait_for_log(r'Got pong 1000 bytes \({}\.\.\.\)'
                               .format(l2.info['version']))
@unittest.skipIf(not DEVELOPER, "needs --dev-disconnect")
def test_htlc_sig_persistence(node_factory, bitcoind, executor):
    """Interrupt a payment between two peers, then fail and recover funds using the HTLC sig.
    """
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    # l2 uses dev-disconnect to break the connection at WIRE_COMMITMENT_SIGNED.
    l2 = node_factory.get_node(disconnect=['+WIRE_COMMITMENT_SIGNED'])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)
    f = executor.submit(l1.pay, l2, 31337000)
    # Stop l1 with the HTLC mid-flight (irrevocably committed on l1's side).
    l1.daemon.wait_for_log(r'HTLC out 0 RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
    l1.stop()
    # `pay` call is lost
    with pytest.raises(RpcError):
        f.result()
    # We should have the HTLC sig
    assert(len(l1.db_query("SELECT * FROM htlc_sigs;")) == 1)
    # This should reload the htlc_sig
    l2.rpc.dev_fail(l1.info['id'])
    # Make sure it broadcasts to chain.
    l2.wait_for_channel_onchain(l1.info['id'])
    l2.stop()
    bitcoind.generate_block(1)
    l1.start()
    # On restart l1 must recover the HTLC signature from the database.
    assert l1.daemon.is_in_log(r'Loaded 1 HTLC signatures from DB')
    l1.daemon.wait_for_logs([
        r'Peer permanent failure in CHANNELD_NORMAL: Funding transaction spent',
        r'Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US'
    ])
    bitcoind.generate_block(5)
    l1.daemon.wait_for_log("Broadcasting OUR_HTLC_TIMEOUT_TO_US")
    time.sleep(3)
    bitcoind.generate_block(1)
    l1.daemon.wait_for_logs([
        r'Owning output . (\d+)sat .SEGWIT. txid',
    ])
    # We should now have a) the change from funding, b) the
    # unilateral to us, and c) the HTLC respend to us
    assert len(l1.rpc.listfunds()['outputs']) == 3
@unittest.skipIf(not DEVELOPER, "needs to deactivate shadow routing")
def test_htlc_out_timeout(node_factory, bitcoind, executor):
    """Test that we drop onchain if the peer doesn't time out HTLC

    l1 offers an HTLC, then disconnects after it is irrevocably committed;
    once the HTLC's deadline passes, l1 must unilaterally close and sweep
    both the HTLC-timeout and to-local outputs back to its wallet.
    """
    # HTLC 1->2, 1 fails after it's irrevocably committed, can't reconnect
    disconnects = ['@WIRE_REVOKE_AND_ACK']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chanid, _ = l1.fundchannel(l2, 10**6)
    # Wait for route propagation.
    l1.wait_channel_active(chanid)
    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_htlc_out_timeout', 'desc')['bolt11']
    assert only_one(l2.rpc.listinvoices('test_htlc_out_timeout')['invoices'])['status'] == 'unpaid'
    executor.submit(l1.rpc.dev_pay, inv, use_shadow=False)
    # l1 will disconnect, and not reconnect.
    l1.daemon.wait_for_log('dev_disconnect: @WIRE_REVOKE_AND_ACK')
    # Takes 6 blocks to timeout (cltv-final + 1), but we also give grace period of 1 block.
    # shadow route can add extra blocks!
    status = only_one(l1.rpc.call('paystatus')['pay'])
    if 'shadow' in status:
        shadowlen = 6 * status['shadow'].count('Added 6 cltv delay for shadow')
    else:
        shadowlen = 0
    bitcoind.generate_block(5 + 1 + shadowlen)
    time.sleep(3)
    # One block short of the deadline: must not yet have acted.
    assert not l1.daemon.is_in_log('hit deadline')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Offered HTLC 0 SENT_ADD_ACK_REVOCATION cltv .* hit deadline')
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    # L1 will timeout HTLC immediately
    l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 0 blocks',
                             'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'])
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(4)
    # It should now claim both the to-local and htlc-timeout-tx outputs.
    l1.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET',
                             'Broadcasting OUR_DELAYED_RETURN_TO_WALLET',
                             'sendrawtx exit 0',
                             'sendrawtx exit 0'])
    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs to deactivate shadow routing")
def test_htlc_in_timeout(node_factory, bitcoind, executor):
    """Test that we drop onchain if the peer doesn't accept fulfilled HTLC

    l1 disconnects after l2 has committed the fulfill; once the incoming
    HTLC nears expiry, l2 must go onchain and claim it via the
    HTLC-success path, then sweep the delayed output to its wallet.
    """
    # HTLC 1->2, 1 fails after 2 has sent committed the fulfill
    disconnects = ['-WIRE_REVOKE_AND_ACK*2']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chanid, _ = l1.fundchannel(l2, 10**6)
    l1.wait_channel_active(chanid)
    sync_blockheight(bitcoind, [l1, l2])
    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_htlc_in_timeout', 'desc')['bolt11']
    assert only_one(l2.rpc.listinvoices('test_htlc_in_timeout')['invoices'])['status'] == 'unpaid'
    executor.submit(l1.rpc.dev_pay, inv, use_shadow=False)
    # l1 will disconnect and not reconnect.
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_REVOKE_AND_ACK')
    # Deadline HTLC expiry minus 1/2 cltv-expiry delta (rounded up) (== cltv - 3). cltv is 5+1.
    # shadow route can add extra blocks!
    status = only_one(l1.rpc.call('paystatus')['pay'])
    if 'shadow' in status:
        shadowlen = 6 * status['shadow'].count('Added 6 cltv delay for shadow')
    else:
        shadowlen = 0
    bitcoind.generate_block(2 + shadowlen)
    # One block short of the deadline: must not yet have acted.
    assert not l2.daemon.is_in_log('hit deadline')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('Fulfilled HTLC 0 SENT_REMOVE_COMMIT cltv .* hit deadline')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    l2.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log(' to ONCHAIN')
    # L2 will collect HTLC (iff no shadow route)
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(4)
    l2.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    # Now, 100 blocks it should be both done.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not TEST_NETWORK == 'regtest', 'must be on bitcoin network')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_bech32_funding(node_factory, chainparams):
    """Open a channel funded via a freshly-funded bech32 wallet address."""
    # Don't get any funds from previous runs.
    l1, l2 = node_factory.line_graph(2, opts={'random_hsm': True}, fundchannel=False)

    # Fund a bech32 address, then open a channel from it.
    res = l1.openchannel(l2, 25000, 'bech32')
    funding_addr = res['address']
    assert funding_addr.startswith(chainparams['bip173_prefix'])

    # probably overly paranoid checking: the wallet tx must pay to that
    # P2WPKH address, and the funding tx must spend the wallet tx.
    wallettx = l1.bitcoin.rpc.getrawtransaction(res['wallettxid'], True)
    fundingtx = l1.bitcoin.rpc.decoderawtransaction(res['fundingtx']['tx'])

    def pays_to_funding_addr(spk):
        return (spk['type'] == 'witness_v0_keyhash'
                and only_one(spk['addresses']) == funding_addr)

    assert any(pays_to_funding_addr(out['scriptPubKey']) for out in wallettx['vout'])
    assert only_one(fundingtx['vin'])['txid'] == res['wallettxid']
def test_withdraw_misc(node_factory, bitcoind, chainparams):
    """Exercise `withdraw`: address validation (bech32 P2WPKH/P2WSH, the
    BIP173 failure vectors), output-state tracking in the db, 'all'
    withdrawals, and that the coin-movement accounting balances out."""
    def dont_spend_outputs(n, txid):
        """Reserve both outputs (we assume there are two!) in case any our ours, so we don't spend change: wrecks accounting checks"""
        n.rpc.reserveinputs(bitcoind.rpc.createpsbt([{'txid': txid,
                                                      'vout': 0},
                                                     {'txid': txid,
                                                      'vout': 1}], []))

    # We track channel balances, to verify that accounting is ok.
    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    amount = 2000000
    # Don't get any funds from previous runs.
    l1 = node_factory.get_node(random_hsm=True,
                               options={'plugin': coin_mvt_plugin},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr()['bech32']
    # Add some funds to withdraw later
    for i in range(10):
        l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8)
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
    # Reach around into the db to check that outputs were added
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10
    waddr = l1.bitcoin.getnewaddress()
    # Now attempt to withdraw some (making sure we collect multiple inputs)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('not an address', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, 'not an amount')
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, -amount)
    with pytest.raises(RpcError, match=r'Could not afford'):
        l1.rpc.withdraw(waddr, amount * 100)
    out = l1.rpc.withdraw(waddr, amount)
    # Make sure bitcoind received the withdrawal
    unspent = l1.bitcoin.rpc.listunspent(0)
    withdrawal = [u for u in unspent if u['txid'] == out['txid']]
    assert(withdrawal[0]['amount'] == Decimal('0.02'))
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
    # Now make sure two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2
    dont_spend_outputs(l1, out['txid'])
    # Now send some money to l2.
    # (withdraw to a bech32 address on l2's wallet)
    waddr = l2.rpc.newaddr('bech32')['bech32']
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1)
    # Make sure l2 received the withdrawal.
    wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
    outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == amount
    # Now make sure an additional two of them were marked as spent
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4
    # The remaining address-format checks only make sense on regtest.
    if chainparams['name'] != 'regtest':
        return
    # Simple test for withdrawal to P2WPKH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
    with pytest.raises(RpcError):
        l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx', amount)
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    # Now make sure additional two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6
    # Simple test for withdrawal to P2WSH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
    with pytest.raises(RpcError):
        l1.rpc.withdraw('xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx', amount)
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    # Now make sure additional two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8
    # failure testing for invalid SegWit addresses, from BIP173
    # HRP character out of range
    with pytest.raises(RpcError):
        l1.rpc.withdraw(' 1nwldj5', amount)
    # overall max length exceeded
    with pytest.raises(RpcError):
        l1.rpc.withdraw('an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx', amount)
    # No separator character
    with pytest.raises(RpcError):
        l1.rpc.withdraw('pzry9x0s0muk', amount)
    # Empty HRP
    with pytest.raises(RpcError):
        l1.rpc.withdraw('1pzry9x0s0muk', amount)
    # Invalid witness version
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2', amount)
    # Invalid program length for witness version 0 (per BIP141)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', amount)
    # Mixed case
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7', amount)
    # Non-zero padding in 8-to-5 conversion
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv', amount)
    # Should have 2 outputs available.
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 2
    # Unreserve everything.
    inputs = []
    for out in l1.rpc.listfunds()['outputs']:
        if out['reserved']:
            inputs += [{'txid': out['txid'], 'vout': out['output']}]
    l1.rpc.unreserveinputs(bitcoind.rpc.createpsbt(inputs, []))
    # Test withdrawal to self.
    l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
    bitcoind.generate_block(1)
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1
    l1.rpc.withdraw(waddr, 'all', minconf=0)
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0
    # This should fail, can't even afford fee.
    with pytest.raises(RpcError, match=r'Could not afford'):
        l1.rpc.withdraw(waddr, 'all')
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])
    # Every movement must net out: the wallet account ends at zero balance.
    assert account_balance(l1, 'wallet') == 0
    # Expected movement log for the whole sequence above (nested lists are
    # order-independent alternatives for the two-output withdrawals).
    wallet_moves = [
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993760000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6240000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993760000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993760000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6240000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993760000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993760000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6240000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993760000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993400000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6600000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993400000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 11961240000, 'tag': 'withdrawal'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 13440000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 11961240000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 11957603000, 'tag': 'withdrawal'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 3637000, 'tag': 'chain_fees'},
    ]
    check_coin_moves(l1, 'wallet', wallet_moves, chainparams)
def test_io_logging(node_factory, executor):
    """IO-level logging can be toggled per subdaemon with SIGUSR1, and IO
    entries only show up in peer logs when enabled on that node."""
    l1 = node_factory.get_node(options={'log-level': 'io'})
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Fundchannel manually so we get channeld pid.
    l1.fundwallet(10**6 + 1000000)
    l1.rpc.fundchannel(l2.info['id'], 10**6)['tx']
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
    l2.daemon.wait_for_log(' to CHANNELD_NORMAL')
    fut = executor.submit(l1.pay, l2, 200000000)
    # WIRE_UPDATE_ADD_HTLC = 128 = 0x0080
    l1.daemon.wait_for_log(r'channeld.*: \[OUT\] 0080')
    # WIRE_UPDATE_FULFILL_HTLC = 130 = 0x0082
    l1.daemon.wait_for_log(r'channeld.*: \[IN\] 0082')
    fut.result(10)
    # Send it sigusr1: should turn off logging.
    pid1 = l1.subd_pid('channeld')
    subprocess.run(['kill', '-USR1', pid1])
    l1.pay(l2, 200000000)
    # No IO entries after the toggle (search only new log lines).
    assert not l1.daemon.is_in_log(r'channeld.*: \[OUT\] 0080',
                                   start=l1.daemon.logsearch_start)
    assert not l1.daemon.is_in_log(r'channeld.*: \[IN\] 0082',
                                   start=l1.daemon.logsearch_start)
    # IO logs should not appear in peer logs.
    peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log']
    assert not any(l['type'] == 'IO_OUT' or l['type'] == 'IO_IN'
                   for l in peerlog)
    # Turn on in l2 channel logging.
    pid2 = l2.subd_pid('channeld')
    subprocess.run(['kill', '-USR1', pid2])
    l1.pay(l2, 200000000)
    # Now it should find it.
    peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log']
    assert any(l['type'] == 'IO_OUT' for l in peerlog)
    assert any(l['type'] == 'IO_IN' for l in peerlog)
def test_address(node_factory):
    """getinfo reports announced addresses and bindings, and we can bind
    to (and connect over) a UNIX domain socket via bind-addr or addr."""
    if DEVELOPER:
        # dev-allow-localhost lets the node announce 127.0.0.1.
        opts = {'dev-allow-localhost': None}
    else:
        opts = None
    l1 = node_factory.get_node(options=opts)
    addr = l1.rpc.getinfo()['address']
    if DEVELOPER:
        assert len(addr) == 1
        assert addr[0]['type'] == 'ipv4'
        assert addr[0]['address'] == '127.0.0.1'
        assert int(addr[0]['port']) == l1.port
    else:
        # Without the dev option, nothing is announced.
        assert len(addr) == 0
    bind = l1.rpc.getinfo()['binding']
    assert len(bind) == 1
    assert bind[0]['type'] == 'ipv4'
    assert bind[0]['address'] == '127.0.0.1'
    assert int(bind[0]['port']) == l1.port
    # Now test UNIX domain binding.
    l1.stop()
    l1.daemon.opts['bind-addr'] = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "sock")
    l1.start()
    l2 = node_factory.get_node()
    l2.rpc.connect(l1.info['id'], l1.daemon.opts['bind-addr'])
    # 'addr' with local socket works too.
    l1.stop()
    del l1.daemon.opts['bind-addr']
    l1.daemon.opts['addr'] = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "sock")
    # start expects a port, so we open-code here.
    l1.daemon.start()
    l2 = node_factory.get_node()
    l2.rpc.connect(l1.info['id'], l1.daemon.opts['addr'])
@unittest.skipIf(DEPRECATED_APIS, "Tests the --allow-deprecated-apis config")
def test_listconfigs(node_factory, bitcoind, chainparams):
    """`listconfigs` reports effective options; over-long values are truncated.

    Fix: the `ignore-fee-limits` assertion was duplicated verbatim; the
    redundant copy is removed.
    """
    # Make extremely long entry, check it works
    l1 = node_factory.get_node(options={'log-prefix': 'lightning1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'})

    configs = l1.rpc.listconfigs()

    # See utils.py
    assert configs['allow-deprecated-apis'] is False
    assert configs['network'] == chainparams['name']
    assert configs['ignore-fee-limits'] is False
    # Long values are elided with a trailing '...'.
    assert configs['log-prefix'] == 'lightning1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx...'

    # These are aliases, but we don't print the (unofficial!) wumbo.
    assert 'wumbo' not in configs
    assert configs['large-channels'] is False

    # Test one at a time: querying a single config must match the full dump.
    for c in configs.keys():
        if c.startswith('#') or c.startswith('plugins') or c == 'important-plugins':
            continue
        oneconfig = l1.rpc.listconfigs(config=c)
        assert(oneconfig[c] == configs[c])
def test_listconfigs_plugins(node_factory, bitcoind, chainparams):
    """Important plugins (notably 'pay') show up with a usable name and path."""
    l1 = node_factory.get_node()

    # assert that we have pay plugin and that plugins have a name and path
    plugins = l1.rpc.listconfigs()['important-plugins']
    assert plugins
    assert sum(1 for p in plugins if p['name'] == "pay") == 1
    for plugin in plugins:
        assert plugin['name'] and len(plugin['name']) > 0
        assert plugin['path'] and len(plugin['path']) > 0
        # The path must point at an existing, executable file.
        assert os.path.isfile(plugin['path'])
        assert os.access(plugin['path'], os.X_OK)
def test_multirpc(node_factory):
    """Test that we can do multiple RPC without waiting for response"""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # Raw JSON-RPC requests, sent back-to-back without reading any reply.
    commands = [
        b'{"id":1,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":2,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":3,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":4,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":5,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":6,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"method": "invoice", "params": [100, "foo", "foo"], "jsonrpc": "2.0", "id": 7 }',
        b'{"method": "waitinvoice", "params": ["foo"], "jsonrpc" : "2.0", "id": 8 }',
        b'{"method": "delinvoice", "params": ["foo", "unpaid"], "jsonrpc" : "2.0", "id": 9 }',
    ]

    sock.sendall(b'\n'.join(commands))

    # Drain exactly one response per request; _readobj returns
    # (parsed_object, leftover_buffer).
    buff = b''
    for i in commands:
        _, buff = l1.rpc._readobj(sock, buff)
    sock.close()
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_multiplexed_rpc(node_factory):
    """Test that we can do multiple RPCs which exit in different orders"""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # Neighbouring ones may be in or out of order.
    # Each 'slowcmd' appears twice with the same id; delays decrease down the
    # list so the daemon must handle them concurrently.
    commands = [
        b'{"id":1,"jsonrpc":"2.0","method":"dev","params":["slowcmd",2000]}',
        b'{"id":1,"jsonrpc":"2.0","method":"dev","params":["slowcmd",2000]}',
        b'{"id":2,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1500]}',
        b'{"id":2,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1500]}',
        b'{"id":3,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1000]}',
        b'{"id":3,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1000]}',
        b'{"id":4,"jsonrpc":"2.0","method":"dev","params":["slowcmd",500]}',
        b'{"id":4,"jsonrpc":"2.0","method":"dev","params":["slowcmd",500]}'
    ]

    sock.sendall(b'\n'.join(commands))

    buff = b''

    # They will return in the same order, since they start immediately
    # (delaying completion should mean we don't see the other commands intermingled).
    for i in commands:
        obj, buff = l1.rpc._readobj(sock, buff)
        assert obj['id'] == l1.rpc.decoder.decode(i.decode("UTF-8"))['id']
    sock.close()
def test_malformed_rpc(node_factory):
    """Test that we get a correct response to malformed RPC commands"""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # -32600 is JSON-RPC 2.0 "Invalid Request"; -32601 is "Method not found".

    # No ID
    sock.sendall(b'{"jsonrpc":"2.0","method":"getinfo","params":[]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['error']['code'] == -32600

    # No method
    sock.sendall(b'{"id":1, "jsonrpc":"2.0","params":[]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['error']['code'] == -32600

    # Complete crap
    sock.sendall(b'[]')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['error']['code'] == -32600

    # Bad ID
    sock.sendall(b'{"id":{}, "jsonrpc":"2.0","method":"getinfo","params":[]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['error']['code'] == -32600

    # Bad method
    sock.sendall(b'{"id":1, "method": 12, "jsonrpc":"2.0","params":[]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['error']['code'] == -32600

    # Unknown method
    sock.sendall(b'{"id":1, "method": "unknown", "jsonrpc":"2.0","params":[]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['error']['code'] == -32601

    sock.close()
def test_cli(node_factory):
    """Exercise the lightning-cli front-end: help output, JSON (-J) / human (-H)
    / flat (-F) formats, keyword (-k) vs ordered (-o) arguments, and that JSON
    is fully escaped in both method names and parameters."""
    l1 = node_factory.get_node()

    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   'help']).decode('utf-8')
    # Test some known output.
    assert 'help [command]\n List available commands, or give verbose help on one {command}' in out

    # Test JSON output.
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J',
                                   'help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert j['help'][0]['command'] is not None
    assert j['help'][0]['description'] is not None

    # Test keyword input (autodetect)
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J',
                                   'help', 'command=help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test keyword input (forced)
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J', '-k',
                                   'help', 'command=help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (autodetect)
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J',
                                   'help', 'help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (forced)
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J', '-o',
                                   'help', 'help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test missing parameters.
    try:
        # This will error due to missing parameters.
        # We want to check if lightningd will crash.
        out = subprocess.check_output(['cli/lightning-cli',
                                       '--network={}'.format(TEST_NETWORK),
                                       '--lightning-dir={}'
                                       .format(l1.daemon.lightning_dir),
                                       '-J', '-o',
                                       'sendpay']).decode('utf-8')
    except Exception:
        pass

    # Test it escapes JSON completely in both method and params.
    # cli turns " into \", reply turns that into \\\".
    out = subprocess.run(['cli/lightning-cli',
                          '--network={}'.format(TEST_NETWORK),
                          '--lightning-dir={}'
                          .format(l1.daemon.lightning_dir),
                          'x"[]{}'],
                         stdout=subprocess.PIPE)
    assert 'Unknown command \'x\\\\\\"[]{}\'' in out.stdout.decode('utf-8')

    # Quotes/brackets in label and description must round-trip intact.
    subprocess.check_output(['cli/lightning-cli',
                             '--network={}'.format(TEST_NETWORK),
                             '--lightning-dir={}'
                             .format(l1.daemon.lightning_dir),
                             'invoice', '123000', 'l"[]{}', 'd"[]{}']).decode('utf-8')
    # Check label is correct, and also that cli's keyword parsing works.
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-k',
                                   'listinvoices', 'label=l"[]{}']).decode('utf-8')
    j = json.loads(out)
    assert only_one(j['invoices'])['label'] == 'l"[]{}'

    # For those using shell scripts (you know who you are Rene), make sure we're maintaining whitespace
    lines = [l for l in out.splitlines() if '"bolt11"' not in l and '"payment_hash"' not in l and '"expires_at"' not in l]
    assert lines == ['{',
                     ' "invoices": [',
                     ' {',
                     r' "label": "l\"[]{}",',
                     ' "msatoshi": 123000,',
                     ' "amount_msat": "123000msat",',
                     ' "status": "unpaid",',
                     r' "description": "d\"[]{}",',
                     ' }',
                     ' ]',
                     '}']

    # Make sure we omit top-levels and don't include format hint, when -H forced
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-H',
                                   'help']).decode('utf-8')
    lines = out.splitlines()
    assert [l for l in lines if l.startswith('help=')] == []
    assert [l for l in lines if l.startswith('format-hint=')] == []

    # Flat format is great for grep.  LONG LIVE UNIX!
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-F',
                                   'help']).decode('utf-8')
    lines = out.splitlines()
    # Everything is a help[XX]= line, except format-hint.
    assert [l for l in lines if not re.search(r'^help\[[0-9]*\].', l)] == ['format-hint=simple']
def test_daemon_option(node_factory):
    """
    Make sure --daemon at least vaguely works!
    """
    # Lazy way to set up command line and env, plus do VALGRIND checks
    l1 = node_factory.get_node()
    l1.stop()

    # Remove the stale socket so we can wait for the daemonized process to
    # recreate it as its readiness signal.
    os.unlink(l1.rpc.socket_path)
    logfname = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "log-daemon")
    subprocess.run(l1.daemon.cmd_line + ['--daemon', '--log-file={}'.format(logfname)], env=l1.daemon.env,
                   check=True)

    # Test some known output (wait for rpc to be ready)
    wait_for(lambda: os.path.exists(l1.rpc.socket_path))
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   'help']).decode('utf-8')
    assert 'help [command]\n List available commands, or give verbose help on one {command}' in out

    # Shut the daemonized node down via the CLI, since we don't own its process.
    subprocess.run(['cli/lightning-cli',
                    '--network={}'.format(TEST_NETWORK),
                    '--lightning-dir={}'.format(l1.daemon.lightning_dir),
                    'stop'], check=True)

    # It should not complain that subdaemons aren't children.
    with open(logfname, 'r') as f:
        assert 'No child process' not in f.read()
@flaky
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_blockchaintrack(node_factory, bitcoind):
    """Check that we track the blockchain correctly across reorgs
    """
    l1 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr(addresstype='all')['p2sh-segwit']

    ######################################################################
    # First failure scenario: rollback on startup doesn't work,
    # and we try to add a block twice when rescanning:
    l1.restart()

    height = bitcoind.rpc.getblockcount()   # 101

    # At height 111 we receive an incoming payment
    hashes = bitcoind.generate_block(9)     # 102-110
    bitcoind.rpc.sendtoaddress(addr, 1)
    time.sleep(1)  # mempool is still unpredictable
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log(r'Owning output.* \(P2SH\).* CONFIRMED')
    outputs = l1.rpc.listfunds()['outputs']
    assert len(outputs) == 1

    ######################################################################
    # Second failure scenario: perform a 20 block reorg
    bitcoind.generate_block(10)
    l1.daemon.wait_for_log('Adding block {}: '.format(height + 20))

    # Now reorg out with a longer fork of 21 blocks
    bitcoind.rpc.invalidateblock(hashes[0])
    bitcoind.wait_for_log(r'InvalidChainFound: invalid block=.*  height={}'
                          .format(height + 1))
    hashes = bitcoind.generate_block(30)
    time.sleep(1)

    bitcoind.rpc.getblockcount()
    l1.daemon.wait_for_log('Adding block {}: '.format(height + 30))

    # Our funds got reorged out, we should not have any funds that are confirmed
    # NOTE: sendtoaddress() sets locktime=103 and the reorg at 102 invalidates that tx
    # and deletes it from mempool
    assert [o for o in l1.rpc.listfunds()['outputs'] if o['status'] != "unconfirmed"] == []
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_funding_reorg_private(node_factory, bitcoind):
    """Change funding tx height after lockin, between node restart.
    """
    # Rescan to detect reorg at restart and may_reconnect so channeld
    # will restart.  Reorg can cause bad gossip msg.
    opts = {'funding-confirms': 2, 'rescan': 10, 'may_reconnect': True,
            'allow_bad_gossip': True}
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
    l1.fundwallet(10000000)
    sync_blockheight(bitcoind, [l1])                # height 102
    bitcoind.generate_block(3)                      # heights 103-105

    l1.rpc.fundchannel(l2.info['id'], "all", announce=False)
    bitcoind.generate_block(1)                      # height 106

    daemon = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['status']
             == ['{}_AWAITING_LOCKIN:Funding needs 1 more confirmations for lockin.'.format(daemon)])
    bitcoind.generate_block(1)                      # height 107
    l1.wait_channel_active('106x1x0')
    l1.stop()

    # Create a fork that changes short_channel_id from 106x1x0 to 108x1x0
    bitcoind.simple_reorg(106, 2)                   # heights 106-108
    bitcoind.generate_block(1)                      # height 109 (to reach minimum_depth=2 again)
    l1.start()

    # l2 was running, sees last stale block being removed
    l2.daemon.wait_for_logs([r'Removing stale block {}'.format(106),
                             r'Got depth change .->{} for .* REORG'.format(0)])

    # The old scid must deactivate and the new one activate, in both directions.
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('106x1x0')['channels']] == [False, False])
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('108x1x0')['channels']] == [True, True])

    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, True)
    l1.daemon.wait_for_log(r'Deleting channel')
    l2.daemon.wait_for_log(r'Deleting channel')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_funding_reorg_remote_lags(node_factory, bitcoind):
    """Nodes may disagree about short_channel_id before channel announcement
    """
    # may_reconnect so channeld will restart; bad gossip can happen due to reorg
    opts = {'funding-confirms': 1, 'may_reconnect': True, 'allow_bad_gossip': True}
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
    l1.fundwallet(10000000)
    sync_blockheight(bitcoind, [l1])                # height 102

    l1.rpc.fundchannel(l2.info['id'], "all")
    bitcoind.generate_block(5)                      # heights 103 - 107
    l1.wait_channel_active('103x1x0')

    # Make l2 temporary blind for blocks > 107
    def no_more_blocks(req):
        # Mimics bitcoind's getblockhash error for an out-of-range height.
        return {"result": None,
                "error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}

    l2.daemon.rpcproxy.mock_rpc('getblockhash', no_more_blocks)

    # Reorg changes short_channel_id 103x1x0 to 104x1x0, l1 sees it, restarts channeld
    bitcoind.simple_reorg(103, 1)                   # heights 103 - 108
    # But now it's height 104, we need another block to make it announcable.
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log(r'Peer transient failure .* short_channel_id changed to 104x1x0 \(was 103x1x0\)')

    # l2 is still blind: it can't see the new depth, so waits for our sigs.
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. They need our announcement signatures.'])

    # Unblinding l2 brings it back in sync, restarts channeld and sends its announce sig
    l2.daemon.rpcproxy.mock_rpc('getblockhash', None)

    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('103x1x0')['channels']] == [False, False])
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('104x1x0')['channels']] == [True, True])
    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. Channel announced.'])

    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, True)
    l1.daemon.wait_for_log(r'Deleting channel')
    l2.daemon.wait_for_log(r'Deleting channel')
def test_rescan(node_factory, bitcoind):
    """Test the rescan option
    """
    l1 = node_factory.get_node()

    # The first start should start at current_height - 30 = 71, make sure
    # it's not earlier
    l1.daemon.wait_for_log(r'Adding block 101')
    assert not l1.daemon.is_in_log(r'Adding block 70')

    # Restarting with a higher rescan should go back further
    l1.daemon.opts['rescan'] = 50
    l1.restart()
    l1.daemon.wait_for_log(r'Adding block 101')
    assert l1.daemon.is_in_log(r'Adding block 51')
    assert not l1.daemon.is_in_log(r'Adding block 50')

    # Restarting with an absolute rescan should start from there
    # (negative rescan value == absolute blockheight).
    l1.daemon.opts['rescan'] = -31
    l1.restart()
    l1.daemon.wait_for_log(r'Adding block 101')
    assert l1.daemon.is_in_log(r'Adding block 31')
    assert not l1.daemon.is_in_log(r'Adding block 30')

    # Restarting with a future absolute blockheight should *fail* if we
    # can't find that height
    l1.daemon.opts['rescan'] = -500000
    l1.stop()
    bitcoind.generate_block(4)
    with pytest.raises(ValueError):
        l1.start()

    # Restarting with future absolute blockheight is fine if we can find it.
    l1.daemon.opts['rescan'] = -105
    oldneedle = l1.daemon.logsearch_start
    l1.start()
    # This could occur before pubkey msg, so move search needle back.
    l1.daemon.logsearch_start = oldneedle
    l1.daemon.wait_for_log(r'Adding block 105')
    assert not l1.daemon.is_in_log(r'Adding block 102')
def test_bitcoind_goes_backwards(node_factory, bitcoind):
    """Check that we refuse to acknowledge bitcoind giving a shorter chain without explicit rescan"""
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    bitcoind.generate_block(10)  # heights 102-111
    sync_blockheight(bitcoind, [l1])
    l1.stop()

    # Now shrink chain (invalidateblock leaves 'headers' field until restart)
    bitcoind.rpc.invalidateblock(bitcoind.rpc.getblockhash(105))
    # Restart without killing proxies
    bitcoind.rpc.stop()
    TailableProc.stop(bitcoind)
    bitcoind.start()

    # Will simply refuse to start.
    with pytest.raises(ValueError):
        l1.start()
    # Nor will it start with if we ask for a reindex of fewer blocks.
    l1.daemon.opts['rescan'] = 3
    with pytest.raises(ValueError):
        l1.start()

    # This will force it, however (negative rescan == absolute height).
    l1.daemon.opts['rescan'] = -100
    l1.start()

    # Now mess with bitcoind at runtime.
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1])

    l1.daemon.wait_for_log('Adding block 110')

    # Shrink the chain again while l1 keeps running.
    bitcoind.rpc.invalidateblock(bitcoind.rpc.getblockhash(105))
    bitcoind.rpc.stop()
    TailableProc.stop(bitcoind)
    bitcoind.start()
    bitcoind.generate_block(5)

    # It will ignore bitcoind and keep asking for block 110.
    time.sleep(5)
    assert l1.rpc.getinfo()['blockheight'] == 110
    assert not l1.daemon.is_in_log('Adding block 109',
                                   start=l1.daemon.logsearch_start)

    # Get past that, and it will suddenly read new blocks
    bitcoind.generate_block(2)
    l1.daemon.wait_for_log('Adding block 109')
    l1.daemon.wait_for_log('Adding block 110')
    l1.daemon.wait_for_log('Adding block 111')
@flaky
def test_reserve_enforcement(node_factory, executor):
    """Channeld should disallow you spending into your reserve"""
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True, 'allow_warning': True})

    # Pay 1000 satoshi to l2.
    l1.pay(l2, 1000000)
    l2.stop()

    # They should both aim for 1%.
    reserves = l2.db.query('SELECT channel_reserve_satoshis FROM channel_configs')
    assert reserves == [{'channel_reserve_satoshis': 10**6 // 100}] * 2

    # Edit db to reduce reserve to 0 so it will try to violate it.
    l2.db.execute('UPDATE channel_configs SET channel_reserve_satoshis=0')

    l2.start()
    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])

    # This should be impossible to pay entire thing back: l1 should warn and
    # close connection for trying to violate reserve.
    executor.submit(l2.pay, l1, 1000000)
    l1.daemon.wait_for_log(
        'Peer transient failure in CHANNELD_NORMAL: channeld.*'
        ' CHANNEL_ERR_CHANNEL_CAPACITY_EXCEEDED'
    )
    assert only_one(l1.rpc.listpeers()['peers'])['connected'] is False
@unittest.skipIf(not DEVELOPER, "needs dev_disconnect")
def test_htlc_send_timeout(node_factory, bitcoind, compat):
    """Test that we don't commit an HTLC to an unreachable node."""
    # Feerates identical so we don't get gratuitous commit to update them
    l1, l2, l3 = node_factory.line_graph(3, opts=[{'log-level': 'io',
                                                   'feerates': (7500, 7500, 7500, 7500)},
                                                  # Blackhole it after it sends HTLC_ADD to l3.
                                                  {'log-level': 'io',
                                                   'feerates': (7500, 7500, 7500, 7500),
                                                   'disconnect': ['0WIRE_UPDATE_ADD_HTLC']},
                                                  {}],
                                        wait_for_announce=True)

    chanid2 = l2.get_channel_scid(l3)

    # Make sure we have 30 seconds without any incoming traffic from l3 to l2
    # so it tries to ping before sending WIRE_COMMITMENT_SIGNED.
    timedout = False
    while not timedout:
        try:
            l2.daemon.wait_for_log(r'channeld-chan#[0-9]*: \[IN\] ', timeout=30)
        except TimeoutError:
            timedout = True

    inv = l3.rpc.invoice(123000, 'test_htlc_send_timeout', 'description')
    with pytest.raises(RpcError, match=r'Ran out of routes to try after [0-9]+ attempt[s]?') as excinfo:
        l1.rpc.pay(inv['bolt11'])

    err = excinfo.value
    # Complains it stopped after several attempts.
    # FIXME: include in pylightning
    PAY_STOPPED_RETRYING = 210
    assert err.error['code'] == PAY_STOPPED_RETRYING

    status = only_one(l1.rpc.call('paystatus')['pay'])

    # Temporary channel failure
    assert status['attempts'][0]['failure']['data']['failcode'] == 0x1007
    assert status['attempts'][0]['failure']['data']['erring_node'] == l2.info['id']
    assert status['attempts'][0]['failure']['data']['erring_channel'] == chanid2

    # L2 should send ping, but never receive pong so never send commitment.
    # 0012 = WIRE_PING, 0013 = WIRE_PONG, 0084 = WIRE_COMMITMENT_SIGNED
    # (presumably -- inferred from the comments; TODO confirm against BOLT #1/#2).
    l2.daemon.wait_for_log(r'{}-.*channeld.*: \[OUT\] 0012'.format(l3.info['id']))
    assert not l2.daemon.is_in_log(r'{}-.*channeld.*: \[IN\] 0013'.format(l3.info['id']))
    assert not l2.daemon.is_in_log(r'{}-.*channeld.*: \[OUT\] 0084'.format(l3.info['id']))
    # L2 killed the channel with l3 because it was too slow.
    l2.daemon.wait_for_log('{}-.*channeld-.*Adding HTLC too slow: killing connection'.format(l3.info['id']))
def test_ipv4_and_ipv6(node_factory):
    """Test we can bind to both IPv4 and IPv6 addresses (if supported)"""
    port = reserve()
    l1 = node_factory.get_node(options={'addr': ':{}'.format(port)})
    bindings = l1.rpc.getinfo()['binding']

    if len(bindings) == 2:
        # Dual-stack host: IPv6 wildcard first, then IPv4 wildcard.
        six, four = bindings
        assert (six['type'], six['address'], int(six['port'])) == ('ipv6', '::', port)
        assert (four['type'], four['address'], int(four['port'])) == ('ipv4', '0.0.0.0', port)
    else:
        # Assume we're IPv4 only...
        assert len(bindings) == 1
        only = bindings[0]
        assert (only['type'], only['address'], int(only['port'])) == ('ipv4', '0.0.0.0', port)
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Fees on elements are different")
@unittest.skipIf(
    not DEVELOPER or DEPRECATED_APIS, "Without DEVELOPER=1 we snap to "
    "FEERATE_FLOOR on testnets, and we test the new API."
)
def test_feerates(node_factory):
    """Feerate estimates fill in incrementally as bitcoind starts supplying
    them; check the perkw/perkb views, acceptability bounds and the derived
    onchain fee estimates at each stage."""
    l1 = node_factory.get_node(options={'log-level': 'io'}, start=False)
    # Start with bitcoind returning no estimates at all.
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', {
        'error': {"errors": ["Insufficient data or no feerate found"], "blocks": 0}
    })
    l1.start()

    # All estimation types
    types = ["opening", "mutual_close", "unilateral_close", "delayed_to_us",
             "htlc_resolution", "penalty"]

    # Query feerates (shouldn't give any!)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 2)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 2**32 - 1
    assert feerates['perkw']['min_acceptable'] == 253
    for t in types:
        assert t not in feerates['perkw']

    wait_for(lambda: len(l1.rpc.feerates('perkb')['perkb']) == 2)
    feerates = l1.rpc.feerates('perkb')
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkw' not in feerates
    # perkb values are always 4x the perkw ones.
    assert feerates['perkb']['max_acceptable'] == (2**32 - 1)
    assert feerates['perkb']['min_acceptable'] == 253 * 4
    for t in types:
        assert t not in feerates['perkb']

    # Now try setting them, one at a time.
    # Set CONSERVATIVE/2 feerate, for max and unilateral_close
    l1.set_feerates((15000, 0, 0, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 3)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 15000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 253

    # Set CONSERVATIVE/3 feerate, for htlc_resolution and penalty
    l1.set_feerates((15000, 11000, 0, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 5)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 15000
    assert feerates['perkw']['htlc_resolution'] == 11000
    assert feerates['perkw']['penalty'] == 11000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 253

    # Set ECONOMICAL/4 feerate, for all but min (so, no mutual_close feerate)
    l1.set_feerates((15000, 11000, 6250, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkb')['perkb']) == len(types) - 1 + 2)
    feerates = l1.rpc.feerates('perkb')
    assert feerates['perkb']['unilateral_close'] == 15000 * 4
    assert feerates['perkb']['htlc_resolution'] == 11000 * 4
    assert feerates['perkb']['penalty'] == 11000 * 4
    assert 'mutual_close' not in feerates['perkb']
    for t in types:
        if t not in ("unilateral_close", "htlc_resolution", "penalty", "mutual_close"):
            assert feerates['perkb'][t] == 25000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkw' not in feerates
    assert feerates['perkb']['max_acceptable'] == 15000 * 4 * 10
    assert feerates['perkb']['min_acceptable'] == 253 * 4

    # Set ECONOMICAL/100 feerate for min
    l1.set_feerates((15000, 11000, 6250, 5000), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) >= len(types) + 2)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 15000
    assert feerates['perkw']['htlc_resolution'] == 11000
    assert feerates['perkw']['penalty'] == 11000
    assert feerates['perkw']['mutual_close'] == 5000
    for t in types:
        if t not in ("unilateral_close", "htlc_resolution", "penalty", "mutual_close"):
            assert feerates['perkw'][t] == 25000 // 4
    # All estimates present now, so no warning.
    assert 'warning' not in feerates
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 5000 // 2

    # Derived per-transaction estimates: feerate * weight // 1000.
    assert len(feerates['onchain_fee_estimates']) == 5
    assert feerates['onchain_fee_estimates']['opening_channel_satoshis'] == feerates['perkw']['opening'] * 702 // 1000
    assert feerates['onchain_fee_estimates']['mutual_close_satoshis'] == feerates['perkw']['mutual_close'] * 673 // 1000
    assert feerates['onchain_fee_estimates']['unilateral_close_satoshis'] == feerates['perkw']['unilateral_close'] * 598 // 1000
    htlc_feerate = feerates["perkw"]["htlc_resolution"]
    htlc_timeout_cost = feerates["onchain_fee_estimates"]["htlc_timeout_satoshis"]
    htlc_success_cost = feerates["onchain_fee_estimates"]["htlc_success_satoshis"]
    if EXPERIMENTAL_FEATURES:
        # option_anchor_outputs
        assert htlc_timeout_cost == htlc_feerate * 666 // 1000
        assert htlc_success_cost == htlc_feerate * 706 // 1000
    else:
        assert htlc_timeout_cost == htlc_feerate * 663 // 1000
        assert htlc_success_cost == htlc_feerate * 703 // 1000
def test_logging(node_factory):
    """SIGHUP must close the current log file and reopen it at the configured
    path (classic logrotate handshake), repeatably."""
    # Since we redirect, node.start() will fail: do manually.
    l1 = node_factory.get_node(options={'log-file': 'logfile'}, start=False)
    logpath = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile')
    logpath_moved = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile_moved')

    l1.daemon.start(wait_for_initialized=False)
    wait_for(lambda: os.path.exists(logpath))

    # Rotate: move the file aside, then SIGHUP to make the daemon reopen.
    shutil.move(logpath, logpath_moved)
    l1.daemon.proc.send_signal(signal.SIGHUP)
    wait_for(lambda: os.path.exists(logpath_moved))
    wait_for(lambda: os.path.exists(logpath))

    log1 = open(logpath_moved).readlines()
    assert log1[-1].endswith("Ending log due to SIGHUP\n")

    def check_new_log():
        # The reopened file must start with the SIGHUP banner.
        log2 = open(logpath).readlines()
        return len(log2) > 0 and log2[0].endswith("Started log due to SIGHUP\n")
    wait_for(check_new_log)

    # Issue #4240
    # Repeated SIGHUP should just re-open the log file
    # and not terminate the daemon.
    logpath_moved_2 = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile_moved_2')
    shutil.move(logpath, logpath_moved_2)
    l1.daemon.proc.send_signal(signal.SIGHUP)
    wait_for(lambda: os.path.exists(logpath_moved_2))
    wait_for(lambda: os.path.exists(logpath))
    wait_for(check_new_log)
@unittest.skipIf(VALGRIND,
                 "Valgrind sometimes fails assert on injected SEGV")
def test_crashlog(node_factory):
    """An injected SIGSEGV must produce a crash.log in the network directory."""
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    def crash_log_exists(node):
        # True once any file containing 'crash.log' appears.
        netdir = os.path.join(node.daemon.lightning_dir, TEST_NETWORK)
        return any('crash.log' in fname for fname in os.listdir(netdir))

    assert not crash_log_exists(l1)
    l1.daemon.proc.send_signal(signal.SIGSEGV)
    wait_for(lambda: crash_log_exists(l1))
def test_configfile_before_chdir(node_factory):
    """Must read config file before chdir into lightning dir"""
    l1 = node_factory.get_node()
    l1.stop()

    olddir = os.getcwd()
    # as lightning_dir ends in /, basename and dirname don't work as expected.
    os.chdir(os.path.dirname(l1.daemon.lightning_dir[:-1]))
    config = os.path.join(os.path.basename(l1.daemon.lightning_dir[:-1]), TEST_NETWORK, "test_configfile")

    # Test both an early arg and a normal arg.
    with open(config, 'wb') as f:
        f.write(b'always-use-proxy=true\n')
        f.write(b'proxy=127.0.0.1:100\n')
    l1.daemon.opts['conf'] = config

    # Update executable to point to right place
    l1.daemon.executable = os.path.join(olddir, l1.daemon.executable)
    l1.start()

    # Both options from the (relative-path) config must have taken effect.
    assert l1.rpc.listconfigs()['always-use-proxy']
    assert l1.rpc.listconfigs()['proxy'] == '127.0.0.1:100'
    os.chdir(olddir)
def test_json_error(node_factory):
    """Must return valid json even if it quotes our weirdness"""
    l1 = node_factory.get_node()

    # Deliberately bogus close arguments: a dict instead of a channel id.
    bogus_close_arg = {"tx": "020000000001011490f737edd2ea2175a032b58ea7cd426dfc244c339cd044792096da3349b18a0100000000ffffffff021c900300000000001600140e64868e2f752314bc82a154c8c5bf32f3691bb74da00b00000000002200205b8cd3b914cf67cdd8fa6273c930353dd36476734fbd962102c2df53b90880cd0247304402202b2e3195a35dc694bbbc58942dc9ba59cc01d71ba55c9b0ad0610ccd6a65633702201a849254453d160205accc00843efb0ad1fe0e186efa6a7cee1fb6a1d36c736a012103d745445c9362665f22e0d96e9e766f273f3260dea39c8a76bfa05dd2684ddccf00000000", "txid": "2128c10f0355354479514f4a23eaa880d94e099406d419bbb0d800143accddbb", "channel_id": "bbddcc3a1400d8b0bb19d40694094ed980a8ea234a4f5179443555030fc12820"}
    with pytest.raises(RpcError, match=r'id: should be a channel ID or short channel ID: invalid token'):
        l1.rpc.close(bogus_close_arg)

    # Should not corrupt following RPC
    l1.rpc.getinfo()
def test_check_command(node_factory):
    """The 'check' command validates another command's parameters without
    running it, via both the python wrapper and raw socket (array params)."""
    l1 = node_factory.get_node()

    l1.rpc.check(command_to_check='help')
    l1.rpc.check(command_to_check='help', command='check')
    # Note: this just checks form, not whether it's valid!
    l1.rpc.check(command_to_check='help', command='badcommand')
    with pytest.raises(RpcError, match=r'Unknown command'):
        l1.rpc.check(command_to_check='badcommand')
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.check(command_to_check='help', badarg='x')

    # Ensures we have compulsory parameters.
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='connect')
    # Even with optional parameters.
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='connect', host='x', port=77)
    # Makes sure parameter types are correct.
    with pytest.raises(RpcError, match=r'should be an integer'):
        l1.rpc.check(command_to_check='connect', id='test', host='x', port="abcd")

    # FIXME: python wrapper doesn't let us test array params.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # Valid: check help with no further args.
    sock.sendall(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["help"]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['id'] == 1
    assert 'result' in obj
    assert 'error' not in obj

    # Valid: check help with one positional arg.
    sock.sendall(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["help", "check"]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['id'] == 1
    assert 'result' in obj
    assert 'error' not in obj

    # Invalid: too many positional args for help.
    sock.sendall(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["help", "a", "b"]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['id'] == 1
    assert 'result' not in obj
    assert 'error' in obj

    # Invalid: unknown command.
    sock.sendall(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["badcommand"]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['id'] == 1
    assert 'result' not in obj
    assert 'error' in obj

    # Invalid: connect with required params missing.
    sock.sendall(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["connect"]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['id'] == 1
    assert 'result' not in obj
    assert 'error' in obj

    # Invalid: connect with a non-integer port.
    sock.sendall(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["connect", "test", "x", "abcd"]}')
    obj, _ = l1.rpc._readobj(sock, b'')
    assert obj['id'] == 1
    assert 'result' not in obj
    assert 'error' in obj

    sock.close()
@unittest.skipIf(not DEVELOPER, "FIXME: without DEVELOPER=1 we timeout")
def test_bad_onion(node_factory, bitcoind):
    """Test that we get a reasonable error from sendpay when an onion is bad"""
    # Four-node line; io-level logging lets us grep raw wire messages below.
    l1, l2, l3, l4 = node_factory.line_graph(4, wait_for_announce=True,
                                             opts={'log-level': 'io'})
    h = l4.rpc.invoice(123000, 'test_bad_onion', 'description')['payment_hash']
    route = l1.rpc.getroute(l4.info['id'], 123000, 1)['route']
    assert len(route) == 3
    mangled_nodeid = '0265b6ab5ec860cd257865d61ef0bbf5b3339c36cbda8b26b74e7f1dca490b6518'
    # Replace id with a different pubkey, so onion encoded badly at third hop.
    route[2]['id'] = mangled_nodeid
    l1.rpc.sendpay(route, h)
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(h)
    # FIXME: #define PAY_TRY_OTHER_ROUTE 204
    PAY_TRY_OTHER_ROUTE = 204
    assert err.value.error['code'] == PAY_TRY_OTHER_ROUTE
    # FIXME: WIRE_INVALID_ONION_HMAC = BADONION|PERM|5
    WIRE_INVALID_ONION_HMAC = 0x8000 | 0x4000 | 5
    assert err.value.error['data']['failcode'] == WIRE_INVALID_ONION_HMAC
    assert err.value.error['data']['erring_node'] == mangled_nodeid
    assert err.value.error['data']['erring_channel'] == route[2]['channel']
    # We should see a WIRE_UPDATE_FAIL_MALFORMED_HTLC from l4.
    line = l4.daemon.is_in_log(r'\[OUT\] 0087')
    # Example of the raw message layout this regex parses:
    # 008739d3149a5c37e95f9dae718ce46efc60248e110e10117d384870a6762e8e33030000000000000000d7fc52f6c32773aabca55628fe616058aecc44a384e0abfa85c0c48b449dd38dc005
    # type<--------------channelid---------------------------------------><--htlc-id-----><--------------------------------------------- sha_of_onion --->code
    sha = re.search(r' 0087.{64}.{16}(.{64})', line).group(1)
    # Should see same sha in onionreply
    l1.daemon.wait_for_log(r'failcode .* from onionreply .*{sha}'.format(sha=sha))
    # Replace id with a different pubkey, so onion encoded badly at second hop.
    route[1]['id'] = mangled_nodeid
    l1.rpc.sendpay(route, h)
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(h)
    # FIXME: #define PAY_TRY_OTHER_ROUTE 204
    PAY_TRY_OTHER_ROUTE = 204
    assert err.value.error['code'] == PAY_TRY_OTHER_ROUTE
    assert err.value.error['data']['failcode'] == WIRE_INVALID_ONION_HMAC
    assert err.value.error['data']['erring_node'] == mangled_nodeid
    assert err.value.error['data']['erring_channel'] == route[1]['channel']
@unittest.skipIf(not DEVELOPER, "Needs DEVELOPER=1 to force onion fail")
def test_bad_onion_immediate_peer(node_factory, bitcoind):
    """Test that we handle the malformed msg when we're the origin"""
    l1, l2 = node_factory.line_graph(2, opts={'dev-fail-process-onionpacket': None})
    payment_hash = l2.rpc.invoice(123000, 'test_bad_onion_immediate_peer', 'description')['payment_hash']
    route = l1.rpc.getroute(l2.info['id'], 123000, 1)['route']
    assert len(route) == 1
    l1.rpc.sendpay(route, payment_hash)
    with pytest.raises(RpcError) as excinfo:
        l1.rpc.waitsendpay(payment_hash)
    # lightningd's PAY_UNPARSEABLE_ONION error code.
    PAY_UNPARSEABLE_ONION = 202
    # WIRE_INVALID_ONION_HMAC == BADONION | PERM | 5 == 0xc005.
    WIRE_INVALID_ONION_HMAC = 0xc005
    failure = excinfo.value.error
    assert failure['code'] == PAY_UNPARSEABLE_ONION
    assert failure['data']['failcode'] == WIRE_INVALID_ONION_HMAC
def test_newaddr(node_factory, chainparams):
    """newaddr returns only the requested address type(s)."""
    l1 = node_factory.get_node()
    # Asking for one type must not leak the other.
    resp = l1.rpc.newaddr('p2sh-segwit')
    assert 'bech32' not in resp
    assert resp['p2sh-segwit'].startswith(chainparams['p2sh_prefix'])
    resp = l1.rpc.newaddr('bech32')
    assert 'p2sh-segwit' not in resp
    assert resp['bech32'].startswith(chainparams['bip173_prefix'])
    # 'all' yields both flavours at once.
    resp = l1.rpc.newaddr('all')
    assert resp['p2sh-segwit'].startswith(chainparams['p2sh_prefix'])
    assert resp['bech32'].startswith(chainparams['bip173_prefix'])
def test_newaddr_deprecated(node_factory, chainparams):
    """With deprecated APIs enabled, newaddr also exposes the old 'address' key."""
    l1 = node_factory.get_node(options={'allow-deprecated-apis': True})
    addr = l1.rpc.newaddr('p2sh-segwit')['address']
    assert addr.startswith(chainparams['p2sh_prefix'])
    addr = l1.rpc.newaddr('bech32')['address']
    assert addr.startswith(chainparams['bip173_prefix'])
def test_bitcoind_fail_first(node_factory, bitcoind, executor):
    """Make sure we handle spurious bitcoin-cli failures during startup

    See [#2687](https://github.com/ElementsProject/lightning/issues/2687) for
    details
    """
    # Do not start the lightning node since we need to instrument bitcoind
    # first.
    l1 = node_factory.get_node(start=False)
    # Instrument bitcoind to fail some queries first.
    def mock_fail(*args):
        raise ValueError()
    l1.daemon.rpcproxy.mock_rpc('getblockhash', mock_fail)
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_fail)
    # Start in the background: startup blocks until bitcoind answers.
    f = executor.submit(l1.start)
    wait_for(lambda: l1.daemon.running)
    # Make sure it fails on the first `getblock` call (need to use `is_in_log`
    # since the `wait_for_log` in `start` sets the offset)
    wait_for(lambda: l1.daemon.is_in_log(
        r'getblockhash [a-z0-9]* exited with status 1'))
    wait_for(lambda: l1.daemon.is_in_log(
        r'Unable to estimate opening fees'))
    # Now unset the mock, so calls go through again
    l1.daemon.rpcproxy.mock_rpc('getblockhash', None)
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', None)
    # Startup can now complete; propagate any exception from the thread.
    f.result()
@unittest.skipIf(not DEVELOPER, "needs --dev-force-bip32-seed")
@unittest.skipIf(TEST_NETWORK != 'regtest', "Addresses are network specific")
def test_dev_force_bip32_seed(node_factory):
    """A forced BIP32 seed yields a deterministic sequence of bech32 addresses."""
    l1 = node_factory.get_node(options={'dev-force-bip32-seed': '0000000000000000000000000000000000000000000000000000000000000001'})
    # Derivation starts at m/0/0/1 and advances once per newaddr call.
    expected = [
        "bcrt1qsdzqt93xsyewdjvagndw9523m27e52er5ca7hm",
        "bcrt1qlkt93775wmf33uacykc49v2j4tayn0yj25msjn",
        "bcrt1q2ng546gs0ylfxrvwx0fauzcvhuz655en4kwe2c",
        "bcrt1qrdpwrlrmrnvn535l5eldt64lxm8r2nwkv0ruxq",
        "bcrt1q622lwmdzxxterumd746eu3d3t40pq53p62zhlz",
    ]
    for addr in expected:
        assert l1.rpc.newaddr('bech32')['bech32'] == addr
@unittest.skipIf(not DEVELOPER, "needs dev command")
def test_dev_demux(node_factory):
    """`check` and direct calls must agree on the multiplexed `dev` command."""
    # 'dev crash' really kills the node at the end, hence may_fail etc.
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)
    # Check should work.
    l1.rpc.check(command_to_check='dev', subcommand='crash')
    l1.rpc.check(command_to_check='dev', subcommand='slowcmd', msec=1000)
    l1.rpc.check(command_to_check='dev', subcommand='rhash', secret='00' * 32)
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.check(command_to_check='dev', subcommand='foobar')
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.check(command_to_check='dev', subcommand='crash', unk=1)
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.check(command_to_check='dev', subcommand='slowcmd', msec='aaa')
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='dev', subcommand='rhash')
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='dev')
    # Non-check failures should fail, in both object and array form.
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.call('dev', {'subcommand': 'foobar'})
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.call('dev', ['foobar'])
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.call('dev', {'subcommand': 'crash', 'unk': 1})
    with pytest.raises(RpcError, match=r'too many parameters'):
        l1.rpc.call('dev', ['crash', 1])
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.call('dev', {'subcommand': 'slowcmd', 'msec': 'aaa'})
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.call('dev', ['slowcmd', 'aaa'])
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev', {'subcommand': 'rhash'})
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev', ['rhash'])
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev')
    # Help should list them all.
    assert 'subcommand=crash|rhash|slowcmd' in l1.rpc.help('dev')['help'][0]['command']
    # These work
    assert l1.rpc.call('dev', ['slowcmd', '7'])['msec'] == 7
    assert l1.rpc.call('dev', {'subcommand': 'slowcmd', 'msec': '7'})['msec'] == 7
    assert l1.rpc.call('dev', {'subcommand': 'rhash', 'secret': '00' * 32})['rhash'] == '66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925'
    # Actually crashing: the RPC errors out as the daemon goes down.
    with pytest.raises(RpcError):
        l1.rpc.call('dev', {'subcommand': 'crash'})
def test_list_features_only(node_factory):
    """--list-features-only prints our feature bits, one per line, then exits."""
    raw = subprocess.check_output(['lightningd/lightningd',
                                   '--list-features-only'])
    features = raw.decode('utf-8').splitlines()
    wanted = ['option_data_loss_protect/odd',
              'option_upfront_shutdown_script/odd',
              'option_gossip_queries/odd',
              'option_var_onion_optin/odd',
              'option_gossip_queries_ex/odd',
              'option_static_remotekey/odd',
              'option_payment_secret/odd',
              'option_basic_mpp/odd',
              ]
    if EXPERIMENTAL_FEATURES:
        wanted.append('option_anchor_outputs/odd')
        wanted.append('option_unknown_102/odd')
    assert features == wanted
def test_relative_config_dir(node_factory):
    """A relative --lightning-dir must be canonicalized to an absolute path."""
    l1 = node_factory.get_node(start=False)
    initial_dir = os.getcwd()
    # NOTE(review): [:-1] drops the last character -- presumably a trailing
    # '/' in the fixture's lightning-dir; confirm against node_factory.
    lndir = l1.daemon.opts.get("lightning-dir")[:-1]
    # chdir into the parent so only the final path component stays relative.
    *root_dir, l1.daemon.opts["lightning-dir"] = lndir.split('/')
    os.chdir('/'.join(root_dir))
    # The executable path must remain valid from the new working directory.
    l1.daemon.executable = os.path.join(initial_dir, l1.daemon.executable)
    l1.start()
    # listconfigs must report the absolute form of the directory.
    assert os.path.isabs(l1.rpc.listconfigs()["lightning-dir"])
    l1.stop()
    # Restore cwd for the other tests in this process.
    os.chdir(initial_dir)
def test_signmessage(node_factory):
    """signmessage/checkmessage round-trip, plus externally contributed vectors."""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)
    # Each corpus entry: [contributor, message, zbase signature, signer pubkey].
    corpus = [[None,
               "this is a test!",
               l1.rpc.signmessage("this is a test!")['zbase'],
               l1.info['id']]]
    # Other contributions from LND users!
    corpus += [
        ['@bitconner',
         "is this compatible?",
         'rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t',
         '02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5'],
        ['@duck1123',
         'hi',
         'rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg',
         '02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b'],
        ['@jochemin',
         'hi',
         'ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3',
         '022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931'],
    ]
    for c in corpus:
        print("Shout out to {}".format(c[0]))
        # The standalone tool recovers the same key from the signature...
        assert subprocess.check_output(['devtools/lightning-checkmessage',
                                        c[1], c[2]]).decode('utf-8') == "Signature claims to be from key {}\n".format(c[3])
        subprocess.run(['devtools/lightning-checkmessage', c[1], c[2], c[3]], check=True)
        # ...and rejects a tampered message (non-zero exit).
        with pytest.raises(subprocess.CalledProcessError):
            subprocess.run(['devtools/lightning-checkmessage',
                            c[1] + "modified", c[2], c[3]], check=True)
        assert l1.rpc.checkmessage(c[1], c[2], c[3])['verified']
        assert not l1.rpc.checkmessage(c[1] + "modified", c[2], c[3])['verified']
        # Without an explicit key, checkmessage recovers one and reports
        # verified only when that key is a known node.
        checknokey = l1.rpc.checkmessage(c[1], c[2])
        # Of course, we know our own pubkey
        if c[3] == l1.info['id']:
            assert checknokey['verified']
        else:
            assert not checknokey['verified']
        assert checknokey['pubkey'] == c[3]
    # l2 knows about l1, so it can validate it.
    zm = l1.rpc.signmessage(message="message for you")['zbase']
    checknokey = l2.rpc.checkmessage(message="message for you", zbase=zm)
    assert checknokey['pubkey'] == l1.info['id']
    assert checknokey['verified']
def test_include(node_factory):
    """An 'include' directive pulls a second config file into the first."""
    l1 = node_factory.get_node(start=False)
    confdir = os.path.join(l1.daemon.opts.get("lightning-dir"), "subdir")
    os.makedirs(confdir)
    # conf1 merely includes conf2, which sets the alias.
    with open(os.path.join(confdir, "conf1"), 'w') as outer:
        outer.write('include conf2')
    with open(os.path.join(confdir, "conf2"), 'w') as inner:
        inner.write('alias=conf2')
    l1.daemon.opts['conf'] = os.path.join(confdir, "conf1")
    l1.start()
    assert l1.rpc.listconfigs('alias')['alias'] == 'conf2'
def test_config_in_subdir(node_factory, chainparams):
    """Per-network config files work; some options are restricted by scope."""
    l1 = node_factory.get_node(start=False)
    network = chainparams['name']
    subdir = os.path.join(l1.daemon.opts.get("lightning-dir"), network)
    # A config inside <lightning-dir>/<network>/ is picked up automatically.
    with open(os.path.join(subdir, "config"), 'w') as f:
        f.write('alias=test_config_in_subdir')
    l1.start()
    assert l1.rpc.listconfigs('alias')['alias'] == 'test_config_in_subdir'
    l1.stop()
    # conf is not allowed in any config file.
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('conf={}/conf'.format(network))
    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "conf: not permitted in configuration files" in out.stderr.decode('utf-8')
    # network is allowed in root config file.
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('network={}'.format(network))
    l1.start()
    l1.stop()
    # but not in network config file.
    with open(os.path.join(subdir, "config"), 'w') as f:
        f.write('network={}'.format(network))
    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "network: not permitted in network-specific configuration files" in out.stderr.decode('utf-8')
    # lightning-dir only allowed if we explicitly use --conf
    os.unlink(os.path.join(subdir, "config"))
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('lightning-dir={}/test'.format(l1.daemon.opts.get("lightning-dir")))
    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "lightning-dir: not permitted in implicit configuration files" in out.stderr.decode('utf-8')
    # The same file is accepted when named explicitly via --conf.
    l1.daemon.opts['conf'] = os.path.join(l1.daemon.opts.get("lightning-dir"), "config")
    l1.start()
def restore_valgrind(node, subdir):
    """Move valgrind files back to where fixtures expect them"""
    dest = node.daemon.opts.get("lightning-dir")
    for name in os.listdir(subdir):
        if not name.startswith('valgrind-errors.'):
            continue
        shutil.move(os.path.join(subdir, name), dest)
@unittest.skipIf(env('COMPAT') != 1, "Upgrade code requires COMPAT_V073")
def test_testnet_upgrade(node_factory):
    """Test that we move files correctly on old testnet upgrade (even without specifying the network)"""
    l1 = node_factory.get_node(start=False, may_fail=True)
    basedir = l1.daemon.opts.get("lightning-dir")
    # Make it old-style: hsm_secret at top level, no per-network subdir.
    os.rename(os.path.join(basedir, TEST_NETWORK, 'hsm_secret'),
              os.path.join(basedir, 'hsm_secret'))
    shutil.rmtree(os.path.join(basedir, TEST_NETWORK))
    # Add (empty!) config file; it should be left in place.
    with open(os.path.join(basedir, 'config'), 'wb') as f:
        f.write(b"# Test config file")
    with open(os.path.join(basedir, 'another_file'), 'wb') as f:
        pass
    # We need to allow this, otherwise no upgrade!
    del l1.daemon.opts['allow-deprecated-apis']
    # We want to test default network
    del l1.daemon.opts['network']
    # Wrong chain, will fail to start, but that's OK.
    with pytest.raises(ValueError):
        l1.start()
    # Default network is testnet, so files migrate into basedir/testnet/.
    netdir = os.path.join(basedir, "testnet")
    assert l1.daemon.is_in_log("Moving hsm_secret into {}/".format(netdir))
    assert l1.daemon.is_in_log("Moving another_file into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving config into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving lightningd-testnet.pid into {}/"
                                   .format(netdir))
    # Should move these
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert os.path.isfile(os.path.join(netdir, "another_file"))
    assert not os.path.isfile(os.path.join(basedir, "another_file"))
    # Should NOT move these
    assert not os.path.isfile(os.path.join(netdir, "lightningd-testnet.pid"))
    assert os.path.isfile(os.path.join(basedir, "lightningd-testnet.pid"))
    assert not os.path.isfile(os.path.join(netdir, "config"))
    assert os.path.isfile(os.path.join(basedir, "config"))
    # Put valgrind logs back where the test fixtures look for them.
    restore_valgrind(l1, netdir)
@unittest.skipIf(env('COMPAT') != 1, "Upgrade code requires COMPAT_V073")
def test_regtest_upgrade(node_factory):
    """Test that we move files correctly on regtest upgrade"""
    l1 = node_factory.get_node(start=False)
    basedir = l1.daemon.opts.get("lightning-dir")
    netdir = os.path.join(basedir, TEST_NETWORK)
    # Make it old-style: hsm_secret at top level, no per-network subdir.
    os.rename(os.path.join(basedir, TEST_NETWORK, 'hsm_secret'),
              os.path.join(basedir, 'hsm_secret'))
    shutil.rmtree(os.path.join(basedir, TEST_NETWORK))
    # Add config file which tells us it's regtest; it should be left in place.
    with open(os.path.join(basedir, 'config'), 'wb') as f:
        f.write(bytes("network={}".format(TEST_NETWORK), "utf-8"))
    with open(os.path.join(basedir, 'another_file'), 'wb') as f:
        pass
    # We need to allow this, otherwise no upgrade!
    del l1.daemon.opts['allow-deprecated-apis']
    # It should get this from the config file.
    del l1.daemon.opts['network']
    l1.start()
    assert l1.daemon.is_in_log("Moving hsm_secret into {}/".format(netdir))
    assert l1.daemon.is_in_log("Moving another_file into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving config into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving lightningd-testnet.pid into {}/"
                                   .format(netdir))
    # Should move these
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert os.path.isfile(os.path.join(netdir, "another_file"))
    assert not os.path.isfile(os.path.join(basedir, "another_file"))
    # Should NOT move these
    assert not os.path.isfile(os.path.join(netdir, "lightningd-{}.pid".format(TEST_NETWORK)))
    assert os.path.isfile(os.path.join(basedir, "lightningd-{}.pid".format(TEST_NETWORK)))
    assert not os.path.isfile(os.path.join(netdir, "config"))
    assert os.path.isfile(os.path.join(basedir, "config"))
    # Should restart fine
    l1.restart()
    # Put valgrind logs back where the test fixtures look for them.
    restore_valgrind(l1, netdir)
@unittest.skipIf(VALGRIND, "valgrind files can't be written since we rmdir")
@unittest.skipIf(TEST_NETWORK != "regtest", "needs bitcoin mainnet")
def test_new_node_is_mainnet(node_factory):
    """Test that an empty directory causes us to be on mainnet"""
    l1 = node_factory.get_node(start=False, may_fail=True)
    basedir = l1.daemon.opts.get("lightning-dir")
    netdir = os.path.join(basedir, "bitcoin")
    # Wipe everything so the daemon sees a completely fresh directory.
    shutil.rmtree(basedir)
    # Don't suppress upgrade (though it shouldn't happen!)
    del l1.daemon.opts['allow-deprecated-apis']
    # We want to test default network
    del l1.daemon.opts['network']
    # Default is mainnet while bitcoind runs regtest: startup fails, fine.
    with pytest.raises(ValueError):
        l1.start()
    def isfile(dirname, fname):
        return os.path.isfile(os.path.join(dirname, fname))
    # hsm_secret lands in the per-network dir; pid file stays at top level.
    assert isfile(netdir, "hsm_secret")
    assert not isfile(basedir, "hsm_secret")
    assert not isfile(netdir, "lightningd-bitcoin.pid")
    assert isfile(basedir, "lightningd-bitcoin.pid")
def test_unicode_rpc(node_factory, executor, bitcoind):
    """Non-ASCII (emoji) labels and descriptions survive the RPC round-trip."""
    node = node_factory.get_node()
    desc = "Some candy 🍬 and a nice glass of milk 🥛."
    node.rpc.invoice(msatoshi=42, label=desc, description=desc)
    # Exactly one invoice must exist, carrying the text unmangled.
    inv = only_one(node.rpc.listinvoices()['invoices'])
    assert inv['description'] == desc
    assert inv['label'] == desc
@unittest.skipIf(VALGRIND, "Testing pyln doesn't exercise anything interesting in the c code.")
def test_unix_socket_path_length(node_factory, bitcoind, directory, executor, db_provider, test_base_dir):
    """RPC still works when lightning-dir makes the socket path very long."""
    # Deliberately long directory name; unix socket paths have a small OS
    # limit (~104-108 bytes depending on platform -- TODO confirm).
    lightning_dir = os.path.join(directory, "anode" + "far" * 30 + "away")
    os.makedirs(lightning_dir)
    db = db_provider.get_db(lightning_dir, "test_unix_socket_path_length", 1)
    # Construct the node by hand so we control the directory exactly.
    l1 = LightningNode(1, lightning_dir, bitcoind, executor, VALGRIND, db=db, port=node_factory.get_next_port())
    # `LightningNode.start()` internally calls `LightningRpc.getinfo()` which
    # exercises the socket logic, and raises an issue if it fails.
    l1.start()
    # Let's just call it again to make sure it really works.
    l1.rpc.listconfigs()
    l1.stop()
def test_waitblockheight(node_factory, executor, bitcoind):
    """waitblockheight returns at/below current height and blocks above it."""
    node = node_factory.get_node()
    sync_blockheight(bitcoind, [node])
    blockheight = node.rpc.getinfo()['blockheight']
    # Should succeed without waiting.
    node.rpc.waitblockheight(blockheight - 2)
    node.rpc.waitblockheight(blockheight - 1)
    node.rpc.waitblockheight(blockheight)
    # Should not succeed yet.
    fut2 = executor.submit(node.rpc.waitblockheight, blockheight + 2)
    fut1 = executor.submit(node.rpc.waitblockheight, blockheight + 1)
    assert not fut1.done()
    assert not fut2.done()
    # Should take about ~1second and time out.
    with pytest.raises(RpcError):
        node.rpc.waitblockheight(blockheight + 2, 1)
    # Others should still not be done.
    assert not fut1.done()
    assert not fut2.done()
    # Trigger just one more block: releases fut1 (+1) but not fut2 (+2).
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [node])
    fut1.result(5)
    assert not fut2.done()
    # One more block reaches blockheight + 2, releasing fut2.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [node])
    fut2.result(5)
@unittest.skipIf(not DEVELOPER, "Needs dev-sendcustommsg")
def test_sendcustommsg(node_factory):
    """Check that we can send custommsgs to peers in various states.

    `l2` is the node under test. `l1` has a channel with `l2` and should
    therefore be attached to `channeld`. `l4` is just connected, so it should
    be attached to `openingd`. `l3` has a channel open, but is disconnected
    and we can't send to it.
    """
    opts = {'log-level': 'io', 'plugin': [
        os.path.join(os.path.dirname(__file__), "plugins", "custommsg_b.py"),
        os.path.join(os.path.dirname(__file__), "plugins", "custommsg_a.py")
    ]}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)
    node_factory.join_nodes([l1, l2, l3])
    l2.connect(l4)
    l3.stop()
    # 32 bytes of payload; serialized form prepends '04070020' -- presumably
    # type 0x0407 plus a 0x0020 (=32) length -- confirm against wire spec.
    msg = r'ff' * 32
    serialized = r'04070020' + msg
    # This address doesn't exist so we should get an error when we try sending
    # a message to it.
    node_id = '02df5ffe895c778e10f7742a6c5b8a0cefbe9465df58b92fadeb883752c8107c8f'
    with pytest.raises(RpcError, match=r'No such peer'):
        l1.rpc.dev_sendcustommsg(node_id, msg)
    # `l3` is disconnected and we can't send messages to it
    assert(not l2.rpc.listpeers(l3.info['id'])['peers'][0]['connected'])
    with pytest.raises(RpcError, match=r'Peer is not connected'):
        l2.rpc.dev_sendcustommsg(l3.info['id'], msg)
    # We should not be able to send a bogus `ping` message, since it collides
    # with a message defined in the spec, and could potentially mess up our
    # internal state.
    with pytest.raises(RpcError, match=r'Cannot send messages of type 18 .WIRE_PING.'):
        l2.rpc.dev_sendcustommsg(l2.info['id'], r'0012')
    # The sendcustommsg RPC call is currently limited to odd-typed messages,
    # since they will not result in disconnections or even worse channel
    # failures.
    with pytest.raises(RpcError, match=r'Cannot send even-typed [0-9]+ custom message'):
        l2.rpc.dev_sendcustommsg(l2.info['id'], r'00FE')
    # This should work since the peer is currently owned by `channeld`
    l2.rpc.dev_sendcustommsg(l1.info['id'], msg)
    l2.daemon.wait_for_log(
        r'{peer_id}-{owner}-chan#[0-9]: \[OUT\] {serialized}'.format(
            owner='channeld', serialized=serialized, peer_id=l1.info['id']
        )
    )
    l1.daemon.wait_for_log(r'\[IN\] {}'.format(serialized))
    # Both custommsg plugins must see the message.
    l1.daemon.wait_for_logs([
        r'Got custommessage_a {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id']),
        r'Got custommessage_b {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id'])
    ])
    # This should work since the peer is currently owned by `openingd`
    l2.rpc.dev_sendcustommsg(l4.info['id'], msg)
    l2.daemon.wait_for_log(
        r'{peer_id}-{owner}-chan#[0-9]: \[OUT\] {serialized}'.format(
            owner='openingd', serialized=serialized, peer_id=l4.info['id']
        )
    )
    l4.daemon.wait_for_log(r'\[IN\] {}'.format(serialized))
    l4.daemon.wait_for_logs([
        r'Got custommessage_a {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id']),
        r'Got custommessage_b {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id']),
    ])
def test_sendonionmessage(node_factory):
    """sendonionmessage delivery: by node id, by scid, and via a blinded path."""
    l1, l2, l3 = node_factory.line_graph(3, opts={'experimental-onion-messages': None})
    blindedpathtool = os.path.join(os.path.dirname(__file__), "..", "devtools", "blindedpath")
    # Simplest case: address each hop by node id.
    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id']},
                  {'id': l3.info['id']}]})
    assert l3.daemon.wait_for_log('Got onionmsg')
    # Now by SCID.
    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id'],
                   'short_channel_id': l2.get_channel_scid(l3)},
                  {'id': l3.info['id']}]})
    assert l3.daemon.wait_for_log('Got onionmsg')
    # Now test blinded path.
    output = subprocess.check_output(
        [blindedpathtool, '--simple-output', 'create', l2.info['id'], l3.info['id']]
    ).decode('ASCII').strip()
    # First line is blinding, then <peerid> then <encblob>.
    blinding, p1, p1enc, p2 = output.split('\n')
    # First hop can't be blinded!
    assert p1 == l2.info['id']
    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id'],
                   'blinding': blinding,
                   'enctlv': p1enc},
                  {'id': p2}]})
    assert l3.daemon.wait_for_log('Got onionmsg')
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs sendonionmessage")
def test_sendonionmessage_reply(node_factory):
    """An onion message carrying a reply_path gets answered back to l1."""
    blindedpathtool = os.path.join(os.path.dirname(__file__), "..", "devtools", "blindedpath")
    plugin = os.path.join(os.path.dirname(__file__), "plugins", "onionmessage-reply.py")
    l1, l2, l3 = node_factory.line_graph(3, opts={'plugin': plugin})
    # Make reply path
    output = subprocess.check_output(
        [blindedpathtool, '--simple-output', 'create', l2.info['id'], l1.info['id']]
    ).decode('ASCII').strip()
    # First line is blinding, then <peerid> then <encblob>.
    blinding, p1, p1enc, p2 = output.split('\n')
    # First hop can't be blinded!
    assert p1 == l2.info['id']
    # Also tests oversize payload which won't fit in 1366-byte onion.
    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id']},
                  {'id': l3.info['id'],
                   'invoice': '77' * 15000}],
                 'reply_path':
                 {'blinding': blinding,
                  'path': [{'id': p1, 'enctlv': p1enc}, {'id': p2}]}})
    assert l3.daemon.wait_for_log('Got onionmsg reply_blinding reply_path')
    assert l3.daemon.wait_for_log("Got onion_message invoice '{}'".format('77' * 15000))
    assert l3.daemon.wait_for_log('Sent reply via')
    assert l1.daemon.wait_for_log('Got onionmsg')
@unittest.skipIf(not DEVELOPER, "needs --dev-force-privkey")
def test_getsharedsecret(node_factory):
    """getsharedsecret derives the same ECDH secret on both sides."""
    # BOLT 8 test-vector private key for l1; l2 keeps a random one.
    opts = [
        {"dev-force-privkey": "1212121212121212121212121212121212121212121212121212121212121212"},
        {}
    ]
    l1, l2 = node_factory.get_nodes(2, opts=opts)
    # ECDH against the BOLT 8 test-vector public key must match the vector.
    vector_pubkey = "028d7500dd4c12685d1f568b4c2b5048e8534b873319f3a8daa612b469132ec7f7"
    secret = l1.rpc.getsharedsecret(vector_pubkey)['shared_secret']
    assert secret == "1e2fb3c8fe8fb9f262f649f64d26ecf0f2c0a805a767cf02dc2d77a6ef1fdcc3"
    # Drop the forced key and restart l1 with a fresh one.
    del l1.daemon.opts["dev-force-privkey"]
    l1.restart()
    # Each side, knowing only the other's public key, derives the same secret.
    s12 = l1.rpc.getsharedsecret(l2.info["id"])["shared_secret"]
    s21 = l2.rpc.getsharedsecret(l1.info["id"])["shared_secret"]
    assert s12 == s21
def test_commitfee_option(node_factory):
    """Sanity check for the --commit-fee startup option."""
    l1, l2 = node_factory.get_nodes(2, opts=[{"commit-fee": "200"}, {}])
    mock_wu = 5000
    for node in (l1, l2):
        node.set_feerates((mock_wu, 0, 0, 0), True)
    fees_l1 = l1.rpc.call("estimatefees")["unilateral_close"]
    fees_l2 = l2.rpc.call("estimatefees")["unilateral_close"]
    # commit-fee=200% doubles l1's feerate; the factor 4 converts WU->VB.
    assert fees_l1 == 2 * fees_l2 == 2 * 4 * mock_wu
def test_listtransactions(node_factory):
    """Sanity check for the listtransactions RPC command"""
    l1, l2 = node_factory.get_nodes(2, opts=[{}, {}])
    wallettxid = l1.openchannel(l2, 10**5)["wallettxid"]
    input_txids = {
        inp["txid"]
        for tx in l1.rpc.listtransactions()["transactions"]
        for inp in tx["inputs"]
    }
    # The funding transaction spent a wallet output, so its txid must show
    # up among the inputs, little-endian like bitcoind and explorers.
    assert wallettxid in input_txids
def test_listfunds(node_factory):
    """Test listfunds command."""
    l1, l2 = node_factory.get_nodes(2, opts=[{}, {}])
    open_txid = l1.openchannel(l2, 10**5)["wallettxid"]
    # Default view shows only unspent outputs: just one remains.
    assert len(l1.rpc.listfunds()["outputs"]) == 1
    # spent=True adds the output consumed by the channel open, so two total.
    all_outputs = l1.rpc.listfunds(spent=True)["outputs"]
    assert len(all_outputs) == 2
    assert open_txid in [entry['txid'] for entry in all_outputs]
from bitcoin.rpc import RawProxy
from decimal import Decimal
from fixtures import *
from fixtures import LightningNode, TEST_NETWORK
from flaky import flaky
from pyln.client import RpcError
from threading import Event
from pyln.testing.utils import (
DEVELOPER, TIMEOUT, VALGRIND, DEPRECATED_APIS, sync_blockheight, only_one,
wait_for, TailableProc, env
)
from utils import (
check_coin_moves, account_balance
)
from ephemeral_port_reserve import reserve
from utils import EXPERIMENTAL_FEATURES
import json
import os
import pytest
import re
import shutil
import signal
import socket
import subprocess
import time
import unittest
@unittest.skipIf(not DEVELOPER, "needs --dev-disconnect")
def test_stop_pending_fundchannel(node_factory, executor):
    """Stopping l1 while a fundchannel is stuck in-flight must shut down cleanly."""
    l1, l2 = node_factory.get_nodes(2)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Freeze l2 so the fundchannel stalls right after WIRE_OPEN_CHANNEL.
    os.kill(l2.daemon.proc.pid, signal.SIGSTOP)
    # Run fundchannel in the background; it cannot complete while l2 is frozen.
    executor.submit(l1.fundchannel, l2, 10**6)
    l1.daemon.wait_for_log('peer_out WIRE_OPEN_CHANNEL')
    # Stop l1 with the channel open still pending.
    l1.rpc.stop()
    # Thaw l2 so it too can stop cleanly.
    os.kill(l2.daemon.proc.pid, signal.SIGCONT)
    l2.rpc.stop()
def test_names(node_factory):
    """Node alias and color are logged deterministically for the dev node ids."""
    # (node id, expected alias, expected color) for the fixture keys, in order.
    expected = [
        ('0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518', 'JUNIORBEAM', '0266e4'),
        ('022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59', 'SILENTARTIST', '022d22'),
        ('035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d', 'HOPPINGFIRE', '035d2b'),
        ('0382ce59ebf18be7d84677c2e35f23294b9992ceca95491fcf8a56c6cb2d9de199', 'JUNIORFELONY', '0382ce'),
        ('032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e', 'SOMBERFIRE', '032cf1'),
        ('0265b6ab5ec860cd257865d61ef0bbf5b3339c36cbda8b26b74e7f1dca490b6518', 'LOUDPHOTO', '0265b6'),
    ]
    nodes = node_factory.get_nodes(len(expected))
    for node, (key, alias, color) in zip(nodes, expected):
        pattern = r'public key {}, alias {}.* \(color #{}\)'.format(key, alias, color)
        assert node.daemon.is_in_log(pattern)
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "This migration is based on a sqlite3 snapshot")
def test_db_upgrade(node_factory):
    """A fresh db records upgrade_from=-1; an old v1 db records the migration."""
    l1 = node_factory.get_node()
    l1.stop()
    version = subprocess.check_output(['lightningd/lightningd',
                                       '--version']).decode('utf-8').splitlines()[0]
    # Fresh database: exactly one upgrade entry, from "no version" (-1).
    upgrades = l1.db_query("SELECT * from db_upgrades;")
    assert len(upgrades) == 1
    assert(upgrades[0]['upgrade_from'] == -1)
    assert(upgrades[0]['lightning_version'] == version)
    # Replace the db with a minimal fake at schema version 1.
    os.unlink(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
    l1.db_manip("CREATE TABLE version (version INTEGER);")
    l1.db_manip("INSERT INTO version VALUES (1);")
    l1.start()
    # Restart migrates v1 -> current and records upgrade_from=1.
    upgrades = l1.db_query("SELECT * from db_upgrades;")
    assert len(upgrades) == 1
    assert(upgrades[0]['upgrade_from'] == 1)
    assert(upgrades[0]['lightning_version'] == version)
def test_bitcoin_failure(node_factory, bitcoind):
    """lightningd retries failing bitcoin-cli calls; refuses blocksonly bitcoind."""
    l1 = node_factory.get_node()
    sync_blockheight(bitcoind, [l1])
    def crash_bitcoincli(r):
        return {'error': 'go away'}
    # This is not a JSON-RPC response by purpose
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', crash_bitcoincli)
    l1.daemon.rpcproxy.mock_rpc('getblockhash', crash_bitcoincli)
    # This should cause both estimatefee and getblockhash fail
    l1.daemon.wait_for_logs(['Unable to estimate .* fee',
                             'getblockhash .* exited with status 1'])
    # And they should retry!
    l1.daemon.wait_for_logs(['Unable to estimate .* fee',
                             'getblockhash .* exited with status 1'])
    # Restore, then it should recover and get blockheight.
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', None)
    l1.daemon.rpcproxy.mock_rpc('getblockhash', None)
    bitcoind.generate_block(5)
    sync_blockheight(bitcoind, [l1])
    # We refuse to start if bitcoind is in `blocksonly`
    l1.stop()
    bitcoind.stop()
    bitcoind.cmd_line += ["-blocksonly"]
    bitcoind.start()
    # A new node must refuse to start, with a clear message on stderr.
    l2 = node_factory.get_node(start=False, expect_fail=True)
    with pytest.raises(ValueError):
        l2.start(stderr=subprocess.PIPE)
    assert l2.daemon.is_in_stderr(r".*deactivating transaction relay is not"
                                  " supported.") is not None
def test_bitcoin_ibd(node_factory, bitcoind):
    """getinfo carries a warning while bitcoind reports initial block download."""
    # Pretend bitcoind is still in IBD by patching getblockchaininfo.
    chaininfo = bitcoind.rpc.getblockchaininfo()
    chaininfo['initialblockdownload'] = True

    node = node_factory.get_node(start=False)
    node.daemon.rpcproxy.mock_rpc('getblockchaininfo', chaininfo)
    node.start(wait_for_bitcoind_sync=False)

    # This happens before the Starting message start() waits for.
    assert node.daemon.is_in_log('Waiting for initial block download')
    assert 'warning_bitcoind_sync' in node.rpc.getinfo()

    # Drop the mock: IBD "finishes" and the warning disappears.
    node.daemon.rpcproxy.mock_rpc('getblockchaininfo', None)
    node.daemon.wait_for_log('Bitcoin backend now synced')
    assert 'warning_bitcoind_sync' not in node.rpc.getinfo()
def test_lightningd_still_loading(node_factory, bitcoind, executor):
    """While l1 is still processing blocks after a restart, wallet-touching
    RPCs are refused and incoming channel traffic is deferred until synced."""
    mock_release = Event()

    # This is slow enough that we're going to notice.
    def mock_getblock(r):
        conf_file = os.path.join(bitcoind.bitcoin_dir, 'bitcoin.conf')
        brpc = RawProxy(btc_conf_file=conf_file)
        # Stall only on the tip block (slow_blockid is bound below, before
        # l1 restarts); released via mock_release at the end of the test.
        if r['params'][0] == slow_blockid:
            mock_release.wait(TIMEOUT)
        return {
            "result": brpc._call(r['method'], *r['params']),
            "error": None,
            "id": r['id']
        }

    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'may_reconnect': True,
                                                  'wait_for_bitcoind_sync': False},
                                                 {'may_reconnect': True,
                                                  'wait_for_bitcoind_sync': False},
                                                 {}])

    # l1 <-> l2 channel, with funds on both sides.
    node_factory.join_nodes([l1, l2])
    l1.pay(l2, 10**9 // 2)

    # Mine blocks while l1 is down so it must catch up on restart.
    l1.stop()
    bitcoind.generate_block(2)
    sync_blockheight(bitcoind, [l2, l3])

    # Make l1's fetch of the current tip stall.
    slow_blockid = bitcoind.rpc.getblockhash(bitcoind.rpc.getblockcount())
    l1.daemon.rpcproxy.mock_rpc('getblock', mock_getblock)

    l1.start(wait_for_bitcoind_sync=False)

    # bitcoind itself is synced; it's lightningd that is still loading.
    assert 'warning_bitcoind_sync' not in l1.rpc.getinfo()
    assert 'warning_lightningd_sync' in l1.rpc.getinfo()

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])

    # Payments will fail.  FIXME: More informative msg?
    with pytest.raises(RpcError, match=r'TEMPORARY_NODE_FAILURE'):
        l1.pay(l2, 1000)

    # Can't fund a new channel.
    l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
    with pytest.raises(RpcError, match=r'304'):
        if l1.config('experimental-dual-fund'):
            psbt = l1.rpc.fundpsbt('10000sat', '253perkw', 250)['psbt']
            l1.rpc.openchannel_init(l3.info['id'], '10000sat', psbt)
        else:
            l1.rpc.fundchannel_start(l3.info['id'], '10000sat')

    # Can't spend from the wallet while unsynced either (error 304).
    with pytest.raises(RpcError, match=r'304'):
        l1.rpc.txprepare([{l1.rpc.newaddr()['bech32']: '200000000sat'}])

    # An incoming payment is held until sync finishes.
    fut = executor.submit(l2.pay, l1, 1000)
    l1.daemon.wait_for_log("Deferring incoming commit until we sync")

    # Release the stalled getblock: sync completes and the payment lands.
    mock_release.set()
    fut.result()

    assert 'warning_lightningd_sync' not in l1.rpc.getinfo()

    # Once synced the same txprepare fails with a different code (301).
    with pytest.raises(RpcError, match=r'301'):
        l1.rpc.txprepare([{l1.rpc.newaddr()['bech32']: '200000000sat'}])

    l1.pay(l2, 1000)
def test_ping(node_factory):
    """Exercise the ping RPC both before and after a channel exists."""
    l1, l2 = node_factory.line_graph(2, fundchannel=False)

    def run_pings(src, dst):
        # totlen covers the whole pong message for the requested sizes.
        for sent, want_pong, total in ((0, 0, 4),
                                       (1000, 0, 4),
                                       (1000, 1000, 1004),
                                       (1000, 65531, 65535)):
            assert src.rpc.ping(dst.info['id'], sent, want_pong)['totlen'] == total

        # Pong can't fit in a message: no reply expected, totlen is 0.
        for s in range(65532, 65536):
            assert src.rpc.ping(dst.info['id'], 1000, s)['totlen'] == 0

        # Ping itself too big.
        with pytest.raises(RpcError, match=r'oversize ping'):
            src.rpc.ping(dst.info['id'], 65530, 1)

    # Ping works with no channel yet...
    run_pings(l1, l2)
    if DEVELOPER:
        l1.daemon.wait_for_log(r'Got pong 1000 bytes \({}\.\.\.\)'
                               .format(l2.info['version']), timeout=1)

    # ...and still works once a channel is open.
    l1.fundchannel(l2, 10**5)
    run_pings(l1, l2)
    if DEVELOPER:
        l1.daemon.wait_for_log(r'Got pong 1000 bytes \({}\.\.\.\)'
                               .format(l2.info['version']))
@unittest.skipIf(not DEVELOPER, "needs --dev-disconnect")
def test_htlc_sig_persistence(node_factory, bitcoind, executor):
    """Interrupt a payment right after commitment signing and check the
    stored HTLC signature survives a restart and drives on-chain recovery."""
    # Identical feerates avoid a gratuitous fee-update commitment.
    l1 = node_factory.get_node(options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    # l2 drops the connection right after sending WIRE_COMMITMENT_SIGNED.
    l2 = node_factory.get_node(disconnect=['+WIRE_COMMITMENT_SIGNED'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)
    f = executor.submit(l1.pay, l2, 31337000)
    l1.daemon.wait_for_log(r'HTLC out 0 RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
    l1.stop()

    # `pay` call is lost
    with pytest.raises(RpcError):
        f.result()

    # We should have the HTLC sig
    assert(len(l1.db_query("SELECT * FROM htlc_sigs;")) == 1)

    # This should reload the htlc_sig
    l2.rpc.dev_fail(l1.info['id'])
    # Make sure it broadcasts to chain.
    l2.wait_for_channel_onchain(l1.info['id'])
    l2.stop()
    bitcoind.generate_block(1)
    l1.start()

    assert l1.daemon.is_in_log(r'Loaded 1 HTLC signatures from DB')
    l1.daemon.wait_for_logs([
        r'Peer permanent failure in CHANNELD_NORMAL: Funding transaction spent',
        r'Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US'
    ])
    # Past the HTLC timeout l1 sweeps the HTLC back to its wallet.
    bitcoind.generate_block(5)
    l1.daemon.wait_for_log("Broadcasting OUR_HTLC_TIMEOUT_TO_US")
    time.sleep(3)
    bitcoind.generate_block(1)
    l1.daemon.wait_for_logs([
        r'Owning output . (\d+)sat .SEGWIT. txid',
    ])

    # We should now have a) the change from funding, b) the
    # unilateral to us, and c) the HTLC respend to us
    assert len(l1.rpc.listfunds()['outputs']) == 3
@unittest.skipIf(not DEVELOPER, "needs to deactivate shadow routing")
def test_htlc_out_timeout(node_factory, bitcoind, executor):
    """An outgoing HTLC that can't be resolved before its deadline forces the
    channel on-chain; the HTLC is then recovered via its timeout transaction."""
    # HTLC 1->2, 1 fails after it's irrevocably committed, can't reconnect
    disconnects = ['@WIRE_REVOKE_AND_ACK']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects,
                               options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chanid, _ = l1.fundchannel(l2, 10**6)

    l1.wait_channel_active(chanid)

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_htlc_out_timeout', 'desc')['bolt11']
    assert only_one(l2.rpc.listinvoices('test_htlc_out_timeout')['invoices'])['status'] == 'unpaid'

    # Pay without a shadow route so the CLTV deadline is predictable.
    executor.submit(l1.rpc.dev_pay, inv, use_shadow=False)

    # l1 drops the connection right after the HTLC is committed.
    l1.daemon.wait_for_log('dev_disconnect: @WIRE_REVOKE_AND_ACK')

    # Each shadow-route hop (if any) adds 6 blocks to the deadline.
    status = only_one(l1.rpc.call('paystatus')['pay'])
    if 'shadow' in status:
        shadowlen = 6 * status['shadow'].count('Added 6 cltv delay for shadow')
    else:
        shadowlen = 0

    # One block short of the deadline: nothing should happen yet.
    bitcoind.generate_block(5 + 1 + shadowlen)
    time.sleep(3)
    assert not l1.daemon.is_in_log('hit deadline')
    # Crossing the deadline forces l1 to drop the channel to chain.
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Offered HTLC 0 SENT_ADD_ACK_REVOCATION cltv .* hit deadline')
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # l1 resolves the HTLC via its timeout tx; its main output is timelocked.
    l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 0 blocks',
                             'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'])
    l1.daemon.wait_for_log('sendrawtx exit 0')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(4)
    # Both delayed returns (unilateral output and HTLC-timeout output) sweep.
    l1.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET',
                             'Broadcasting OUR_DELAYED_RETURN_TO_WALLET',
                             'sendrawtx exit 0',
                             'sendrawtx exit 0'])

    # After 100 blocks both sides forget the channel.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs to deactivate shadow routing")
def test_htlc_in_timeout(node_factory, bitcoind, executor):
    """A fulfilled incoming HTLC whose removal is stuck forces l2 on-chain
    near the deadline, where it collects via its HTLC-success transaction."""
    # l1 disconnects before sending its second WIRE_REVOKE_AND_ACK.
    disconnects = ['-WIRE_REVOKE_AND_ACK*2']
    l1 = node_factory.get_node(disconnect=disconnects,
                               options={'dev-no-reconnect': None},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chanid, _ = l1.fundchannel(l2, 10**6)

    l1.wait_channel_active(chanid)
    sync_blockheight(bitcoind, [l1, l2])

    amt = 200000000
    inv = l2.rpc.invoice(amt, 'test_htlc_in_timeout', 'desc')['bolt11']
    assert only_one(l2.rpc.listinvoices('test_htlc_in_timeout')['invoices'])['status'] == 'unpaid'

    # Pay without a shadow route so the CLTV deadline is predictable.
    executor.submit(l1.rpc.dev_pay, inv, use_shadow=False)

    # l1 will disconnect and not reconnect.
    l1.daemon.wait_for_log('dev_disconnect: -WIRE_REVOKE_AND_ACK')

    # Deadline HTLC expiry minus 1/2 cltv-expiry delta (rounded up) (== cltv - 3). cltv is 5+1.
    # shadow route can add extra blocks!
    status = only_one(l1.rpc.call('paystatus')['pay'])
    if 'shadow' in status:
        shadowlen = 6 * status['shadow'].count('Added 6 cltv delay for shadow')
    else:
        shadowlen = 0
    bitcoind.generate_block(2 + shadowlen)
    assert not l2.daemon.is_in_log('hit deadline')
    # One more block crosses the deadline: l2 goes on-chain.
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log('Fulfilled HTLC 0 SENT_REMOVE_COMMIT cltv .* hit deadline')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    l2.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log(' to ONCHAIN')

    # L2 will collect HTLC (iff no shadow route)
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)
    # The success-tx output is itself timelocked for 5 blocks before sweep.
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(4)
    l2.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
    l2.daemon.wait_for_log('sendrawtx exit 0')

    # Now, 100 blocks it should be both done.
    bitcoind.generate_block(100)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not TEST_NETWORK == 'regtest', 'must be on bitcoin network')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_bech32_funding(node_factory, chainparams):
    """Funding via a bech32 address produces a P2WPKH wallet output that the
    funding tx then spends."""
    # Don't get any funds from previous runs.
    l1, l2 = node_factory.line_graph(2, opts={'random_hsm': True}, fundchannel=False)

    opened = l1.openchannel(l2, 25000, 'bech32')
    address = opened['address']
    assert address.startswith(chainparams['bip173_prefix'])

    wallettx = l1.bitcoin.rpc.getrawtransaction(opened['wallettxid'], True)
    fundingtx = l1.bitcoin.rpc.decoderawtransaction(opened['fundingtx']['tx'])

    def pays_our_bech32(spk):
        # Native segwit v0 keyhash output paying the address we were given.
        return spk['type'] == 'witness_v0_keyhash' and only_one(spk['addresses']) == address

    assert any(pays_our_bech32(out['scriptPubKey']) for out in wallettx['vout'])
    # The funding tx spends the wallet tx we just created.
    assert only_one(fundingtx['vin'])['txid'] == opened['wallettxid']
def test_withdraw_misc(node_factory, bitcoind, chainparams):
    """Exercise `withdraw`: argument validation, address formats (P2WPKH,
    P2WSH, invalid bech32 from BIP173), output status bookkeeping in the DB,
    and the resulting coin movements logged by the plugin."""
    def dont_spend_outputs(n, txid):
        # Reserve both outputs of txid so later withdrawals can't reuse them.
        n.rpc.reserveinputs(bitcoind.rpc.createpsbt([{'txid': txid,
                                                      'vout': 0},
                                                     {'txid': txid,
                                                      'vout': 1}], []))

    coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    amount = 2000000
    l1 = node_factory.get_node(random_hsm=True,
                               options={'plugin': coin_mvt_plugin},
                               feerates=(7500, 7500, 7500, 7500))
    l2 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr()['bech32']

    # Add some funds to withdraw later
    for i in range(10):
        l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8)

    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)

    # Reach around into the db to check that outputs were added
    # (status=0 means unspent/available).
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10

    waddr = l1.bitcoin.getnewaddress()
    # Now attempt to withdraw some (making sure we collect multiple inputs)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('not an address', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, 'not an amount')
    with pytest.raises(RpcError):
        l1.rpc.withdraw(waddr, -amount)
    with pytest.raises(RpcError, match=r'Could not afford'):
        l1.rpc.withdraw(waddr, amount * 100)

    out = l1.rpc.withdraw(waddr, amount)

    # Make sure bitcoind received the withdrawal
    unspent = l1.bitcoin.rpc.listunspent(0)
    withdrawal = [u for u in unspent if u['txid'] == out['txid']]

    assert(withdrawal[0]['amount'] == Decimal('0.02'))

    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])

    # Now make sure two of them were marked as spent (status=2).
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2
    dont_spend_outputs(l1, out['txid'])

    # Now send some money to l2.
    # lightningd uses P2SH-P2WPKH
    waddr = l2.rpc.newaddr('bech32')['bech32']
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1)

    # Make sure l2 received the withdrawal.
    wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
    outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
    assert only_one(outputs)['value'] == amount

    # Now make sure an additional two of them were marked as spent
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4

    # Everything from here on uses hard-coded regtest addresses.
    if chainparams['name'] != 'regtest':
        return

    # Simple test for withdrawal to P2WPKH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
    # Malformed variants of the address must be refused.
    with pytest.raises(RpcError):
        l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx', amount)
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    # Now make sure additional two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6

    # Simple test for withdrawal to P2WSH
    # Address from: https://bc-2.jp/tools/bech32demo/index.html
    waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
    with pytest.raises(RpcError):
        l1.rpc.withdraw('xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq', amount)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx', amount)
    out = l1.rpc.withdraw(waddr, amount)
    bitcoind.generate_block(1, wait_for_mempool=1)
    sync_blockheight(bitcoind, [l1])
    dont_spend_outputs(l1, out['txid'])
    # Now make sure additional two of them were marked as spent
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8

    # failure testing for invalid SegWit addresses, from BIP173
    # HRP character out of range
    with pytest.raises(RpcError):
        l1.rpc.withdraw(' 1nwldj5', amount)
    # overall max length exceeded
    with pytest.raises(RpcError):
        l1.rpc.withdraw('an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx', amount)
    # No separator character
    with pytest.raises(RpcError):
        l1.rpc.withdraw('pzry9x0s0muk', amount)
    # Empty HRP
    with pytest.raises(RpcError):
        l1.rpc.withdraw('1pzry9x0s0muk', amount)
    # Invalid witness version
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2', amount)
    # Invalid program length for witness version 0 (per BIP141)
    with pytest.raises(RpcError):
        l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', amount)
    # Mixed case
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7', amount)
    # Non-zero padding in 8-to-5 conversion
    with pytest.raises(RpcError):
        l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv', amount)

    # Should have 2 outputs available.
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 2

    # Unreserve everything.
    inputs = []
    for out in l1.rpc.listfunds()['outputs']:
        if out['reserved']:
            inputs += [{'txid': out['txid'], 'vout': out['output']}]
    l1.rpc.unreserveinputs(bitcoind.rpc.createpsbt(inputs, []))

    # Test withdrawal to self.
    l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
    bitcoind.generate_block(1)
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1

    l1.rpc.withdraw(waddr, 'all', minconf=0)
    assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0

    # This should fail, can't even afford fee.
    with pytest.raises(RpcError, match=r'Could not afford'):
        l1.rpc.withdraw(waddr, 'all')

    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    # Wallet should be empty according to the coin-movement plugin too.
    assert account_balance(l1, 'wallet') == 0

    # Expected ledger of wallet coin movements; nested lists mean "any of
    # these orderings" for the two withdrawal outputs of one tx.
    wallet_moves = [
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 2000000000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993760000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6240000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993760000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993760000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6240000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993760000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993760000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6240000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993760000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        [
            {'type': 'chain_mvt', 'credit': 0, 'debit': 1993400000, 'tag': 'withdrawal'},
            {'type': 'chain_mvt', 'credit': 0, 'debit': 2000000000, 'tag': 'withdrawal'},
        ],
        {'type': 'chain_mvt', 'credit': 0, 'debit': 6600000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 1993400000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 11961240000, 'tag': 'withdrawal'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 13440000, 'tag': 'chain_fees'},
        {'type': 'chain_mvt', 'credit': 11961240000, 'debit': 0, 'tag': 'deposit'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 0, 'tag': 'spend_track'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 11957603000, 'tag': 'withdrawal'},
        {'type': 'chain_mvt', 'credit': 0, 'debit': 3637000, 'tag': 'chain_fees'},
    ]
    check_coin_moves(l1, 'wallet', wallet_moves, chainparams)
def test_io_logging(node_factory, executor):
    """IO-level wire logging can be toggled per subdaemon with SIGUSR1."""
    l1 = node_factory.get_node(options={'log-level': 'io'})
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    # Fund and confirm a channel so payment traffic can flow.
    l1.fundwallet(10**6 + 1000000)
    l1.rpc.fundchannel(l2.info['id'], 10**6)['tx']
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to CHANNELD_NORMAL')
    l2.daemon.wait_for_log(' to CHANNELD_NORMAL')

    # With io logging on, l1's channeld logs the raw wire messages.
    fut = executor.submit(l1.pay, l2, 200000000)
    l1.daemon.wait_for_log(r'channeld.*: \[OUT\] 0080')
    l1.daemon.wait_for_log(r'channeld.*: \[IN\] 0082')
    fut.result(10)

    # SIGUSR1 toggles io logging off for l1's channeld...
    subprocess.run(['kill', '-USR1', l1.subd_pid('channeld')])
    l1.pay(l2, 200000000)
    assert not l1.daemon.is_in_log(r'channeld.*: \[OUT\] 0080',
                                   start=l1.daemon.logsearch_start)
    assert not l1.daemon.is_in_log(r'channeld.*: \[IN\] 0082',
                                   start=l1.daemon.logsearch_start)

    # l2 did not start at io level, so its peer log has no IO entries.
    peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log']
    assert not any(entry['type'] in ('IO_OUT', 'IO_IN') for entry in peerlog)

    # ...and toggles io logging on for l2's channeld.
    subprocess.run(['kill', '-USR1', l2.subd_pid('channeld')])
    l1.pay(l2, 200000000)

    peerlog = only_one(l2.rpc.listpeers(l1.info['id'], "io")['peers'])['log']
    assert any(entry['type'] == 'IO_OUT' for entry in peerlog)
    assert any(entry['type'] == 'IO_IN' for entry in peerlog)
def test_address(node_factory):
    """getinfo address/binding reporting, plus binding to a unix socket."""
    opts = {'dev-allow-localhost': None} if DEVELOPER else None
    l1 = node_factory.get_node(options=opts)

    addr = l1.rpc.getinfo()['address']
    if DEVELOPER:
        # dev-allow-localhost makes us announce the localhost address.
        assert len(addr) == 1
        assert addr[0]['type'] == 'ipv4'
        assert addr[0]['address'] == '127.0.0.1'
        assert int(addr[0]['port']) == l1.port
    else:
        assert len(addr) == 0

    # We always report what we actually bound to.
    bind = l1.rpc.getinfo()['binding']
    assert len(bind) == 1
    assert bind[0]['type'] == 'ipv4'
    assert bind[0]['address'] == '127.0.0.1'
    assert int(bind[0]['port']) == l1.port

    # bind-addr can point at a local unix socket...
    l1.stop()
    l1.daemon.opts['bind-addr'] = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "sock")
    l1.start()
    l2 = node_factory.get_node()
    l2.rpc.connect(l1.info['id'], l1.daemon.opts['bind-addr'])

    # ...and so can addr.
    l1.stop()
    del l1.daemon.opts['bind-addr']
    l1.daemon.opts['addr'] = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "sock")
    l1.daemon.start()
    l2 = node_factory.get_node()
    l2.rpc.connect(l1.info['id'], l1.daemon.opts['addr'])
@unittest.skipIf(DEPRECATED_APIS, "Tests the --allow-deprecated-apis config")
def test_listconfigs(node_factory, bitcoind, chainparams):
    """Sanity-check listconfigs output: known flags, truncation of an
    over-long log-prefix, and per-config lookup of every entry."""
    l1 = node_factory.get_node(options={'log-prefix': 'lightning1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'})

    configs = l1.rpc.listconfigs()
    assert configs['allow-deprecated-apis'] is False
    assert configs['network'] == chainparams['name']
    # BUGFIX: the duplicated 'ignore-fee-limits' assertion is dropped.
    assert configs['ignore-fee-limits'] is False
    # An over-long prefix is truncated with a trailing '...'.
    assert configs['log-prefix'] == 'lightning1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx...'
    # 'wumbo' is only an alias; the canonical name is 'large-channels'.
    assert 'wumbo' not in configs
    assert configs['large-channels'] is False

    # Test one at a time.
    for c in configs.keys():
        # BUGFIX: this line was truncated to `if c.startswith('` (a syntax
        # error); restore the skip of '#'-prefixed comment pseudo-entries.
        if c.startswith('#'):
            continue
        oneconfig = l1.rpc.listconfigs(config=c)
        assert oneconfig[c] == configs[c]
def test_listconfigs_plugins(node_factory, bitcoind, chainparams):
    """Every important plugin is listed with a name and an executable path."""
    l1 = node_factory.get_node()

    # assert that we have pay plugin and that plugins have a name and path
    important = l1.rpc.listconfigs()['important-plugins']
    assert important
    assert sum(1 for p in important if p['name'] == "pay") == 1
    for plugin in important:
        assert plugin['name'] and len(plugin['name']) > 0
        assert plugin['path'] and len(plugin['path']) > 0
        # The reported path must exist and be executable.
        assert os.path.isfile(plugin['path']) and os.access(plugin['path'], os.X_OK)
def test_multirpc(node_factory):
    """A burst of newline-separated JSON-RPC requests on one socket all get
    answered."""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    commands = [
        b'{"id":1,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":2,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":3,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":4,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":5,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"id":6,"jsonrpc":"2.0","method":"listpeers","params":[]}',
        b'{"method": "invoice", "params": [100, "foo", "foo"], "jsonrpc": "2.0", "id": 7 }',
        b'{"method": "waitinvoice", "params": ["foo"], "jsonrpc" : "2.0", "id": 8 }',
        b'{"method": "delinvoice", "params": ["foo", "unpaid"], "jsonrpc" : "2.0", "id": 9 }',
    ]

    # Fire everything off in one write, then drain one reply per request.
    sock.sendall(b'\n'.join(commands))

    leftover = b''
    for _ in commands:
        _, leftover = l1.rpc._readobj(sock, leftover)
    sock.close()
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_multiplexed_rpc(node_factory):
    """Simultaneous slow RPC commands all complete, and each reply carries
    the id of its request."""
    l1 = node_factory.get_node()

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # Neighbouring ones may be in or out of order.
    commands = [
        b'{"id":1,"jsonrpc":"2.0","method":"dev","params":["slowcmd",2000]}',
        b'{"id":1,"jsonrpc":"2.0","method":"dev","params":["slowcmd",2000]}',
        b'{"id":2,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1500]}',
        b'{"id":2,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1500]}',
        b'{"id":3,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1000]}',
        b'{"id":3,"jsonrpc":"2.0","method":"dev","params":["slowcmd",1000]}',
        b'{"id":4,"jsonrpc":"2.0","method":"dev","params":["slowcmd",500]}',
        b'{"id":4,"jsonrpc":"2.0","method":"dev","params":["slowcmd",500]}'
    ]

    # Send the whole batch at once.
    sock.sendall(b'\n'.join(commands))

    # They will return in the same order, since they start immediately
    # (delaying completion should mean we don't see the other commands intermingled).
    leftover = b''
    for cmd in commands:
        reply, leftover = l1.rpc._readobj(sock, leftover)
        assert reply['id'] == l1.rpc.decoder.decode(cmd.decode("UTF-8"))['id']
    sock.close()
def test_malformed_rpc(node_factory):
    """Malformed JSON-RPC requests answer -32600; unknown methods -32601."""
    l1 = node_factory.get_node()
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    # Each payload is invalid in a different way: missing id, missing
    # method, not an object, object id, numeric method.
    invalid_requests = [
        b'{"jsonrpc":"2.0","method":"getinfo","params":[]}',
        b'{"id":1, "jsonrpc":"2.0","params":[]}',
        b'[]',
        b'{"id":{}, "jsonrpc":"2.0","method":"getinfo","params":[]}',
        b'{"id":1, "method": 12, "jsonrpc":"2.0","params":[]}',
    ]
    for req in invalid_requests:
        sock.sendall(req)
        reply, _ = l1.rpc._readobj(sock, b'')
        assert reply['error']['code'] == -32600

    # A well-formed request for an unknown method gets "method not found".
    sock.sendall(b'{"id":1, "method": "unknown", "jsonrpc":"2.0","params":[]}')
    reply, _ = l1.rpc._readobj(sock, b'')
    assert reply['error']['code'] == -32601

    sock.close()
def test_cli(node_factory):
    """Drive cli/lightning-cli against a live node: help output, JSON (-J),
    keyword (-k), ordered (-o), human (-H) and flat (-F) modes, plus quoting
    of awkward command names and labels."""
    l1 = node_factory.get_node()

    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   'help']).decode('utf-8')
    # Test some known output.
    assert 'help [command]\n List available commands, or give verbose help on one {command}' in out

    # Test JSON output.
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J',
                                   'help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert j['help'][0]['command'] is not None
    assert j['help'][0]['description'] is not None

    # Test keyword input (autodetected).
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J',
                                   'help', 'command=help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test keyword input (forced with -k).
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J', '-k',
                                   'help', 'command=help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (autodetected).
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J',
                                   'help', 'help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Test ordered input (forced with -o).
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-J', '-o',
                                   'help', 'help']).decode('utf-8')
    j, _ = json.JSONDecoder().raw_decode(out)
    assert 'help [command]' in j['help'][0]['verbose']

    # Missing parameters: sendpay with no args fails; we only check that
    # the cli survives it, so the exception is deliberately ignored.
    try:
        out = subprocess.check_output(['cli/lightning-cli',
                                       '--network={}'.format(TEST_NETWORK),
                                       '--lightning-dir={}'
                                       .format(l1.daemon.lightning_dir),
                                       '-J', '-o',
                                       'sendpay']).decode('utf-8')
    except Exception:
        pass

    # An unknown command name full of JSON metacharacters gets escaped in
    # the error message rather than breaking it.
    out = subprocess.run(['cli/lightning-cli',
                          '--network={}'.format(TEST_NETWORK),
                          '--lightning-dir={}'
                          .format(l1.daemon.lightning_dir),
                          'x"[]{}'],
                         stdout=subprocess.PIPE)
    assert 'Unknown command \'x\\\\\\"[]{}\'' in out.stdout.decode('utf-8')

    # Quotes/braces in invoice label and description survive the cli.
    subprocess.check_output(['cli/lightning-cli',
                             '--network={}'.format(TEST_NETWORK),
                             '--lightning-dir={}'
                             .format(l1.daemon.lightning_dir),
                             'invoice', '123000', 'l"[]{}', 'd"[]{}']).decode('utf-8')
    # Check label is correct, and also that cli's keyword parsing works.
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-k',
                                   'listinvoices', 'label=l"[]{}']).decode('utf-8')
    j = json.loads(out)
    assert only_one(j['invoices'])['label'] == 'l"[]{}'

    # For those using shell scripts (you know who you are Rene), make sure we're maintaining whitespace
    lines = [l for l in out.splitlines() if '"bolt11"' not in l and '"payment_hash"' not in l and '"expires_at"' not in l]
    assert lines == ['{',
                     ' "invoices": [',
                     ' {',
                     r' "label": "l\"[]{}",',
                     ' "msatoshi": 123000,',
                     ' "amount_msat": "123000msat",',
                     ' "status": "unpaid",',
                     r' "description": "d\"[]{}",',
                     ' }',
                     ' ]',
                     '}']

    # Make sure we omit top-levels and don't include format hint, when -H forced
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-H',
                                   'help']).decode('utf-8')
    lines = out.splitlines()
    assert [l for l in lines if l.startswith('help=')] == []
    assert [l for l in lines if l.startswith('format-hint=')] == []

    # Flat format is great for grep. LONG LIVE UNIX!
    out = subprocess.check_output(['cli/lightning-cli',
                                   '--network={}'.format(TEST_NETWORK),
                                   '--lightning-dir={}'
                                   .format(l1.daemon.lightning_dir),
                                   '-F',
                                   'help']).decode('utf-8')
    lines = out.splitlines()
    # Everything is a help[XX]= line, except format-hint.
    assert [l for l in lines if not re.search(r'^help\[[0-9]*\].', l)] == ['format-hint=simple']
def test_daemon_option(node_factory):
    """--daemon detaches, serves RPC, and shuts down cleanly via `stop`."""
    # Lazy way to set up command line and env, plus do VALGRIND checks
    l1 = node_factory.get_node()
    l1.stop()

    # Relaunch the same node daemonized, logging to a file.
    os.unlink(l1.rpc.socket_path)
    logfname = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "log-daemon")
    subprocess.run(l1.daemon.cmd_line + ['--daemon', '--log-file={}'.format(logfname)], env=l1.daemon.env,
                   check=True)

    # Test some known output (wait for rpc to be ready)
    wait_for(lambda: os.path.exists(l1.rpc.socket_path))
    help_out = subprocess.check_output(['cli/lightning-cli',
                                        '--network={}'.format(TEST_NETWORK),
                                        '--lightning-dir={}'
                                        .format(l1.daemon.lightning_dir),
                                        'help']).decode('utf-8')
    assert 'help [command]\n List available commands, or give verbose help on one {command}' in help_out

    # Stop it via the cli.
    subprocess.run(['cli/lightning-cli',
                    '--network={}'.format(TEST_NETWORK),
                    '--lightning-dir={}'.format(l1.daemon.lightning_dir),
                    'stop'], check=True)

    # It should not complain that subdaemons aren't children.
    with open(logfname, 'r') as f:
        assert 'No child process' not in f.read()
@flaky
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_blockchaintrack(node_factory, bitcoind):
    """Restart-rescan and a deep reorg must not corrupt wallet output state."""
    l1 = node_factory.get_node(random_hsm=True)
    addr = l1.rpc.newaddr(addresstype='all')['p2sh-segwit']

    ######################################################################
    # First failure scenario: rollback on startup doesn't work,
    # and we try to add a block twice when rescanning:
    l1.restart()

    height = bitcoind.rpc.getblockcount()   # 101

    # At height 111 we receive an incoming payment
    hashes = bitcoind.generate_block(9)     # 102-110
    bitcoind.rpc.sendtoaddress(addr, 1)
    time.sleep(1)  # mempool is still unpredictable
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log(r'Owning output.* \(P2SH\).* CONFIRMED')
    outputs = l1.rpc.listfunds()['outputs']
    assert len(outputs) == 1

    ######################################################################
    # Second failure scenario: perform a 20 block reorg
    bitcoind.generate_block(10)
    l1.daemon.wait_for_log('Adding block {}: '.format(height + 20))

    # Now reorg out with a longer fork of 21 blocks
    bitcoind.rpc.invalidateblock(hashes[0])
    bitcoind.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'
                          .format(height + 1))
    hashes = bitcoind.generate_block(30)
    time.sleep(1)

    bitcoind.rpc.getblockcount()
    l1.daemon.wait_for_log('Adding block {}: '.format(height + 30))

    # Our funds got reorged out, we should not have any funds that are confirmed
    # NOTE: sendtoaddress() sets locktime=103 and the reorg at 102 invalidates that tx
    # and deletes it from mempool
    assert [o for o in l1.rpc.listfunds()['outputs'] if o['status'] != "unconfirmed"] == []
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_funding_reorg_private(node_factory, bitcoind):
    """A reorg that moves the funding tx changes an unannounced channel's short_channel_id."""
    # Rescan to detect reorg at restart and may_reconnect so channeld
    # will restart. Reorg can cause bad gossip msg.
    opts = {'funding-confirms': 2, 'rescan': 10, 'may_reconnect': True,
            'allow_bad_gossip': True}
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
    l1.fundwallet(10000000)
    sync_blockheight(bitcoind, [l1])  # height 102
    bitcoind.generate_block(3)        # heights 103-105

    l1.rpc.fundchannel(l2.info['id'], "all", announce=False)
    bitcoind.generate_block(1)        # height 106

    daemon = 'DUALOPEND' if l1.config('experimental-dual-fund') else 'CHANNELD'
    wait_for(lambda: only_one(l1.rpc.listpeers()['peers'][0]['channels'])['status']
             == ['{}_AWAITING_LOCKIN:Funding needs 1 more confirmations for lockin.'.format(daemon)])
    bitcoind.generate_block(1)        # height 107
    l1.wait_channel_active('106x1x0')
    l1.stop()

    # Create a fork that changes short_channel_id from 106x1x0 to 108x1x0
    bitcoind.simple_reorg(106, 2)     # heights 106-108
    bitcoind.generate_block(1)        # height 109 (to reach minimum_depth=2 again)
    l1.start()

    # l2 was running, sees last stale block being removed
    l2.daemon.wait_for_logs([r'Removing stale block {}'.format(106),
                             r'Got depth change .->{} for .* REORG'.format(0)])

    # Old short_channel_id is disabled, the post-reorg one becomes active.
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('106x1x0')['channels']] == [False, False])
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('108x1x0')['channels']] == [True, True])

    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, True)
    l1.daemon.wait_for_log(r'Deleting channel')
    l2.daemon.wait_for_log(r'Deleting channel')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_funding_reorg_remote_lags(node_factory, bitcoind):
    """A reorg moves the funding tx while the remote peer is blind to new blocks."""
    # may_reconnect so channeld will restart; bad gossip can happen due to reorg
    opts = {'funding-confirms': 1, 'may_reconnect': True, 'allow_bad_gossip': True}
    l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
    l1.fundwallet(10000000)
    sync_blockheight(bitcoind, [l1])  # height 102

    l1.rpc.fundchannel(l2.info['id'], "all")
    bitcoind.generate_block(5)        # heights 103 - 107
    l1.wait_channel_active('103x1x0')

    # Make l2 temporary blind for blocks > 107
    def no_more_blocks(req):
        # Mock getblockhash reply: pretend the chain stops here.
        return {"result": None,
                "error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}

    l2.daemon.rpcproxy.mock_rpc('getblockhash', no_more_blocks)

    # Reorg changes short_channel_id 103x1x0 to 104x1x0, l1 sees it, restarts channeld
    bitcoind.simple_reorg(103, 1)     # heights 103 - 108
    # But now it's height 104, we need another block to make it announcable.
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(r'Peer transient failure .* short_channel_id changed to 104x1x0 \(was 103x1x0\)')

    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. They need our announcement signatures.'])

    # Unblinding l2 brings it back in sync, restarts channeld and sends its announce sig
    l2.daemon.rpcproxy.mock_rpc('getblockhash', None)

    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('103x1x0')['channels']] == [False, False])
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels('104x1x0')['channels']] == [True, True])

    wait_for(lambda: only_one(l2.rpc.listpeers()['peers'][0]['channels'])['status'] == [
        'CHANNELD_NORMAL:Reconnected, and reestablished.',
        'CHANNELD_NORMAL:Funding transaction locked. Channel announced.'])

    l1.rpc.close(l2.info['id'])
    bitcoind.generate_block(1, True)
    l1.daemon.wait_for_log(r'Deleting channel')
    l2.daemon.wait_for_log(r'Deleting channel')
def test_rescan(node_factory, bitcoind):
    """The `rescan` option controls how far back we re-scan blocks on startup.

    Positive values are relative to the current tip, negative values are an
    absolute blockheight to start from.
    """
    l1 = node_factory.get_node()

    # The first start should start at current_height - 30 = 71, make sure
    # it's not earlier
    l1.daemon.wait_for_log(r'Adding block 101')
    assert not l1.daemon.is_in_log(r'Adding block 70')

    # Restarting with a higher rescan should go back further
    l1.daemon.opts['rescan'] = 50
    l1.restart()
    l1.daemon.wait_for_log(r'Adding block 101')
    assert l1.daemon.is_in_log(r'Adding block 51')
    assert not l1.daemon.is_in_log(r'Adding block 50')

    # Restarting with an absolute rescan should start from there
    l1.daemon.opts['rescan'] = -31
    l1.restart()
    l1.daemon.wait_for_log(r'Adding block 101')
    assert l1.daemon.is_in_log(r'Adding block 31')
    assert not l1.daemon.is_in_log(r'Adding block 30')

    # Restarting with a future absolute blockheight should *fail* if we
    # can't find that height
    l1.daemon.opts['rescan'] = -500000
    l1.stop()
    bitcoind.generate_block(4)
    with pytest.raises(ValueError):
        l1.start()

    # Restarting with future absolute blockheight is fine if we can find it.
    l1.daemon.opts['rescan'] = -105
    oldneedle = l1.daemon.logsearch_start
    l1.start()
    # This could occur before pubkey msg, so move search needle back.
    l1.daemon.logsearch_start = oldneedle
    l1.daemon.wait_for_log(r'Adding block 105')
    assert not l1.daemon.is_in_log(r'Adding block 102')
def test_bitcoind_goes_backwards(node_factory, bitcoind):
    """The node refuses to follow bitcoind when the chain shrinks, unless forced by rescan."""
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
    l1.stop()

    # Now shrink chain (invalidateblock leaves 'headers' field until restart)
    bitcoind.rpc.invalidateblock(bitcoind.rpc.getblockhash(105))
    # Restart without killing proxies
    bitcoind.rpc.stop()
    TailableProc.stop(bitcoind)
    bitcoind.start()

    # Will simply refuse to start.
    with pytest.raises(ValueError):
        l1.start()

    # Nor will it start with if we ask for a reindex of fewer blocks.
    l1.daemon.opts['rescan'] = 3
    with pytest.raises(ValueError):
        l1.start()

    # This will force it, however.
    l1.daemon.opts['rescan'] = -100
    l1.start()

    # Now mess with bitcoind at runtime.
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1])
    l1.daemon.wait_for_log('Adding block 110')

    bitcoind.rpc.invalidateblock(bitcoind.rpc.getblockhash(105))
    bitcoind.rpc.stop()
    TailableProc.stop(bitcoind)
    bitcoind.start()
    bitcoind.generate_block(5)

    # It will ignore bitcoind and keep asking for block 110.
    time.sleep(5)
    assert l1.rpc.getinfo()['blockheight'] == 110
    assert not l1.daemon.is_in_log('Adding block 109',
                                   start=l1.daemon.logsearch_start)

    # Get past that, and it will suddenly read new blocks
    bitcoind.generate_block(2)
    l1.daemon.wait_for_log('Adding block 109')
    l1.daemon.wait_for_log('Adding block 110')
    l1.daemon.wait_for_log('Adding block 111')
@flaky
def test_reserve_enforcement(node_factory, executor):
    """l1 must cut the connection when l2 (with a doctored db) tries to violate the channel reserve."""
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True, 'allow_warning': True})

    # Pay 1000 satoshi to l2.
    l1.pay(l2, 1000000)
    l2.stop()

    # They should both aim for 1%.
    reserves = l2.db.query('SELECT channel_reserve_satoshis FROM channel_configs')
    assert reserves == [{'channel_reserve_satoshis': 10**6 // 100}] * 2

    # Edit db to reduce reserve to 0 so it will try to violate it.
    l2.db.execute('UPDATE channel_configs SET channel_reserve_satoshis=0')

    l2.start()
    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])

    # This should be impossible to pay entire thing back: l1 should warn and
    # close connection for trying to violate reserve.
    executor.submit(l2.pay, l1, 1000000)
    l1.daemon.wait_for_log(
        'Peer transient failure in CHANNELD_NORMAL: channeld.*'
        ' CHANNEL_ERR_CHANNEL_CAPACITY_EXCEEDED'
    )
    assert only_one(l1.rpc.listpeers()['peers'])['connected'] is False
@unittest.skipIf(not DEVELOPER, "needs dev_disconnect")
def test_htlc_send_timeout(node_factory, bitcoind, compat):
    """l2 should kill the connection to a peer that is too slow to acknowledge an HTLC."""
    # Feerates identical so we don't get gratuitous commit to update them
    l1, l2, l3 = node_factory.line_graph(3, opts=[{'log-level': 'io',
                                                   'feerates': (7500, 7500, 7500, 7500)},
                                                  # Blackhole it after it sends HTLC_ADD to l3.
                                                  {'log-level': 'io',
                                                   'feerates': (7500, 7500, 7500, 7500),
                                                   'disconnect': ['0WIRE_UPDATE_ADD_HTLC']},
                                                  {}],
                                        wait_for_announce=True)

    chanid2 = l2.get_channel_scid(l3)

    # Make sure we have 30 seconds without any incoming traffic from l3 to l2
    # so it tries to ping before sending WIRE_COMMITMENT_SIGNED.
    timedout = False
    while not timedout:
        try:
            l2.daemon.wait_for_log(r'channeld-chan#[0-9]*: \[IN\] ', timeout=30)
        except TimeoutError:
            timedout = True

    inv = l3.rpc.invoice(123000, 'test_htlc_send_timeout', 'description')
    with pytest.raises(RpcError, match=r'Ran out of routes to try after [0-9]+ attempt[s]?') as excinfo:
        l1.rpc.pay(inv['bolt11'])

    err = excinfo.value
    # Complains it stopped after several attempts.
    # FIXME: include in pylightning
    PAY_STOPPED_RETRYING = 210
    assert err.error['code'] == PAY_STOPPED_RETRYING

    status = only_one(l1.rpc.call('paystatus')['pay'])

    # Temporary channel failure
    assert status['attempts'][0]['failure']['data']['failcode'] == 0x1007
    assert status['attempts'][0]['failure']['data']['erring_node'] == l2.info['id']
    assert status['attempts'][0]['failure']['data']['erring_channel'] == chanid2

    # L2 should send ping, but never receive pong so never send commitment.
    l2.daemon.wait_for_log(r'{}-.*channeld.*: \[OUT\] 0012'.format(l3.info['id']))
    assert not l2.daemon.is_in_log(r'{}-.*channeld.*: \[IN\] 0013'.format(l3.info['id']))
    assert not l2.daemon.is_in_log(r'{}-.*channeld.*: \[OUT\] 0084'.format(l3.info['id']))
    # L2 killed the channel with l3 because it was too slow.
    l2.daemon.wait_for_log('{}-.*channeld-.*Adding HTLC too slow: killing connection'.format(l3.info['id']))
def test_ipv4_and_ipv6(node_factory):
    """Binding to ':port' should listen on IPv6 and IPv4 (or IPv4 alone if no IPv6)."""
    port = reserve()
    node = node_factory.get_node(options={'addr': ':{}'.format(port)})
    bindings = node.rpc.getinfo()['binding']

    if len(bindings) == 2:
        # Dual-stack host: IPv6 wildcard first, then IPv4 wildcard.
        ipv6, ipv4 = bindings
        assert ipv6['type'] == 'ipv6'
        assert ipv6['address'] == '::'
        assert int(ipv6['port']) == port
        assert ipv4['type'] == 'ipv4'
        assert ipv4['address'] == '0.0.0.0'
        assert int(ipv4['port']) == port
    else:
        # Assume we're IPv4 only...
        assert len(bindings) == 1
        (only,) = bindings
        assert only['type'] == 'ipv4'
        assert only['address'] == '0.0.0.0'
        assert int(only['port']) == port
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Fees on elements are different")
@unittest.skipIf(
    not DEVELOPER or DEPRECATED_APIS, "Without DEVELOPER=1 we snap to "
    "FEERATE_FLOOR on testnets, and we test the new API."
)
def test_feerates(node_factory):
    """Feed feerates to lightningd one estimate at a time and check `feerates` output."""
    l1 = node_factory.get_node(options={'log-level': 'io'}, start=False)
    # Start with bitcoind reporting no feerate estimates at all.
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', {
        'error': {"errors": ["Insufficient data or no feerate found"], "blocks": 0}
    })
    l1.start()

    # All estimation types
    types = ["opening", "mutual_close", "unilateral_close", "delayed_to_us",
             "htlc_resolution", "penalty"]

    # Query feerates (shouldn't give any!)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 2)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 2**32 - 1
    assert feerates['perkw']['min_acceptable'] == 253
    for t in types:
        assert t not in feerates['perkw']

    wait_for(lambda: len(l1.rpc.feerates('perkb')['perkb']) == 2)
    feerates = l1.rpc.feerates('perkb')
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkw' not in feerates
    assert feerates['perkb']['max_acceptable'] == (2**32 - 1)
    assert feerates['perkb']['min_acceptable'] == 253 * 4
    for t in types:
        assert t not in feerates['perkb']

    # Now try setting them, one at a time.
    # Set CONSERVATIVE/2 feerate, for max and unilateral_close
    l1.set_feerates((15000, 0, 0, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 3)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 15000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 253

    # Set CONSERVATIVE/3 feerate, for htlc_resolution and penalty
    l1.set_feerates((15000, 11000, 0, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) == 5)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 15000
    assert feerates['perkw']['htlc_resolution'] == 11000
    assert feerates['perkw']['penalty'] == 11000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 253

    # Set ECONOMICAL/4 feerate, for all but min (so, no mutual_close feerate)
    l1.set_feerates((15000, 11000, 6250, 0), True)
    wait_for(lambda: len(l1.rpc.feerates('perkb')['perkb']) == len(types) - 1 + 2)
    feerates = l1.rpc.feerates('perkb')
    assert feerates['perkb']['unilateral_close'] == 15000 * 4
    assert feerates['perkb']['htlc_resolution'] == 11000 * 4
    assert feerates['perkb']['penalty'] == 11000 * 4
    assert 'mutual_close' not in feerates['perkb']
    for t in types:
        if t not in ("unilateral_close", "htlc_resolution", "penalty", "mutual_close"):
            assert feerates['perkb'][t] == 25000
    assert feerates['warning_missing_feerates'] == 'Some fee estimates unavailable: bitcoind startup?'
    assert 'perkw' not in feerates
    assert feerates['perkb']['max_acceptable'] == 15000 * 4 * 10
    assert feerates['perkb']['min_acceptable'] == 253 * 4

    # Set ECONOMICAL/100 feerate for min
    l1.set_feerates((15000, 11000, 6250, 5000), True)
    wait_for(lambda: len(l1.rpc.feerates('perkw')['perkw']) >= len(types) + 2)
    feerates = l1.rpc.feerates('perkw')
    assert feerates['perkw']['unilateral_close'] == 15000
    assert feerates['perkw']['htlc_resolution'] == 11000
    assert feerates['perkw']['penalty'] == 11000
    assert feerates['perkw']['mutual_close'] == 5000
    for t in types:
        if t not in ("unilateral_close", "htlc_resolution", "penalty", "mutual_close"):
            assert feerates['perkw'][t] == 25000 // 4
    # All estimates present now, so no warning any more.
    assert 'warning' not in feerates
    assert 'perkb' not in feerates
    assert feerates['perkw']['max_acceptable'] == 15000 * 10
    assert feerates['perkw']['min_acceptable'] == 5000 // 2

    # Derived on-chain estimates scale with the relevant tx weights.
    assert len(feerates['onchain_fee_estimates']) == 5
    assert feerates['onchain_fee_estimates']['opening_channel_satoshis'] == feerates['perkw']['opening'] * 702 // 1000
    assert feerates['onchain_fee_estimates']['mutual_close_satoshis'] == feerates['perkw']['mutual_close'] * 673 // 1000
    assert feerates['onchain_fee_estimates']['unilateral_close_satoshis'] == feerates['perkw']['unilateral_close'] * 598 // 1000
    htlc_feerate = feerates["perkw"]["htlc_resolution"]
    htlc_timeout_cost = feerates["onchain_fee_estimates"]["htlc_timeout_satoshis"]
    htlc_success_cost = feerates["onchain_fee_estimates"]["htlc_success_satoshis"]
    if EXPERIMENTAL_FEATURES:
        # option_anchor_outputs
        assert htlc_timeout_cost == htlc_feerate * 666 // 1000
        assert htlc_success_cost == htlc_feerate * 706 // 1000
    else:
        assert htlc_timeout_cost == htlc_feerate * 663 // 1000
        assert htlc_success_cost == htlc_feerate * 703 // 1000
def test_logging(node_factory):
    """SIGHUP must close and reopen the log file (log rotation), without killing the daemon."""
    # Since we redirect, node.start() will fail: do manually.
    l1 = node_factory.get_node(options={'log-file': 'logfile'}, start=False)
    logpath = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile')
    logpath_moved = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile_moved')

    l1.daemon.start(wait_for_initialized=False)
    wait_for(lambda: os.path.exists(logpath))

    # Simulate a rotation: move the file aside, then SIGHUP to reopen.
    shutil.move(logpath, logpath_moved)
    l1.daemon.proc.send_signal(signal.SIGHUP)
    wait_for(lambda: os.path.exists(logpath_moved))
    wait_for(lambda: os.path.exists(logpath))

    log1 = open(logpath_moved).readlines()
    assert log1[-1].endswith("Ending log due to SIGHUP\n")

    def check_new_log():
        # The reopened log starts with the SIGHUP banner.
        log2 = open(logpath).readlines()
        return len(log2) > 0 and log2[0].endswith("Started log due to SIGHUP\n")
    wait_for(check_new_log)

    # Issue #4240
    # Repeated SIGHUP should just re-open the log file
    # and not terminate the daemon.
    logpath_moved_2 = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'logfile_moved_2')
    shutil.move(logpath, logpath_moved_2)
    l1.daemon.proc.send_signal(signal.SIGHUP)
    wait_for(lambda: os.path.exists(logpath_moved_2))
    wait_for(lambda: os.path.exists(logpath))
    wait_for(check_new_log)
@unittest.skipIf(VALGRIND,
                 "Valgrind sometimes fails assert on injected SEGV")
def test_crashlog(node_factory):
    """An injected SIGSEGV must produce a crash.log in the network directory."""
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    def has_crash_log(n):
        # Any file whose name contains 'crash.log' counts.
        netdir = os.path.join(n.daemon.lightning_dir, TEST_NETWORK)
        return any('crash.log' in name for name in os.listdir(netdir))

    assert not has_crash_log(l1)
    l1.daemon.proc.send_signal(signal.SIGSEGV)
    wait_for(lambda: has_crash_log(l1))
def test_configfile_before_chdir(node_factory):
    """A --conf path relative to the starting cwd must be honored (read before chdir)."""
    l1 = node_factory.get_node()
    l1.stop()

    olddir = os.getcwd()
    # as lightning_dir ends in /, basename and dirname don't work as expected.
    os.chdir(os.path.dirname(l1.daemon.lightning_dir[:-1]))
    config = os.path.join(os.path.basename(l1.daemon.lightning_dir[:-1]), TEST_NETWORK, "test_configfile")
    # Test both an early arg and a normal arg.
    with open(config, 'wb') as f:
        f.write(b'always-use-proxy=true\n')
        f.write(b'proxy=127.0.0.1:100\n')
    l1.daemon.opts['conf'] = config

    # Update executable to point to right place
    l1.daemon.executable = os.path.join(olddir, l1.daemon.executable)
    l1.start()
    assert l1.rpc.listconfigs()['always-use-proxy']
    assert l1.rpc.listconfigs()['proxy'] == '127.0.0.1:100'
    os.chdir(olddir)
def test_json_error(node_factory):
    """A malformed RPC request must produce a clean error without corrupting the connection."""
    l1 = node_factory.get_node()
    # `close` with a raw tx object instead of a channel id must be rejected.
    with pytest.raises(RpcError, match=r'id: should be a channel ID or short channel ID: invalid token'):
        l1.rpc.close({"tx": "020000000001011490f737edd2ea2175a032b58ea7cd426dfc244c339cd044792096da3349b18a0100000000ffffffff021c900300000000001600140e64868e2f752314bc82a154c8c5bf32f3691bb74da00b00000000002200205b8cd3b914cf67cdd8fa6273c930353dd36476734fbd962102c2df53b90880cd0247304402202b2e3195a35dc694bbbc58942dc9ba59cc01d71ba55c9b0ad0610ccd6a65633702201a849254453d160205accc00843efb0ad1fe0e186efa6a7cee1fb6a1d36c736a012103d745445c9362665f22e0d96e9e766f273f3260dea39c8a76bfa05dd2684ddccf00000000", "txid": "2128c10f0355354479514f4a23eaa880d94e099406d419bbb0d800143accddbb", "channel_id": "bbddcc3a1400d8b0bb19d40694094ed980a8ea234a4f5179443555030fc12820"})

    # Should not corrupt following RPC
    l1.rpc.getinfo()
def test_check_command(node_factory):
    """Exercise the `check` command via the python wrapper and raw JSON-RPC array params."""
    l1 = node_factory.get_node()

    l1.rpc.check(command_to_check='help')
    l1.rpc.check(command_to_check='help', command='check')
    # Note: this just checks form, not whether it's valid!
    l1.rpc.check(command_to_check='help', command='badcommand')
    with pytest.raises(RpcError, match=r'Unknown command'):
        l1.rpc.check(command_to_check='badcommand')
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.check(command_to_check='help', badarg='x')

    # Ensures we have compulsory parameters.
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='connect')
    # Even with optional parameters.
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='connect', host='x', port=77)
    # Makes sure parameter types are correct.
    with pytest.raises(RpcError, match=r'should be an integer'):
        l1.rpc.check(command_to_check='connect', id='test', host='x', port="abcd")

    # FIXME: python wrapper doesn't let us test array params.
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(l1.rpc.socket_path)

    def raw_check(request, expect_success):
        # Send raw bytes on the RPC socket and decode one response object.
        sock.sendall(request)
        obj, _ = l1.rpc._readobj(sock, b'')
        assert obj['id'] == 1
        if expect_success:
            assert 'result' in obj
            assert 'error' not in obj
        else:
            assert 'result' not in obj
            assert 'error' in obj

    raw_check(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["help"]}', True)
    raw_check(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["help", "check"]}', True)
    raw_check(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["help", "a", "b"]}', False)
    raw_check(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["badcommand"]}', False)
    raw_check(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["connect"]}', False)
    raw_check(b'{"id":1, "jsonrpc":"2.0","method":"check","params":["connect", "test", "x", "abcd"]}', False)

    sock.close()
@unittest.skipIf(not DEVELOPER, "FIXME: without DEVELOPER=1 we timeout")
def test_bad_onion(node_factory, bitcoind):
    """A corrupted onion must surface WIRE_INVALID_ONION_HMAC with the right erring node/channel."""
    l1, l2, l3, l4 = node_factory.line_graph(4, wait_for_announce=True,
                                             opts={'log-level': 'io'})
    h = l4.rpc.invoice(123000, 'test_bad_onion', 'description')['payment_hash']

    route = l1.rpc.getroute(l4.info['id'], 123000, 1)['route']
    assert len(route) == 3

    mangled_nodeid = '0265b6ab5ec860cd257865d61ef0bbf5b3339c36cbda8b26b74e7f1dca490b6518'

    # Replace id with a different pubkey, so onion encoded badly at third hop.
    route[2]['id'] = mangled_nodeid
    l1.rpc.sendpay(route, h)
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(h)

    # FIXME: #define PAY_TRY_OTHER_ROUTE		204
    PAY_TRY_OTHER_ROUTE = 204
    assert err.value.error['code'] == PAY_TRY_OTHER_ROUTE
    # FIXME: WIRE_INVALID_ONION_HMAC = BADONION|PERM|5
    WIRE_INVALID_ONION_HMAC = 0x8000 | 0x4000 | 5
    assert err.value.error['data']['failcode'] == WIRE_INVALID_ONION_HMAC
    assert err.value.error['data']['erring_node'] == mangled_nodeid
    assert err.value.error['data']['erring_channel'] == route[2]['channel']

    # We should see a WIRE_UPDATE_FAIL_MALFORMED_HTLC from l4.
    line = l4.daemon.is_in_log(r'\[OUT\] 0087')
    # 008739d3149a5c37e95f9dae718ce46efc60248e110e10117d384870a6762e8e33030000000000000000d7fc52f6c32773aabca55628fe616058aecc44a384e0abfa85c0c48b449dd38dc005
    # type<--------------channelid---------------------------------------><--htlc-id-----><--------------------------------------------- sha_of_onion --->code
    sha = re.search(r' 0087.{64}.{16}(.{64})', line).group(1)

    # Should see same sha in onionreply
    l1.daemon.wait_for_log(r'failcode .* from onionreply .*{sha}'.format(sha=sha))

    # Replace id with a different pubkey, so onion encoded badly at second hop.
    route[1]['id'] = mangled_nodeid
    l1.rpc.sendpay(route, h)
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(h)

    # FIXME: #define PAY_TRY_OTHER_ROUTE		204
    PAY_TRY_OTHER_ROUTE = 204
    assert err.value.error['code'] == PAY_TRY_OTHER_ROUTE
    assert err.value.error['data']['failcode'] == WIRE_INVALID_ONION_HMAC
    assert err.value.error['data']['erring_node'] == mangled_nodeid
    assert err.value.error['data']['erring_channel'] == route[1]['channel']
@unittest.skipIf(not DEVELOPER, "Needs DEVELOPER=1 to force onion fail")
def test_bad_onion_immediate_peer(node_factory, bitcoind):
    """When our direct peer corrupts the onion, sendpay fails with PAY_UNPARSEABLE_ONION."""
    l1, l2 = node_factory.line_graph(2, opts={'dev-fail-process-onionpacket': None})

    payment_hash = l2.rpc.invoice(123000, 'test_bad_onion_immediate_peer', 'description')['payment_hash']

    # A direct route: a single hop.
    route = l1.rpc.getroute(l2.info['id'], 123000, 1)['route']
    assert len(route) == 1

    l1.rpc.sendpay(route, payment_hash)
    with pytest.raises(RpcError) as err:
        l1.rpc.waitsendpay(payment_hash)

    # FIXME: #define PAY_UNPARSEABLE_ONION		202
    PAY_UNPARSEABLE_ONION = 202
    # FIXME: WIRE_INVALID_ONION_HMAC = BADONION|PERM|5
    WIRE_INVALID_ONION_HMAC = 0x8000 | 0x4000 | 5
    assert err.value.error['code'] == PAY_UNPARSEABLE_ONION
    assert err.value.error['data']['failcode'] == WIRE_INVALID_ONION_HMAC
def test_newaddr(node_factory, chainparams):
    """`newaddr` returns only the requested address type(s) with the network's prefix."""
    l1 = node_factory.get_node()

    p2sh_only = l1.rpc.newaddr('p2sh-segwit')
    assert 'bech32' not in p2sh_only
    assert p2sh_only['p2sh-segwit'].startswith(chainparams['p2sh_prefix'])

    bech32_only = l1.rpc.newaddr('bech32')
    assert 'p2sh-segwit' not in bech32_only
    assert bech32_only['bech32'].startswith(chainparams['bip173_prefix'])

    # 'all' returns both kinds at once.
    every = l1.rpc.newaddr('all')
    assert every['p2sh-segwit'].startswith(chainparams['p2sh_prefix'])
    assert every['bech32'].startswith(chainparams['bip173_prefix'])
def test_newaddr_deprecated(node_factory, chainparams):
    """With deprecated APIs enabled, `newaddr` exposes a plain 'address' field."""
    l1 = node_factory.get_node(options={'allow-deprecated-apis': True})
    for addrtype, prefix_key in (('p2sh-segwit', 'p2sh_prefix'),
                                 ('bech32', 'bip173_prefix')):
        reply = l1.rpc.newaddr(addrtype)
        assert reply['address'].startswith(chainparams[prefix_key])
def test_bitcoind_fail_first(node_factory, bitcoind, executor):
    """lightningd must retry and recover when bitcoind RPC calls fail during startup."""
    # Do not start the lightning node since we need to instrument bitcoind
    # first.
    l1 = node_factory.get_node(start=False)

    # Instrument bitcoind to fail some queries first.
    def mock_fail(*args):
        raise ValueError()

    l1.daemon.rpcproxy.mock_rpc('getblockhash', mock_fail)
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_fail)

    # Start in the background: it will block retrying until we unset the mocks.
    f = executor.submit(l1.start)
    wait_for(lambda: l1.daemon.running)
    # Make sure it fails on the first `getblock` call (need to use `is_in_log`
    # since the `wait_for_log` in `start` sets the offset)
    wait_for(lambda: l1.daemon.is_in_log(
        r'getblockhash [a-z0-9]* exited with status 1'))
    wait_for(lambda: l1.daemon.is_in_log(
        r'Unable to estimate opening fees'))

    # Now unset the mock, so calls go through again
    l1.daemon.rpcproxy.mock_rpc('getblockhash', None)
    l1.daemon.rpcproxy.mock_rpc('estimatesmartfee', None)

    f.result()
@unittest.skipIf(not DEVELOPER, "needs --dev-force-bip32-seed")
@unittest.skipIf(TEST_NETWORK != 'regtest', "Addresses are network specific")
def test_dev_force_bip32_seed(node_factory):
    """With a forced bip32 seed, successive bech32 addresses are deterministic."""
    l1 = node_factory.get_node(options={'dev-force-bip32-seed': '0000000000000000000000000000000000000000000000000000000000000001'})
    # First is m/0/0/1 ..
    expected = [
        "bcrt1qsdzqt93xsyewdjvagndw9523m27e52er5ca7hm",
        "bcrt1qlkt93775wmf33uacykc49v2j4tayn0yj25msjn",
        "bcrt1q2ng546gs0ylfxrvwx0fauzcvhuz655en4kwe2c",
        "bcrt1qrdpwrlrmrnvn535l5eldt64lxm8r2nwkv0ruxq",
        "bcrt1q622lwmdzxxterumd746eu3d3t40pq53p62zhlz",
    ]
    for want in expected:
        assert l1.rpc.newaddr('bech32')['bech32'] == want
@unittest.skipIf(not DEVELOPER, "needs dev command")
def test_dev_demux(node_factory):
    """The `dev` command demuxes its subcommands, with `check` and error paths working for both object and array params."""
    l1 = node_factory.get_node(may_fail=True, allow_broken_log=True)

    # Check should work.
    l1.rpc.check(command_to_check='dev', subcommand='crash')
    l1.rpc.check(command_to_check='dev', subcommand='slowcmd', msec=1000)
    l1.rpc.check(command_to_check='dev', subcommand='rhash', secret='00' * 32)
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.check(command_to_check='dev', subcommand='foobar')
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.check(command_to_check='dev', subcommand='crash', unk=1)
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.check(command_to_check='dev', subcommand='slowcmd', msec='aaa')
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='dev', subcommand='rhash')
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.check(command_to_check='dev')

    # Non-check failures should fail, in both object and array form.
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.call('dev', {'subcommand': 'foobar'})
    with pytest.raises(RpcError, match=r'Unknown subcommand'):
        l1.rpc.call('dev', ['foobar'])
    with pytest.raises(RpcError, match=r'unknown parameter'):
        l1.rpc.call('dev', {'subcommand': 'crash', 'unk': 1})
    with pytest.raises(RpcError, match=r'too many parameters'):
        l1.rpc.call('dev', ['crash', 1])
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.call('dev', {'subcommand': 'slowcmd', 'msec': 'aaa'})
    with pytest.raises(RpcError, match=r"msec: should be an integer: invalid token"):
        l1.rpc.call('dev', ['slowcmd', 'aaa'])
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev', {'subcommand': 'rhash'})
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev', ['rhash'])
    with pytest.raises(RpcError, match=r'missing required parameter'):
        l1.rpc.call('dev')

    # Help should list them all.
    assert 'subcommand=crash|rhash|slowcmd' in l1.rpc.help('dev')['help'][0]['command']

    # These work
    assert l1.rpc.call('dev', ['slowcmd', '7'])['msec'] == 7
    assert l1.rpc.call('dev', {'subcommand': 'slowcmd', 'msec': '7'})['msec'] == 7
    assert l1.rpc.call('dev', {'subcommand': 'rhash', 'secret': '00' * 32})['rhash'] == '66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925'

    # `dev crash` intentionally kills the daemon, so it must come last.
    with pytest.raises(RpcError):
        l1.rpc.call('dev', {'subcommand': 'crash'})
def test_list_features_only(node_factory):
    """`--list-features-only` prints the supported feature names, one per line."""
    output = subprocess.check_output(['lightningd/lightningd',
                                      '--list-features-only']).decode('utf-8')
    expected = ['option_data_loss_protect/odd',
                'option_upfront_shutdown_script/odd',
                'option_gossip_queries/odd',
                'option_var_onion_optin/odd',
                'option_gossip_queries_ex/odd',
                'option_static_remotekey/odd',
                'option_payment_secret/odd',
                'option_basic_mpp/odd']
    if EXPERIMENTAL_FEATURES:
        expected.append('option_anchor_outputs/odd')
        expected.append('option_unknown_102/odd')
    assert output.splitlines() == expected
def test_relative_config_dir(node_factory):
    """A relative --lightning-dir must be converted to an absolute path."""
    l1 = node_factory.get_node(start=False)
    initial_dir = os.getcwd()
    lndir = l1.daemon.opts.get("lightning-dir")[:-1]
    # chdir into the parent so the lightning-dir we pass is relative to cwd.
    *root_dir, l1.daemon.opts["lightning-dir"] = lndir.split('/')
    os.chdir('/'.join(root_dir))
    # Update executable to point to right place
    l1.daemon.executable = os.path.join(initial_dir, l1.daemon.executable)
    l1.start()
    assert os.path.isabs(l1.rpc.listconfigs()["lightning-dir"])
    l1.stop()
    os.chdir(initial_dir)
def test_signmessage(node_factory):
    """Sign and verify messages, including signatures produced by LND users."""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    # corpus entries: [contributor, message, zbase signature, signer pubkey]
    corpus = [[None,
               "this is a test!",
               l1.rpc.signmessage("this is a test!")['zbase'],
               l1.info['id']]]

    # Other contributions from LND users!
    corpus += [
        ['@bitconner',
         "is this compatible?",
         'rbgfioj114mh48d8egqx8o9qxqw4fmhe8jbeeabdioxnjk8z3t1ma1hu1fiswpakgucwwzwo6ofycffbsqusqdimugbh41n1g698hr9t',
         '02b80cabdf82638aac86948e4c06e82064f547768dcef977677b9ea931ea75bab5'],
        ['@duck1123',
         'hi',
         'rnrphcjswusbacjnmmmrynh9pqip7sy5cx695h6mfu64iac6qmcmsd8xnsyczwmpqp9shqkth3h4jmkgyqu5z47jfn1q7gpxtaqpx4xg',
         '02de60d194e1ca5947b59fe8e2efd6aadeabfb67f2e89e13ae1a799c1e08e4a43b'],
        ['@jochemin',
         'hi',
         'ry8bbsopmduhxy3dr5d9ekfeabdpimfx95kagdem7914wtca79jwamtbw4rxh69hg7n6x9ty8cqk33knbxaqftgxsfsaeprxkn1k48p3',
         '022b8ece90ee891cbcdac0c1cc6af46b73c47212d8defbce80265ac81a6b794931'],
    ]

    for c in corpus:
        print("Shout out to {}".format(c[0]))
        # The devtools checker must recover the claimed key from the signature...
        assert subprocess.check_output(['devtools/lightning-checkmessage',
                                        c[1], c[2]]).decode('utf-8') == "Signature claims to be from key {}\n".format(c[3])

        # ...and accept when given the key, but reject a modified message.
        subprocess.run(['devtools/lightning-checkmessage', c[1], c[2], c[3]], check=True)

        with pytest.raises(subprocess.CalledProcessError):
            subprocess.run(['devtools/lightning-checkmessage',
                            c[1] + "modified", c[2], c[3]], check=True)

        assert l1.rpc.checkmessage(c[1], c[2], c[3])['verified']
        assert not l1.rpc.checkmessage(c[1] + "modified", c[2], c[3])['verified']
        checknokey = l1.rpc.checkmessage(c[1], c[2])
        # Of course, we know our own pubkey
        if c[3] == l1.info['id']:
            assert checknokey['verified']
        else:
            assert not checknokey['verified']
        assert checknokey['pubkey'] == c[3]

    # l2 knows about l1, so it can validate it.
    zm = l1.rpc.signmessage(message="message for you")['zbase']
    checknokey = l2.rpc.checkmessage(message="message for you", zbase=zm)
    assert checknokey['pubkey'] == l1.info['id']
    assert checknokey['verified']
def test_include(node_factory):
    """A config file can pull in another one via the `include` directive."""
    l1 = node_factory.get_node(start=False)

    subdir = os.path.join(l1.daemon.opts.get("lightning-dir"), "subdir")
    os.makedirs(subdir)
    # conf1 merely includes conf2; conf2 sets the alias.
    with open(os.path.join(subdir, "conf1"), 'w') as outer:
        outer.write('include conf2')
    with open(os.path.join(subdir, "conf2"), 'w') as inner:
        inner.write('alias=conf2')
    l1.daemon.opts['conf'] = os.path.join(subdir, "conf1")
    l1.start()

    assert l1.rpc.listconfigs('alias')['alias'] == 'conf2'
def test_config_in_subdir(node_factory, chainparams):
    """Config may live in <lightning-dir>/<network>/config, and each config
    level restricts which options are permitted in it."""
    l1 = node_factory.get_node(start=False)
    network = chainparams['name']
    subdir = os.path.join(l1.daemon.opts.get("lightning-dir"), network)

    # A network-level config file is honoured.
    with open(os.path.join(subdir, "config"), 'w') as f:
        f.write('alias=test_config_in_subdir')
    l1.start()
    assert l1.rpc.listconfigs('alias')['alias'] == 'test_config_in_subdir'

    l1.stop()
    # conf is not allowed in any config file.
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('conf={}/conf'.format(network))
    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "conf: not permitted in configuration files" in out.stderr.decode('utf-8')

    # network is allowed in root config file.
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('network={}'.format(network))
    l1.start()
    l1.stop()

    # but not in network config file.
    with open(os.path.join(subdir, "config"), 'w') as f:
        f.write('network={}'.format(network))
    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "network: not permitted in network-specific configuration files" in out.stderr.decode('utf-8')

    # lightning-dir only allowed if we explicitly use --conf
    os.unlink(os.path.join(subdir, "config"))
    with open(os.path.join(l1.daemon.opts.get("lightning-dir"), "config"), 'w') as f:
        f.write('lightning-dir={}/test'.format(l1.daemon.opts.get("lightning-dir")))
    out = subprocess.run(['lightningd/lightningd',
                          '--lightning-dir={}'.format(l1.daemon.opts.get("lightning-dir"))],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 1
    assert "lightning-dir: not permitted in implicit configuration files" in out.stderr.decode('utf-8')

    # ...but fine once the config file is passed explicitly.
    l1.daemon.opts['conf'] = os.path.join(l1.daemon.opts.get("lightning-dir"), "config")
    l1.start()
def restore_valgrind(node, subdir):
    """Move any valgrind-errors files from *subdir* back into the node's
    lightning-dir (so the test harness can find them)."""
    dest = node.daemon.opts.get("lightning-dir")
    for name in os.listdir(subdir):
        if name.startswith('valgrind-errors.'):
            shutil.move(os.path.join(subdir, name), dest)
@unittest.skipIf(env('COMPAT') != 1, "Upgrade code requires COMPAT_V073")
def test_testnet_upgrade(node_factory):
    """Old-style (pre-0.7.3) basedirs are migrated into <basedir>/testnet.

    Data files move into the network subdirectory; the config file and
    the pid file must stay where they are.
    """
    l1 = node_factory.get_node(start=False, may_fail=True)
    basedir = l1.daemon.opts.get("lightning-dir")

    # Make it old-style: hsm_secret directly in basedir, no network subdir.
    os.rename(os.path.join(basedir, TEST_NETWORK, 'hsm_secret'),
              os.path.join(basedir, 'hsm_secret'))
    shutil.rmtree(os.path.join(basedir, TEST_NETWORK))

    # Add (empty!) config file; it should be left in place.
    # FIX: the source line was truncated to `f.write(b"` (an unterminated
    # bytes literal, i.e. a SyntaxError); restore the intended empty write.
    with open(os.path.join(basedir, 'config'), 'wb') as f:
        f.write(b"")

    with open(os.path.join(basedir, 'another_file'), 'wb') as f:
        pass

    # We need to allow this, otherwise no upgrade!
    del l1.daemon.opts['allow-deprecated-apis']
    # We want to test default network
    del l1.daemon.opts['network']

    # Wrong chain, will fail to start, but that's OK.
    with pytest.raises(ValueError):
        l1.start()

    netdir = os.path.join(basedir, "testnet")
    assert l1.daemon.is_in_log("Moving hsm_secret into {}/".format(netdir))
    assert l1.daemon.is_in_log("Moving another_file into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving config into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving lightningd-testnet.pid into {}/"
                                   .format(netdir))

    # Should move these
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert os.path.isfile(os.path.join(netdir, "another_file"))
    assert not os.path.isfile(os.path.join(basedir, "another_file"))

    # Should NOT move these
    assert not os.path.isfile(os.path.join(netdir, "lightningd-testnet.pid"))
    assert os.path.isfile(os.path.join(basedir, "lightningd-testnet.pid"))
    assert not os.path.isfile(os.path.join(netdir, "config"))
    assert os.path.isfile(os.path.join(basedir, "config"))

    restore_valgrind(l1, netdir)
@unittest.skipIf(env('COMPAT') != 1, "Upgrade code requires COMPAT_V073")
def test_regtest_upgrade(node_factory):
    """Like test_testnet_upgrade, but the network is taken from the config
    file, so the node can actually start after migrating into the subdir."""
    l1 = node_factory.get_node(start=False)
    basedir = l1.daemon.opts.get("lightning-dir")
    netdir = os.path.join(basedir, TEST_NETWORK)

    # Make it old-style: hsm_secret directly in basedir, no network subdir.
    os.rename(os.path.join(basedir, TEST_NETWORK, 'hsm_secret'),
              os.path.join(basedir, 'hsm_secret'))
    shutil.rmtree(os.path.join(basedir, TEST_NETWORK))

    # Add config file which tells us it's regtest; it should be left in place.
    with open(os.path.join(basedir, 'config'), 'wb') as f:
        f.write(bytes("network={}".format(TEST_NETWORK), "utf-8"))

    with open(os.path.join(basedir, 'another_file'), 'wb') as f:
        pass

    # We need to allow this, otherwise no upgrade!
    del l1.daemon.opts['allow-deprecated-apis']
    # It should get this from the config file.
    del l1.daemon.opts['network']

    l1.start()

    assert l1.daemon.is_in_log("Moving hsm_secret into {}/".format(netdir))
    assert l1.daemon.is_in_log("Moving another_file into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving config into {}/".format(netdir))
    assert not l1.daemon.is_in_log("Moving lightningd-testnet.pid into {}/"
                                   .format(netdir))

    # Should move these
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert os.path.isfile(os.path.join(netdir, "another_file"))
    assert not os.path.isfile(os.path.join(basedir, "another_file"))

    # Should NOT move these
    assert not os.path.isfile(os.path.join(netdir, "lightningd-{}.pid".format(TEST_NETWORK)))
    assert os.path.isfile(os.path.join(basedir, "lightningd-{}.pid".format(TEST_NETWORK)))
    assert not os.path.isfile(os.path.join(netdir, "config"))
    assert os.path.isfile(os.path.join(basedir, "config"))

    # Should restart fine
    l1.restart()

    restore_valgrind(l1, netdir)
@unittest.skipIf(VALGRIND, "valgrind files can't be written since we rmdir")
@unittest.skipIf(TEST_NETWORK != "regtest", "needs bitcoin mainnet")
def test_new_node_is_mainnet(node_factory):
    """With no config and no --network, a fresh node defaults to mainnet and
    creates its files directly under <basedir>/bitcoin."""
    l1 = node_factory.get_node(start=False, may_fail=True)

    basedir = l1.daemon.opts.get("lightning-dir")
    netdir = os.path.join(basedir, "bitcoin")
    # Wipe everything so the node starts completely fresh.
    shutil.rmtree(basedir)

    # Don't suppress upgrade (though it shouldn't happen!)
    del l1.daemon.opts['allow-deprecated-apis']
    # We want to test default network
    del l1.daemon.opts['network']

    # Wrong chain, will fail to start, but that's OK.
    with pytest.raises(ValueError):
        l1.start()

    # Should create these (hsm_secret inside the network dir, pid outside).
    assert os.path.isfile(os.path.join(netdir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(basedir, "hsm_secret"))
    assert not os.path.isfile(os.path.join(netdir, "lightningd-bitcoin.pid"))
    assert os.path.isfile(os.path.join(basedir, "lightningd-bitcoin.pid"))
def test_unicode_rpc(node_factory, executor, bitcoind):
    """Non-ASCII invoice labels/descriptions must round-trip over the RPC."""
    node = node_factory.get_node()
    text = "Some candy 🍬 and a nice glass of milk 🥛."

    node.rpc.invoice(msatoshi=42, label=text, description=text)
    invoices = node.rpc.listinvoices()['invoices']

    assert len(invoices) == 1
    inv = invoices[0]
    assert inv['description'] == text
    assert inv['label'] == text
@unittest.skipIf(VALGRIND, "Testing pyln doesn't exercise anything interesting in the c code.")
def test_unix_socket_path_length(node_factory, bitcoind, directory, executor, db_provider, test_base_dir):
    """Start a node whose lightning-dir produces a very long unix-socket
    path; RPC calls must still succeed."""
    # "anode" + "far"*30 + "away" deliberately inflates the path length.
    lightning_dir = os.path.join(directory, "anode" + "far" * 30 + "away")
    os.makedirs(lightning_dir)
    db = db_provider.get_db(lightning_dir, "test_unix_socket_path_length", 1)

    l1 = LightningNode(1, lightning_dir, bitcoind, executor, VALGRIND, db=db, port=node_factory.get_next_port())

    # `LightningNode.start()` internally calls `LightningRpc.getinfo()` which
    # exercises the socket logic, and raises an issue if it fails.
    l1.start()

    # Let's just call it again to make sure it really works.
    l1.rpc.listconfigs()
    l1.stop()
def test_waitblockheight(node_factory, executor, bitcoind):
    """waitblockheight returns immediately at/below the current height,
    blocks until the height is reached, and times out when asked to."""
    node = node_factory.get_node()

    sync_blockheight(bitcoind, [node])

    blockheight = node.rpc.getinfo()['blockheight']

    # Should succeed without waiting.
    node.rpc.waitblockheight(blockheight - 2)
    node.rpc.waitblockheight(blockheight - 1)
    node.rpc.waitblockheight(blockheight)

    # Should not succeed yet.
    fut2 = executor.submit(node.rpc.waitblockheight, blockheight + 2)
    fut1 = executor.submit(node.rpc.waitblockheight, blockheight + 1)
    assert not fut1.done()
    assert not fut2.done()

    # Should take about ~1second and time out.
    with pytest.raises(RpcError):
        node.rpc.waitblockheight(blockheight + 2, 1)

    # Others should still not be done.
    assert not fut1.done()
    assert not fut2.done()

    # Trigger just one more block: only fut1 (blockheight + 1) is satisfied.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [node])
    fut1.result(5)
    assert not fut2.done()

    # One further block reaches blockheight + 2, completing fut2.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [node])
    fut2.result(5)
@unittest.skipIf(not DEVELOPER, "Needs dev-sendcustommsg")
def test_sendcustommsg(node_factory):
    """dev-sendcustommsg delivers odd-typed custom messages to connected
    peers (whether owned by channeld or openingd) and rejects unknown,
    disconnected, and even/spec-reserved message targets."""
    opts = {'log-level': 'io', 'plugin': [
        os.path.join(os.path.dirname(__file__), "plugins", "custommsg_b.py"),
        os.path.join(os.path.dirname(__file__), "plugins", "custommsg_a.py")
    ]}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)
    # l1-l2-l3 have channels (channeld); l4 is merely connected (openingd).
    node_factory.join_nodes([l1, l2, l3])
    l2.connect(l4)
    l3.stop()
    msg = r'ff' * 32
    serialized = r'04070020' + msg

    # This address doesn't exist so we should get an error when we try sending
    # a message to it.
    node_id = '02df5ffe895c778e10f7742a6c5b8a0cefbe9465df58b92fadeb883752c8107c8f'
    with pytest.raises(RpcError, match=r'No such peer'):
        l1.rpc.dev_sendcustommsg(node_id, msg)

    # `l3` is disconnected and we can't send messages to it
    assert(not l2.rpc.listpeers(l3.info['id'])['peers'][0]['connected'])
    with pytest.raises(RpcError, match=r'Peer is not connected'):
        l2.rpc.dev_sendcustommsg(l3.info['id'], msg)

    # We should not be able to send a bogus `ping` message, since it collides
    # with a message defined in the spec, and could potentially mess up our
    # internal state.
    with pytest.raises(RpcError, match=r'Cannot send messages of type 18 .WIRE_PING.'):
        l2.rpc.dev_sendcustommsg(l2.info['id'], r'0012')

    # The sendcustommsg RPC call is currently limited to odd-typed messages,
    # since they will not result in disconnections or even worse channel
    # failures.
    with pytest.raises(RpcError, match=r'Cannot send even-typed [0-9]+ custom message'):
        l2.rpc.dev_sendcustommsg(l2.info['id'], r'00FE')

    # This should work since the peer is currently owned by `channeld`
    l2.rpc.dev_sendcustommsg(l1.info['id'], msg)
    l2.daemon.wait_for_log(
        r'{peer_id}-{owner}-chan#[0-9]: \[OUT\] {serialized}'.format(
            owner='channeld', serialized=serialized, peer_id=l1.info['id']
        )
    )
    l1.daemon.wait_for_log(r'\[IN\] {}'.format(serialized))
    # Both plugins on the receiver should see the message.
    l1.daemon.wait_for_logs([
        r'Got custommessage_a {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id']),
        r'Got custommessage_b {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id'])
    ])

    # This should work since the peer is currently owned by `openingd`
    l2.rpc.dev_sendcustommsg(l4.info['id'], msg)
    l2.daemon.wait_for_log(
        r'{peer_id}-{owner}-chan#[0-9]: \[OUT\] {serialized}'.format(
            owner='openingd', serialized=serialized, peer_id=l4.info['id']
        )
    )
    l4.daemon.wait_for_log(r'\[IN\] {}'.format(serialized))
    l4.daemon.wait_for_logs([
        r'Got custommessage_a {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id']),
        r'Got custommessage_b {serialized} from peer {peer_id}'.format(
            serialized=serialized, peer_id=l2.info['id']),
    ])
def test_sendonionmessage(node_factory):
    """Send onion messages along a 3-node line: addressed by node id, by
    short channel id, and via a blinded path built with devtools/blindedpath."""
    l1, l2, l3 = node_factory.line_graph(3, opts={'experimental-onion-messages': None})

    blindedpathtool = os.path.join(os.path.dirname(__file__), "..", "devtools", "blindedpath")

    # Plain hop-by-node-id delivery.
    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id']},
                  {'id': l3.info['id']}]})
    assert l3.daemon.wait_for_log('Got onionmsg')

    # Now by SCID.
    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id'],
                   'short_channel_id': l2.get_channel_scid(l3)},
                  {'id': l3.info['id']}]})
    assert l3.daemon.wait_for_log('Got onionmsg')

    # Now test blinded path.
    output = subprocess.check_output(
        [blindedpathtool, '--simple-output', 'create', l2.info['id'], l3.info['id']]
    ).decode('ASCII').strip()
    # First line is blinding, then <peerid> then <encblob>.
    blinding, p1, p1enc, p2 = output.split('\n')
    # First hop can't be blinded!
    assert p1 == l2.info['id']

    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id'],
                   'blinding': blinding,
                   'enctlv': p1enc},
                  {'id': p2}]})
    assert l3.daemon.wait_for_log('Got onionmsg')
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs sendonionmessage")
def test_sendonionmessage_reply(node_factory):
    """An onion message carrying a reply_path is answered back to l1
    via the blinded return route."""
    blindedpathtool = os.path.join(os.path.dirname(__file__), "..", "devtools", "blindedpath")

    plugin = os.path.join(os.path.dirname(__file__), "plugins", "onionmessage-reply.py")
    l1, l2, l3 = node_factory.line_graph(3, opts={'plugin': plugin})

    # Make reply path (back through l2 to l1).
    output = subprocess.check_output(
        [blindedpathtool, '--simple-output', 'create', l2.info['id'], l1.info['id']]
    ).decode('ASCII').strip()
    # First line is blinding, then <peerid> then <encblob>.
    blinding, p1, p1enc, p2 = output.split('\n')
    # First hop can't be blinded!
    assert p1 == l2.info['id']

    # Also tests oversize payload which won't fit in 1366-byte onion.
    l1.rpc.call('sendonionmessage',
                {'hops':
                 [{'id': l2.info['id']},
                  {'id': l3.info['id'],
                   'invoice': '77' * 15000}],
                 'reply_path':
                 {'blinding': blinding,
                  'path': [{'id': p1, 'enctlv': p1enc}, {'id': p2}]}})

    assert l3.daemon.wait_for_log('Got onionmsg reply_blinding reply_path')
    assert l3.daemon.wait_for_log("Got onion_message invoice '{}'".format('77' * 15000))
    assert l3.daemon.wait_for_log('Sent reply via')
    assert l1.daemon.wait_for_log('Got onionmsg')
@unittest.skipIf(not DEVELOPER, "needs --dev-force-privkey")
def test_getsharedsecret(node_factory):
    """getsharedsecret matches the BOLT 8 ECDH test vector and is
    symmetric between two nodes."""
    # From BOLT 8 test vectors.
    options = [
        {"dev-force-privkey": "1212121212121212121212121212121212121212121212121212121212121212"},
        {}
    ]
    l1, l2 = node_factory.get_nodes(2, opts=options)

    # Check BOLT 8 test vectors.
    shared_secret = l1.rpc.getsharedsecret("028d7500dd4c12685d1f568b4c2b5048e8534b873319f3a8daa612b469132ec7f7")['shared_secret']
    assert (shared_secret == "1e2fb3c8fe8fb9f262f649f64d26ecf0f2c0a805a767cf02dc2d77a6ef1fdcc3")

    # Clear the forced privkey of l1.
    del l1.daemon.opts["dev-force-privkey"]
    l1.restart()

    # l1 and l2 can generate the same shared secret
    # knowing only the public key of the other.
    assert (l1.rpc.getsharedsecret(l2.info["id"])["shared_secret"]
            == l2.rpc.getsharedsecret(l1.info["id"])["shared_secret"])
def test_commitfee_option(node_factory):
    """`commit-fee` (in percent) scales the feerate used for the
    unilateral-close fee estimate."""
    l1, l2 = node_factory.get_nodes(2, opts=[{"commit-fee": "200"}, {}])

    mock_wu = 5000
    l1.set_feerates((mock_wu, 0, 0, 0), True)
    l2.set_feerates((mock_wu, 0, 0, 0), True)

    fee_200pct = l1.rpc.call("estimatefees")["unilateral_close"]
    fee_default = l2.rpc.call("estimatefees")["unilateral_close"]

    # l1 runs at 200% of l2's commit fee; the factor 4 converts
    # weight units to virtual bytes (WU->VB).
    assert fee_200pct == 2 * fee_default == 2 * 4 * mock_wu
def test_listtransactions(node_factory):
    """The channel-funding txid must appear among listtransactions inputs."""
    l1, l2 = node_factory.get_nodes(2, opts=[{}, {}])

    wallettxid = l1.openchannel(l2, 10**5)["wallettxid"]

    txids = []
    for tx in l1.rpc.listtransactions()["transactions"]:
        txids.extend(inp["txid"] for inp in tx["inputs"])

    # The txid of the transaction funding the channel is present, and
    # represented as little endian (like bitcoind and explorers).
    assert wallettxid in txids
def test_listfunds(node_factory):
    """listfunds shows only unspent outputs by default, and spent ones
    too with spent=True."""
    l1, l2 = node_factory.get_nodes(2, opts=[{}, {}])

    open_txid = l1.openchannel(l2, 10**5)["wallettxid"]

    # only 1 unspent output should be available
    assert len(l1.rpc.listfunds()["outputs"]) == 1

    # With spent=True we additionally see the spent (channel-opening) output.
    all_outputs = l1.rpc.listfunds(spent=True)["outputs"]
    assert len(all_outputs) == 2
    assert open_txid in [entry['txid'] for entry in all_outputs]
| true | true |
1c360089d6ba1e46e87b782962383ee5f96b548f | 22,676 | py | Python | zerver/middleware.py | guettli/zulip | d9431a5e66a97c619aecfe92c4a2cb4acc609431 | [
"Apache-2.0"
] | null | null | null | zerver/middleware.py | guettli/zulip | d9431a5e66a97c619aecfe92c4a2cb4acc609431 | [
"Apache-2.0"
] | null | null | null | zerver/middleware.py | guettli/zulip | d9431a5e66a97c619aecfe92c4a2cb4acc609431 | [
"Apache-2.0"
] | null | null | null | import cProfile
import logging
import time
import traceback
from typing import Any, AnyStr, Callable, Dict, Iterable, List, MutableMapping, Optional, Union
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.db import connection
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.middleware.common import CommonMiddleware
from django.shortcuts import render
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import ugettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from sentry_sdk import capture_exception
from sentry_sdk.integrations.logging import ignore_logger
from zerver.lib.cache import get_remote_cache_requests, get_remote_cache_time
from zerver.lib.db import reset_queries
from zerver.lib.debug import maybe_tracemalloc_listen
from zerver.lib.exceptions import ErrorCode, JsonableError, MissingAuthenticationError, RateLimited
from zerver.lib.html_to_text import get_content_description
from zerver.lib.markdown import get_markdown_requests, get_markdown_time
from zerver.lib.rate_limiter import RateLimitResult
from zerver.lib.response import json_error, json_response_from_error, json_unauthorized
from zerver.lib.subdomains import get_subdomain
from zerver.lib.types import ViewFuncT
from zerver.lib.utils import statsd
from zerver.models import Realm, flush_per_request_caches, get_realm
# Main per-request log; slow_query_logger additionally receives a copy of
# the log line for requests that is_slow_query() deems slow.
logger = logging.getLogger('zulip.requests')
slow_query_logger = logging.getLogger('zulip.slow_queries')
def record_request_stop_data(log_data: MutableMapping[str, Any]) -> None:
    """Snapshot the per-request timing/counter state when processing pauses
    (e.g. a long-polled request going to sleep)."""
    log_data.update(
        time_stopped=time.time(),
        remote_cache_time_stopped=get_remote_cache_time(),
        remote_cache_requests_stopped=get_remote_cache_requests(),
        markdown_time_stopped=get_markdown_time(),
        markdown_requests_stopped=get_markdown_requests(),
    )
    # Profiling is paused together with the request.
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].disable()
def async_request_timer_stop(request: HttpRequest) -> None:
    """Pause request-time accounting (used when a long-polled request is
    suspended)."""
    record_request_stop_data(request._log_data)
def record_request_restart_data(log_data: MutableMapping[str, Any]) -> None:
    """Record fresh baselines when a paused (long-polled) request resumes."""
    # Profiling resumes first, so the bookkeeping below is included.
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].enable()
    log_data.update(
        time_restarted=time.time(),
        remote_cache_time_restarted=get_remote_cache_time(),
        remote_cache_requests_restarted=get_remote_cache_requests(),
        markdown_time_restarted=get_markdown_time(),
        markdown_requests_restarted=get_markdown_requests(),
    )
def async_request_timer_restart(request: HttpRequest) -> None:
    """Resume request-time accounting for a request that was suspended."""
    if "time_restarted" in request._log_data:
        # Don't destroy data when being called from
        # finish_current_handler
        return
    record_request_restart_data(request._log_data)
def record_request_start_data(log_data: MutableMapping[str, Any]) -> None:
    """Initialize the per-request timing/counter baselines (and, when
    configured, a cProfile profiler) in log_data."""
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"] = cProfile.Profile()
        log_data["prof"].enable()

    # Clear Django's accumulated query log so DB time is per-request.
    reset_queries()

    log_data.update(
        time_started=time.time(),
        remote_cache_time_start=get_remote_cache_time(),
        remote_cache_requests_start=get_remote_cache_requests(),
        markdown_time_start=get_markdown_time(),
        markdown_requests_start=get_markdown_requests(),
    )
def timedelta_ms(timedelta: float) -> float:
    """Convert a duration in seconds to milliseconds."""
    return 1000 * timedelta
def format_timedelta(timedelta: float) -> str:
    """Render a duration in seconds as e.g. '2.3s' (>= 1s) or '345ms' (< 1s)."""
    if timedelta < 1:
        return f"{timedelta * 1000:.0f}ms"
    return f"{timedelta:.1f}s"
def is_slow_query(time_delta: float, path: str) -> bool:
    """Decide whether a request duration warrants the slow-query log.

    The general threshold is 1.2s; paths known to be legitimately slow
    get a 5s budget, and webathena_kerberos requests get 10s.
    """
    if time_delta < 1.2:
        return False
    exempt = (
        path in ("/activity", "/json/report/error",
                 "/api/v1/deployments/report_error")
        or path.startswith(("/realm_activity/", "/user_activity/"))
    )
    if exempt:
        return time_delta >= 5
    if 'webathena_kerberos' in path:
        return time_delta >= 10
    return True
# Substrings of statsd path names for which metrics reporting is suppressed
# (confirmation links, static/asset paths, event-polling endpoints, etc.).
#
# FIX: the original list was missing a comma after 'new_realm_send_confirm',
# so Python's implicit string concatenation produced the single entry
# 'new_realm_send_confirm,eventslast_event_id', which could never match a
# statsd path; split back into the intended substrings.  Also dropped a
# duplicate 'json.bots' entry (harmless but redundant for the any() check).
statsd_blacklisted_requests = [
    'do_confirm', 'signup_send_confirm', 'new_realm_send_confirm',
    'events', 'last_event_id', 'webreq.content', 'avatar', 'user_uploads',
    'password.reset', 'static', 'json.bots', 'json.users', 'json.streams',
    'accounts.unsubscribe', 'apple-touch-icon', 'emoji',
    'upload_file', 'realm_activity', 'user_activity',
]
def write_log_line(log_data: MutableMapping[str, Any], path: str, method: str, remote_ip: str,
                   requestor_for_logs: str, client_name: str, status_code: int=200,
                   error_content: Optional[AnyStr]=None,
                   error_content_iter: Optional[Iterable[AnyStr]]=None) -> None:
    """Emit the standard one-line request log entry (and statsd metrics).

    Builds a line of the form
        <ip> <method> <status> <total-time>(<sub-timings>) <path> (<requestor> via <client>)
    and sends it to `logger` (DEBUG for 200/304 GETs of /static, INFO
    otherwise), to `slow_query_logger` when is_slow_query() fires, and
    per-component timings to statsd unless the path is blacklisted.

    Exactly one of error_content / error_content_iter may be given; it is
    used to log the response body for interesting 4xx errors at the end.
    """
    assert error_content is None or error_content_iter is None
    if error_content is not None:
        error_content_iter = (error_content,)

    if settings.STATSD_HOST != '':
        # For statsd timer name
        if path == '/':
            statsd_path = 'webreq'
        else:
            statsd_path = "webreq.{}".format(path[1:].replace('/', '.'))
            # Remove non-ascii chars from path (there should be none, if there are it's
            # because someone manually entered a nonexistent path), as UTF-8 chars make
            # statsd sad when it sends the key name over the socket
            statsd_path = statsd_path.encode('ascii', errors='ignore').decode("ascii")
        # TODO: This could probably be optimized to use a regular expression rather than a loop.
        suppress_statsd = any(blacklisted in statsd_path for blacklisted in statsd_blacklisted_requests)
    else:
        suppress_statsd = True
        statsd_path = ''

    time_delta = -1
    # A time duration of -1 means the StartLogRequests middleware
    # didn't run for some reason
    optional_orig_delta = ""
    if 'time_started' in log_data:
        time_delta = time.time() - log_data['time_started']
    if 'time_stopped' in log_data:
        # The request was long-polled: total time excludes the suspended
        # interval; the wall-clock figure is shown as "(lp: ...)".
        orig_time_delta = time_delta
        time_delta = ((log_data['time_stopped'] - log_data['time_started']) +
                      (time.time() - log_data['time_restarted']))
        optional_orig_delta = f" (lp: {format_timedelta(orig_time_delta)})"

    remote_cache_output = ""
    if 'remote_cache_time_start' in log_data:
        remote_cache_time_delta = get_remote_cache_time() - log_data['remote_cache_time_start']
        remote_cache_count_delta = get_remote_cache_requests() - log_data['remote_cache_requests_start']
        if 'remote_cache_requests_stopped' in log_data:
            # (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
            remote_cache_time_delta += (log_data['remote_cache_time_stopped'] -
                                        log_data['remote_cache_time_restarted'])
            remote_cache_count_delta += (log_data['remote_cache_requests_stopped'] -
                                         log_data['remote_cache_requests_restarted'])

        # Only show cache timing when it is non-negligible (> 5ms).
        if (remote_cache_time_delta > 0.005):
            remote_cache_output = f" (mem: {format_timedelta(remote_cache_time_delta)}/{remote_cache_count_delta})"

        if not suppress_statsd:
            statsd.timing(f"{statsd_path}.remote_cache.time", timedelta_ms(remote_cache_time_delta))
            statsd.incr(f"{statsd_path}.remote_cache.querycount", remote_cache_count_delta)

    startup_output = ""
    if 'startup_time_delta' in log_data and log_data["startup_time_delta"] > 0.005:
        startup_output = " (+start: {})".format(format_timedelta(log_data["startup_time_delta"]))

    markdown_output = ""
    if 'markdown_time_start' in log_data:
        markdown_time_delta = get_markdown_time() - log_data['markdown_time_start']
        markdown_count_delta = get_markdown_requests() - log_data['markdown_requests_start']
        if 'markdown_requests_stopped' in log_data:
            # (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
            markdown_time_delta += (log_data['markdown_time_stopped'] -
                                    log_data['markdown_time_restarted'])
            markdown_count_delta += (log_data['markdown_requests_stopped'] -
                                     log_data['markdown_requests_restarted'])

        if (markdown_time_delta > 0.005):
            markdown_output = f" (md: {format_timedelta(markdown_time_delta)}/{markdown_count_delta})"

        if not suppress_statsd:
            statsd.timing(f"{statsd_path}.markdown.time", timedelta_ms(markdown_time_delta))
            statsd.incr(f"{statsd_path}.markdown.count", markdown_count_delta)

    # Get the amount of time spent doing database queries
    db_time_output = ""
    queries = connection.connection.queries if connection.connection is not None else []
    if len(queries) > 0:
        query_time = sum(float(query.get('time', 0)) for query in queries)
        db_time_output = f" (db: {format_timedelta(query_time)}/{len(queries)}q)"

        if not suppress_statsd:
            # Log ms, db ms, and num queries to statsd
            statsd.timing(f"{statsd_path}.dbtime", timedelta_ms(query_time))
            statsd.incr(f"{statsd_path}.dbq", len(queries))
            statsd.timing(f"{statsd_path}.total", timedelta_ms(time_delta))

    if 'extra' in log_data:
        extra_request_data = " {}".format(log_data['extra'])
    else:
        extra_request_data = ""
    logger_client = f"({requestor_for_logs} via {client_name})"
    logger_timing = f'{format_timedelta(time_delta):>5}{optional_orig_delta}{remote_cache_output}{markdown_output}{db_time_output}{startup_output} {path}'
    logger_line = f'{remote_ip:<15} {method:<7} {status_code:3} {logger_timing}{extra_request_data} {logger_client}'
    # Successful static-content GETs are noisy; demote them to DEBUG.
    if (status_code in [200, 304] and method == "GET" and path.startswith("/static")):
        logger.debug(logger_line)
    else:
        logger.info(logger_line)

    if (is_slow_query(time_delta, path)):
        slow_query_logger.info(logger_line)

    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].disable()
        profile_path = "/tmp/profile.data.{}.{}".format(path.split("/")[-1], int(time_delta * 1000))
        log_data["prof"].dump_stats(profile_path)

    # Log some additional data whenever we return certain 40x errors
    if 400 <= status_code < 500 and status_code not in [401, 404, 405]:
        assert error_content_iter is not None
        error_content_list = list(error_content_iter)
        if not error_content_list:
            error_data = ''
        elif isinstance(error_content_list[0], str):
            error_data = ''.join(error_content_list)
        elif isinstance(error_content_list[0], bytes):
            error_data = repr(b''.join(error_content_list))
        if len(error_data) > 200:
            error_data = "[content more than 200 characters]"
        logger.info('status=%3d, data=%s, uid=%s', status_code, error_data, requestor_for_logs)
class LogRequests(MiddlewareMixin):
    """Middleware that times each request and emits the request-log line.

    Handles both the normal Django path and the Tornado long-polling path,
    where a request may be suspended and resumed before responding.
    """

    # We primarily are doing logging using the process_view hook, but
    # for some views, process_view isn't run, so we call the start
    # method here too
    def process_request(self, request: HttpRequest) -> None:
        maybe_tracemalloc_listen()

        if hasattr(request, "_log_data"):
            # Sanity check to ensure this is being called from the
            # Tornado code path that returns responses asynchronously.
            assert getattr(request, "saved_response", False)

            # Avoid re-initializing request._log_data if it's already there.
            return

        request._log_data = {}
        record_request_start_data(request._log_data)

    def process_view(self, request: HttpRequest, view_func: ViewFuncT,
                     args: List[str], kwargs: Dict[str, Any]) -> None:
        if hasattr(request, "saved_response"):
            # The below logging adjustments are unnecessary (because
            # we've already imported everything) and incorrect
            # (because they'll overwrite data from pre-long-poll
            # request processing) when returning a saved response.
            return

        # process_request was already run; we save the initialization
        # time (i.e. the time between receiving the request and
        # figuring out which view function to call, which is primarily
        # importing modules on the first start)
        request._log_data["startup_time_delta"] = time.time() - request._log_data["time_started"]
        # And then completely reset our tracking to only cover work
        # done as part of this request
        record_request_start_data(request._log_data)

    def process_response(self, request: HttpRequest,
                         response: StreamingHttpResponse) -> StreamingHttpResponse:
        if getattr(response, "asynchronous", False):
            # This special Tornado "asynchronous" response is
            # discarded after going through this code path as Tornado
            # intends to block, so we stop here to avoid unnecessary work.
            return response

        remote_ip = request.META['REMOTE_ADDR']

        # Get the requestor's identifier and client, if available.
        try:
            requestor_for_logs = request._requestor_for_logs
        except Exception:
            if hasattr(request, 'user') and hasattr(request.user, 'format_requestor_for_logs'):
                requestor_for_logs = request.user.format_requestor_for_logs()
            else:
                requestor_for_logs = "unauth@{}".format(get_subdomain(request) or 'root')
        try:
            client = request.client.name
        except Exception:
            client = "?"

        # Streaming responses expose an iterator; regular ones have bytes.
        if response.streaming:
            content_iter = response.streaming_content
            content = None
        else:
            content = response.content
            content_iter = None

        write_log_line(request._log_data, request.path, request.method,
                       remote_ip, requestor_for_logs, client, status_code=response.status_code,
                       error_content=content, error_content_iter=content_iter)
        return response
class JsonErrorHandler(MiddlewareMixin):
    """Convert uncaught exceptions into the appropriate response:
    login redirects / WWW-Authenticate challenges for missing auth,
    structured JSON for JsonableError, and a JSON 500 for everything
    else on JSON-format routes."""

    def __init__(self, get_response: Callable[[Any, WSGIRequest], Union[HttpResponse, BaseException]]) -> None:
        super().__init__(get_response)
        # We report errors to Sentry ourselves via capture_exception below;
        # tell the Sentry logging integration to ignore this logger so the
        # error isn't double-reported.
        ignore_logger("zerver.middleware.json_error_handler")

    def process_exception(self, request: HttpRequest, exception: Exception) -> Optional[HttpResponse]:
        if isinstance(exception, MissingAuthenticationError):
            if 'text/html' in request.META.get('HTTP_ACCEPT', ''):
                # If this looks like a request from a top-level page in a
                # browser, send the user to the login page.
                #
                # TODO: The next part is a bit questionable; it will
                # execute the likely intent for intentionally visiting
                # an API endpoint without authentication in a browser,
                # but that's an unlikely to be done intentionally often.
                return HttpResponseRedirect(f'{settings.HOME_NOT_LOGGED_IN}?next={request.path}')
            if request.path.startswith("/api"):
                # For API routes, ask for HTTP basic auth (email:apiKey).
                return json_unauthorized()
            else:
                # For /json routes, ask for session authentication.
                return json_unauthorized(www_authenticate='session')

        if isinstance(exception, JsonableError):
            return json_response_from_error(exception)
        if request.error_format == "JSON":
            # Unexpected exception on a JSON route: report it and return a
            # generic JSON 500 rather than Django's HTML error page.
            capture_exception(exception)
            json_error_logger = logging.getLogger("zerver.middleware.json_error_handler")
            json_error_logger.error(traceback.format_exc(), extra=dict(request=request))
            return json_error(_("Internal server error"), status=500)
        # None lets Django's normal exception handling take over.
        return None
class TagRequests(MiddlewareMixin):
    """Annotate each request with the error format its path implies
    ("JSON" for /api/ and /json/ routes, "HTML" for everything else)."""

    def process_view(self, request: HttpRequest, view_func: ViewFuncT,
                     args: List[str], kwargs: Dict[str, Any]) -> None:
        self.process_request(request)

    def process_request(self, request: HttpRequest) -> None:
        wants_json = request.path.startswith(("/api/", "/json/"))
        request.error_format = "JSON" if wants_json else "HTML"
class CsrfFailureError(JsonableError):
    """Raised (and serialized to JSON) when CSRF validation fails on a
    JSON-format request."""

    # 403 plus a machine-readable code lets API clients distinguish CSRF
    # failures from other errors; `reason` is included in the error payload.
    http_status_code = 403
    code = ErrorCode.CSRF_FAILED
    data_fields = ['reason']

    def __init__(self, reason: str) -> None:
        self.reason: str = reason

    @staticmethod
    def msg_format() -> str:
        return _("CSRF Error: {reason}")
def csrf_failure(request: HttpRequest, reason: str="") -> HttpResponse:
    """Django CSRF-failure handler: JSON error for JSON-format requests,
    the standard HTML page otherwise."""
    if request.error_format != "JSON":
        return html_csrf_failure(request, reason)
    return json_response_from_error(CsrfFailureError(reason))
class RateLimitMiddleware(MiddlewareMixin):
    """Attach X-RateLimit-* headers to responses and translate a
    RateLimited exception into an HTTP 429 JSON error."""

    def set_response_headers(self, response: HttpResponse,
                             rate_limit_results: List[RateLimitResult]) -> None:
        # When several limits apply, the effective limit and the remaining
        # allowance are the tightest of them; the reset time is the latest.
        response['X-RateLimit-Limit'] = str(
            min(result.entity.max_api_calls() for result in rate_limit_results))
        response['X-RateLimit-Remaining'] = str(
            min(result.remaining for result in rate_limit_results))
        reset_at = time.time() + max(
            result.secs_to_freedom for result in rate_limit_results)
        response['X-RateLimit-Reset'] = str(int(reset_at))

    def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
        # Add X-RateLimit-*** headers, but only when rate limiting is on
        # and some limit was actually applied to this request.
        if settings.RATE_LIMITING and hasattr(request, '_ratelimits_applied'):
            self.set_response_headers(response, request._ratelimits_applied)
        return response

    def process_exception(self, request: HttpRequest,
                          exception: Exception) -> Optional[HttpResponse]:
        if not isinstance(exception, RateLimited):
            return None
        # secs_to_freedom is passed to RateLimited when raising
        secs_to_freedom = float(str(exception))
        response = json_error(
            _("API usage exceeded rate limit"),
            data={'retry-after': secs_to_freedom},
            status=429,
        )
        response['Retry-After'] = secs_to_freedom
        return response
class FlushDisplayRecipientCache(MiddlewareMixin):
    """Clear per-request caches so no state leaks between requests."""
    def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
        # We flush the per-request caches after every request, so they
        # are not shared at all between requests.
        flush_per_request_caches()
        return response
class HostDomainMiddleware(MiddlewareMixin):
    def process_request(self, request: HttpRequest) -> Optional[HttpResponse]:
        """Resolve the realm from the subdomain and attach it as request.realm."""
        # Validate against ALLOWED_HOSTS, which is rather permissive;
        # a mismatch raises DisallowedHost, which Django renders as a 400.
        request.get_host()

        # Skip realm resolution for static assets and API/JSON routes:
        # `get_realm` costs a database query (problematic for Tornado), and
        # the HTML error page below would be wrong for API clients anyway —
        # API authentication produces a JSON-format error for invalid realms.
        if request.path.startswith(("/static/", "/api/", "/json/")):
            return None

        subdomain = get_subdomain(request)
        if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
            return None
        try:
            request.realm = get_realm(subdomain)
        except Realm.DoesNotExist:
            return render(request, "zerver/invalid_realm.html", status=404)
        return None
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
    """
    Set REMOTE_ADDR from the HTTP_X_FORWARDED_FOR header.

    This replicates Django's former SetRemoteAddrFromForwardedFor
    middleware.  Because Zulip sits behind an NGINX reverse proxy, any
    HTTP_X_FORWARDED_FOR present on the request was set by NGINX itself,
    so its value can be trusted.
    """
    def process_request(self, request: HttpRequest) -> None:
        forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if forwarded_for is None:
            return None
        # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs; with
        # our NGINX reverse proxy, the client's IP is the first entry.
        request.META['REMOTE_ADDR'] = forwarded_for.split(",")[0].strip()
def alter_content(request: HttpRequest, content: bytes) -> bytes:
    """Swap the placeholder Open Graph description for the page's real one."""
    description = get_content_description(content, request).encode("utf-8")
    placeholder = request.placeholder_open_graph_description.encode("utf-8")
    return content.replace(placeholder, description)
class FinalizeOpenGraphDescription(MiddlewareMixin):
    """Fill in the real Open Graph description once the response is rendered."""
    def process_response(self, request: HttpRequest,
                         response: StreamingHttpResponse) -> StreamingHttpResponse:
        if getattr(request, "placeholder_open_graph_description", None) is not None:
            # Placeholders are only set on fully-rendered (non-streaming) pages.
            assert not response.streaming
            response.content = alter_content(request, response.content)
        return response
class ZulipCommonMiddleware(CommonMiddleware):
    """
    CommonMiddleware with APPEND_SLASH redirects disabled inside Tornado.

    While APPEND_SLASH has some correctness benefit in encouraging
    clients to implement the API correctly, skipping it saves about
    600us per GET /events query, as its route-resolution logic is
    surprisingly expensive.

    TODO: We should probably extend this behavior to all API routes;
    APPEND_SLASH is really only useful for non-API endpoints like
    /login, but that transition needs more careful testing.
    """
    def should_redirect_with_slash(self, request: HttpRequest) -> bool:
        # Short-circuit entirely inside Tornado; otherwise defer to Django.
        return (not settings.RUNNING_INSIDE_TORNADO
                and super().should_redirect_with_slash(request))
| 47.045643 | 154 | 0.682925 | import cProfile
import logging
import time
import traceback
from typing import Any, AnyStr, Callable, Dict, Iterable, List, MutableMapping, Optional, Union
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.db import connection
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.middleware.common import CommonMiddleware
from django.shortcuts import render
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import ugettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from sentry_sdk import capture_exception
from sentry_sdk.integrations.logging import ignore_logger
from zerver.lib.cache import get_remote_cache_requests, get_remote_cache_time
from zerver.lib.db import reset_queries
from zerver.lib.debug import maybe_tracemalloc_listen
from zerver.lib.exceptions import ErrorCode, JsonableError, MissingAuthenticationError, RateLimited
from zerver.lib.html_to_text import get_content_description
from zerver.lib.markdown import get_markdown_requests, get_markdown_time
from zerver.lib.rate_limiter import RateLimitResult
from zerver.lib.response import json_error, json_response_from_error, json_unauthorized
from zerver.lib.subdomains import get_subdomain
from zerver.lib.types import ViewFuncT
from zerver.lib.utils import statsd
from zerver.models import Realm, flush_per_request_caches, get_realm
# Standard per-request access-log lines.
logger = logging.getLogger('zulip.requests')
# Separate stream so unusually slow requests can be triaged on their own.
slow_query_logger = logging.getLogger('zulip.slow_queries')
def record_request_stop_data(log_data: MutableMapping[str, Any]) -> None:
    """Snapshot timing/counter state when request processing is suspended."""
    log_data.update(
        time_stopped=time.time(),
        remote_cache_time_stopped=get_remote_cache_time(),
        remote_cache_requests_stopped=get_remote_cache_requests(),
        markdown_time_stopped=get_markdown_time(),
        markdown_requests_stopped=get_markdown_requests(),
    )
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].disable()
def async_request_timer_stop(request: HttpRequest) -> None:
    """Pause timing for a request being suspended (e.g. a long-poll)."""
    record_request_stop_data(request._log_data)
def record_request_restart_data(log_data: MutableMapping[str, Any]) -> None:
    """Resume timing/counter tracking when a suspended request restarts."""
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].enable()
    log_data.update(
        time_restarted=time.time(),
        remote_cache_time_restarted=get_remote_cache_time(),
        remote_cache_requests_restarted=get_remote_cache_requests(),
        markdown_time_restarted=get_markdown_time(),
        markdown_requests_restarted=get_markdown_requests(),
    )
def async_request_timer_restart(request: HttpRequest) -> None:
    """Resume timing for a suspended request, at most once."""
    if "time_restarted" in request._log_data:
        # Already restarted — e.g. finish_current_handler ran before us.
        return
    record_request_restart_data(request._log_data)
def record_request_start_data(log_data: MutableMapping[str, Any]) -> None:
    """Initialize per-request timing/counter bookkeeping in log_data."""
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"] = cProfile.Profile()
        log_data["prof"].enable()
    # Drop any queries recorded before this request's work began.
    reset_queries()
    log_data.update(
        time_started=time.time(),
        remote_cache_time_start=get_remote_cache_time(),
        remote_cache_requests_start=get_remote_cache_requests(),
        markdown_time_start=get_markdown_time(),
        markdown_requests_start=get_markdown_requests(),
    )
def timedelta_ms(timedelta: float) -> float:
    """Convert a duration in seconds to milliseconds."""
    return 1000 * timedelta
def format_timedelta(timedelta: float) -> str:
    """Render a duration in seconds compactly: '2.3s' at >= 1s, '450ms' below."""
    if timedelta < 1:
        return f"{timedelta * 1000:.0f}ms"
    return f"{timedelta:.1f}s"
def is_slow_query(time_delta: float, path: str) -> bool:
    """Decide whether a request was slow enough for the slow-query log."""
    if time_delta < 1.2:
        return False
    # Endpoints that are known to be slow get a higher threshold.
    exempt_paths = ("/activity", "/json/report/error",
                    "/api/v1/deployments/report_error")
    if (path in exempt_paths
            or path.startswith(("/realm_activity/", "/user_activity/"))):
        return time_delta >= 5
    if 'webathena_kerberos' in path:
        return time_delta >= 10
    return True
# Substrings of statsd paths whose timing data we do not report, either
# because they are noisy or not meaningful as aggregate timing metrics.
#
# Fixes: the original list was missing a comma after
# 'new_realm_send_confirm', so Python's implicit string concatenation
# fused it with the next line into the single (never-matching) entry
# 'new_realm_send_confirm,eventslast_event_id'.  The duplicate
# 'json.bots' entry has also been dropped.
statsd_blacklisted_requests = [
    'do_confirm', 'signup_send_confirm', 'new_realm_send_confirm',
    'eventslast_event_id', 'webreq.content', 'avatar', 'user_uploads',
    'password.reset', 'static', 'json.bots', 'json.users', 'json.streams',
    'accounts.unsubscribe', 'apple-touch-icon', 'emoji',
    'upload_file', 'realm_activity', 'user_activity',
]
def write_log_line(log_data: MutableMapping[str, Any], path: str, method: str, remote_ip: str,
                   requestor_for_logs: str, client_name: str, status_code: int=200,
                   error_content: Optional[AnyStr]=None,
                   error_content_iter: Optional[Iterable[AnyStr]]=None) -> None:
    """Emit the access-log line for a request and report timing to statsd.

    At most one of error_content / error_content_iter may be passed; for
    interesting 4xx responses, a truncated copy of the error body is
    logged alongside the status line.
    """
    assert error_content is None or error_content_iter is None
    if error_content is not None:
        error_content_iter = (error_content,)
    if settings.STATSD_HOST != '':
        # Compute the statsd timer name from the request path.
        if path == '/':
            statsd_path = 'webreq'
        else:
            statsd_path = "webreq.{}".format(path[1:].replace('/', '.'))
            # Remove non-ascii chars from the path (there should be none;
            # if there are, the path is garbage and we drop those bytes).
            statsd_path = statsd_path.encode('ascii', errors='ignore').decode("ascii")
        suppress_statsd = any(blacklisted in statsd_path for blacklisted in statsd_blacklisted_requests)
    else:
        suppress_statsd = True
        statsd_path = ''
    time_delta = -1
    optional_orig_delta = ""
    if 'time_started' in log_data:
        time_delta = time.time() - log_data['time_started']
    if 'time_stopped' in log_data:
        # The request was suspended (long-polling); report active time only,
        # showing the total wall-clock time as the "lp" figure.
        orig_time_delta = time_delta
        time_delta = ((log_data['time_stopped'] - log_data['time_started']) +
                      (time.time() - log_data['time_restarted']))
        optional_orig_delta = f" (lp: {format_timedelta(orig_time_delta)})"
    remote_cache_output = ""
    if 'remote_cache_time_start' in log_data:
        remote_cache_time_delta = get_remote_cache_time() - log_data['remote_cache_time_start']
        remote_cache_count_delta = get_remote_cache_requests() - log_data['remote_cache_requests_start']
        if 'remote_cache_requests_stopped' in log_data:
            # (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
            remote_cache_time_delta += (log_data['remote_cache_time_stopped'] -
                                        log_data['remote_cache_time_restarted'])
            remote_cache_count_delta += (log_data['remote_cache_requests_stopped'] -
                                         log_data['remote_cache_requests_restarted'])
        if (remote_cache_time_delta > 0.005):
            remote_cache_output = f" (mem: {format_timedelta(remote_cache_time_delta)}/{remote_cache_count_delta})"
        if not suppress_statsd:
            statsd.timing(f"{statsd_path}.remote_cache.time", timedelta_ms(remote_cache_time_delta))
            statsd.incr(f"{statsd_path}.remote_cache.querycount", remote_cache_count_delta)
    startup_output = ""
    if 'startup_time_delta' in log_data and log_data["startup_time_delta"] > 0.005:
        startup_output = " (+start: {})".format(format_timedelta(log_data["startup_time_delta"]))
    markdown_output = ""
    if 'markdown_time_start' in log_data:
        markdown_time_delta = get_markdown_time() - log_data['markdown_time_start']
        markdown_count_delta = get_markdown_requests() - log_data['markdown_requests_start']
        if 'markdown_requests_stopped' in log_data:
            # (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
            markdown_time_delta += (log_data['markdown_time_stopped'] -
                                    log_data['markdown_time_restarted'])
            markdown_count_delta += (log_data['markdown_requests_stopped'] -
                                     log_data['markdown_requests_restarted'])
        if (markdown_time_delta > 0.005):
            markdown_output = f" (md: {format_timedelta(markdown_time_delta)}/{markdown_count_delta})"
        if not suppress_statsd:
            statsd.timing(f"{statsd_path}.markdown.time", timedelta_ms(markdown_time_delta))
            statsd.incr(f"{statsd_path}.markdown.count", markdown_count_delta)
    # Get the amount of time spent doing database queries
    db_time_output = ""
    queries = connection.connection.queries if connection.connection is not None else []
    if len(queries) > 0:
        query_time = sum(float(query.get('time', 0)) for query in queries)
        db_time_output = f" (db: {format_timedelta(query_time)}/{len(queries)}q)"
        if not suppress_statsd:
            # Log ms, db ms, and num queries to statsd
            statsd.timing(f"{statsd_path}.dbtime", timedelta_ms(query_time))
            statsd.incr(f"{statsd_path}.dbq", len(queries))
            statsd.timing(f"{statsd_path}.total", timedelta_ms(time_delta))
    if 'extra' in log_data:
        extra_request_data = " {}".format(log_data['extra'])
    else:
        extra_request_data = ""
    logger_client = f"({requestor_for_logs} via {client_name})"
    logger_timing = f'{format_timedelta(time_delta):>5}{optional_orig_delta}{remote_cache_output}{markdown_output}{db_time_output}{startup_output} {path}'
    logger_line = f'{remote_ip:<15} {method:<7} {status_code:3} {logger_timing}{extra_request_data} {logger_client}'
    if (status_code in [200, 304] and method == "GET" and path.startswith("/static")):
        logger.debug(logger_line)
    else:
        logger.info(logger_line)
    if (is_slow_query(time_delta, path)):
        slow_query_logger.info(logger_line)
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].disable()
        profile_path = "/tmp/profile.data.{}.{}".format(path.split("/")[-1], int(time_delta * 1000))
        log_data["prof"].dump_stats(profile_path)
    # Log some additional data whenever we return certain 40x errors
    if 400 <= status_code < 500 and status_code not in [401, 404, 405]:
        assert error_content_iter is not None
        error_content_list = list(error_content_iter)
        if not error_content_list:
            error_data = ''
        elif isinstance(error_content_list[0], str):
            error_data = ''.join(error_content_list)
        elif isinstance(error_content_list[0], bytes):
            error_data = repr(b''.join(error_content_list))
        else:
            # Defensive fix: previously an unexpected content type left
            # error_data unbound and the logging call below raised NameError.
            error_data = ''
        if len(error_data) > 200:
            error_data = "[content more than 200 characters]"
        logger.info('status=%3d, data=%s, uid=%s', status_code, error_data, requestor_for_logs)
class LogRequests(MiddlewareMixin):
    # We primarily are doing logging using the process_view hook, but
    # for some views, process_view isn't run, so we call the start
    # logic from process_request as well (guarded so it only runs once).
    def process_request(self, request: HttpRequest) -> None:
        """Begin (or re-enter) timing bookkeeping as early as possible."""
        maybe_tracemalloc_listen()
        if hasattr(request, "_log_data"):
            # Only a request being re-processed with a saved response
            # should already carry _log_data.
            assert getattr(request, "saved_response", False)
            return
        request._log_data = {}
        record_request_start_data(request._log_data)
    def process_view(self, request: HttpRequest, view_func: ViewFuncT,
                     args: List[str], kwargs: Dict[str, Any]) -> None:
        """Restart timing once the view is resolved, recording startup cost."""
        if hasattr(request, "saved_response"):
            # The below logging adjustments are unnecessary (because
            # we've already imported everything) and incorrect (they would
            # double-count request processing) when returning a saved response.
            return
        # process_request was already run; we save the initialization
        # time (i.e. the time between receiving the request and
        # figuring out which view function to call, which is primarily
        # importing modules on the first start)
        request._log_data["startup_time_delta"] = time.time() - request._log_data["time_started"]
        # And then completely reset our tracking to only cover work
        # done as part of this request
        record_request_start_data(request._log_data)
    def process_response(self, request: HttpRequest,
                         response: StreamingHttpResponse) -> StreamingHttpResponse:
        """Write the access-log line once the response is ready."""
        if getattr(response, "asynchronous", False):
            # This special Tornado "asynchronous" response is
            # discarded after going through this code path as Tornado
            # intends to block, so we stop here to avoid unnecessary work.
            return response
        remote_ip = request.META['REMOTE_ADDR']
        # Get the requestor's identifier and client, if available.
        try:
            requestor_for_logs = request._requestor_for_logs
        except Exception:
            if hasattr(request, 'user') and hasattr(request.user, 'format_requestor_for_logs'):
                requestor_for_logs = request.user.format_requestor_for_logs()
            else:
                requestor_for_logs = "unauth@{}".format(get_subdomain(request) or 'root')
        try:
            client = request.client.name
        except Exception:
            client = "?"
        if response.streaming:
            content_iter = response.streaming_content
            content = None
        else:
            content = response.content
            content_iter = None
        write_log_line(request._log_data, request.path, request.method,
                       remote_ip, requestor_for_logs, client, status_code=response.status_code,
                       error_content=content, error_content_iter=content_iter)
        return response
class JsonErrorHandler(MiddlewareMixin):
    """Convert known exception types into appropriate JSON error responses."""
    def __init__(self, get_response: Callable[[Any, WSGIRequest], Union[HttpResponse, BaseException]]) -> None:
        super().__init__(get_response)
        # We capture unexpected exceptions to Sentry explicitly below;
        # silence its logging integration to avoid double-reporting.
        ignore_logger("zerver.middleware.json_error_handler")
    def process_exception(self, request: HttpRequest, exception: Exception) -> Optional[HttpResponse]:
        if isinstance(exception, MissingAuthenticationError):
            if 'text/html' in request.META.get('HTTP_ACCEPT', ''):
                # Bounce browser requests to the login page.
                return HttpResponseRedirect(f'{settings.HOME_NOT_LOGGED_IN}?next={request.path}')
            if request.path.startswith("/api"):
                # For API routes, ask for HTTP basic auth (email:apiKey).
                return json_unauthorized()
            else:
                # For /json routes, ask for session authentication.
                return json_unauthorized(www_authenticate='session')
        if isinstance(exception, JsonableError):
            return json_response_from_error(exception)
        if request.error_format == "JSON":
            # Unexpected exception: report to Sentry, log the traceback,
            # and return a generic 500 in JSON format.
            capture_exception(exception)
            json_error_logger = logging.getLogger("zerver.middleware.json_error_handler")
            json_error_logger.error(traceback.format_exc(), extra=dict(request=request))
            return json_error(_("Internal server error"), status=500)
        return None
class TagRequests(MiddlewareMixin):
    """Record whether errors for this request should render as JSON or HTML."""

    def process_view(self, request: HttpRequest, view_func: ViewFuncT,
                     args: List[str], kwargs: Dict[str, Any]) -> None:
        # Delegate so views resolved late still get tagged.
        self.process_request(request)

    def process_request(self, request: HttpRequest) -> None:
        if request.path.startswith(("/api/", "/json/")):
            request.error_format = "JSON"
            return
        request.error_format = "HTML"
class CsrfFailureError(JsonableError):
    """JSON-renderable CSRF validation failure; renders as HTTP 403."""
    http_status_code = 403
    code = ErrorCode.CSRF_FAILED
    # `reason` is included in the JSON error payload.
    data_fields = ['reason']
    def __init__(self, reason: str) -> None:
        self.reason: str = reason
    @staticmethod
    def msg_format() -> str:
        return _("CSRF Error: {reason}")
def csrf_failure(request: HttpRequest, reason: str="") -> HttpResponse:
    """Dispatch a CSRF failure to the JSON or HTML error renderer."""
    wants_json = request.error_format == "JSON"
    return (json_response_from_error(CsrfFailureError(reason)) if wants_json
            else html_csrf_failure(request, reason))
class RateLimitMiddleware(MiddlewareMixin):
    """Add X-RateLimit-* headers and map RateLimited exceptions to HTTP 429."""
    def set_response_headers(self, response: HttpResponse,
                             rate_limit_results: List[RateLimitResult]) -> None:
        # The limit on the action that was requested is the minimum of the limits that get applied:
        limit = min(result.entity.max_api_calls() for result in rate_limit_results)
        response['X-RateLimit-Limit'] = str(limit)
        # Same principle applies to remaining api calls:
        remaining_api_calls = min(result.remaining for result in rate_limit_results)
        response['X-RateLimit-Remaining'] = str(remaining_api_calls)
        # The full reset time is the maximum of the reset times for the limits that get applied:
        reset_time = time.time() + max(result.secs_to_freedom for result in rate_limit_results)
        response['X-RateLimit-Reset'] = str(int(reset_time))
    def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
        if not settings.RATE_LIMITING:
            return response
        # Add X-RateLimit-*** headers
        if hasattr(request, '_ratelimits_applied'):
            self.set_response_headers(response, request._ratelimits_applied)
        return response
    def process_exception(self, request: HttpRequest,
                          exception: Exception) -> Optional[HttpResponse]:
        if isinstance(exception, RateLimited):
            secs_to_freedom = float(str(exception))  # secs_to_freedom is passed to RateLimited when raising
            resp = json_error(
                _("API usage exceeded rate limit"),
                data={'retry-after': secs_to_freedom},
                status=429,
            )
            resp['Retry-After'] = secs_to_freedom
            return resp
        return None
class FlushDisplayRecipientCache(MiddlewareMixin):
    """Drop per-request caches at the end of every request."""
    def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
        # We flush the per-request caches after every request, so they
        # are not shared at all between requests.
        flush_per_request_caches()
        return response
class HostDomainMiddleware(MiddlewareMixin):
    def process_request(self, request: HttpRequest) -> Optional[HttpResponse]:
        """Attach request.realm based on the request's subdomain."""
        # Hosts outside ALLOWED_HOSTS raise DisallowedHost here (a 400).
        request.get_host()

        # Realm resolution costs a database query (problematic under
        # Tornado), and the HTML error page below is only appropriate for
        # a browser — API authentication produces its own JSON-format
        # error for invalid realms — so skip these path prefixes.
        if request.path.startswith(("/static/", "/api/", "/json/")):
            return None

        realm_subdomain = get_subdomain(request)
        if realm_subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
            return None
        try:
            request.realm = get_realm(realm_subdomain)
        except Realm.DoesNotExist:
            return render(request, "zerver/invalid_realm.html", status=404)
        return None
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
    """Copy the trusted NGINX X-Forwarded-For client IP into REMOTE_ADDR."""
    def process_request(self, request: HttpRequest) -> None:
        header = request.META.get('HTTP_X_FORWARDED_FOR')
        if header is None:
            return None
        # The header may hold a comma-separated chain of IPs; for our
        # NGINX reverse proxy, the client's IP is the first entry.
        client_ip, _sep, _rest = header.partition(",")
        request.META['REMOTE_ADDR'] = client_ip.strip()
def alter_content(request: HttpRequest, content: bytes) -> bytes:
    """Substitute the page's real description for the Open Graph placeholder."""
    real_description = get_content_description(content, request)
    placeholder_bytes = request.placeholder_open_graph_description.encode("utf-8")
    return content.replace(placeholder_bytes, real_description.encode("utf-8"))
class FinalizeOpenGraphDescription(MiddlewareMixin):
    """Swap in the real Open Graph description after rendering completes."""
    def process_response(self, request: HttpRequest,
                         response: StreamingHttpResponse) -> StreamingHttpResponse:
        if getattr(request, "placeholder_open_graph_description", None) is not None:
            # Only non-streaming responses can carry a placeholder.
            assert not response.streaming
            response.content = alter_content(request, response.content)
        return response
class ZulipCommonMiddleware(CommonMiddleware):
    """CommonMiddleware variant with APPEND_SLASH disabled under Tornado."""
    def should_redirect_with_slash(self, request: HttpRequest) -> bool:
        # APPEND_SLASH route resolution is surprisingly expensive
        # (~600us per GET /events), so skip it entirely inside Tornado.
        return (not settings.RUNNING_INSIDE_TORNADO
                and super().should_redirect_with_slash(request))
| true | true |
1c3600a9bb030e65a2b7b15ea8f5314f024fe8ac | 3,512 | py | Python | HyperAPI/hdp_api/routes/dashboards.py | MarouenDevs/HyperAPI | dc9390fbb47d235669cdf0b6c1be4608ffdab83f | [
"BSD-3-Clause"
] | null | null | null | HyperAPI/hdp_api/routes/dashboards.py | MarouenDevs/HyperAPI | dc9390fbb47d235669cdf0b6c1be4608ffdab83f | [
"BSD-3-Clause"
] | null | null | null | HyperAPI/hdp_api/routes/dashboards.py | MarouenDevs/HyperAPI | dc9390fbb47d235669cdf0b6c1be4608ffdab83f | [
"BSD-3-Clause"
] | null | null | null | from HyperAPI.hdp_api.routes import Resource, Route
from HyperAPI.hdp_api.routes.base.version_management import available_since
class Dashboards(Resource):
    """REST routes for dashboard resources.

    Every operation exists in a dataset-scoped form
    (``/projects/{id}/datasets/{id}/dashboards``) and, since HDP 3.1, a
    project-scoped form (``/projects/{id}/dashboards``).  Note that the
    API uses POST for updates and for deletion (via a ``/delete`` suffix)
    rather than PUT/DELETE.
    """
    name = "Dashboards"
    # List the dashboards of a dataset.
    class _Dashboards(Route):
        name = "getDashboards"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID
        }
    # List the dashboards of a project (HDP >= 3.1).
    @available_since('3.1')
    class _ProjectDashboards(Route):
        name = "getProjectDashboards"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID
        }
    # Fetch a single dashboard of a dataset.
    class _GetDashboard(Route):
        name = "getDashboard"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    # Fetch a single project-scoped dashboard (HDP >= 3.1).
    @available_since('3.1')
    class _GetProjectDashboard(Route):
        name = "getProjectDashboard"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    # Create a dashboard on a dataset.
    class _addDashboard(Route):
        name = "addDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID
        }
    # Create a project-scoped dashboard (HDP >= 3.1).
    @available_since('3.1')
    class _addProjectDashboard(Route):
        name = "addProjectDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID
        }
    # Update a dashboard of a dataset (POST, not PUT).
    class _updateDashboard(Route):
        name = "updateDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    # Update a project-scoped dashboard (HDP >= 3.1).
    @available_since('3.1')
    class _updateProjectDashboard(Route):
        name = "updateProjectDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    # Delete a dashboard of a dataset (POST on a /delete suffix).
    class _deleteDashboard(Route):
        name = "deleteDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards/{dashboard_ID}/delete"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    # Delete a project-scoped dashboard (HDP >= 3.1).
    @available_since('3.1')
    class _deleteProjectDashboard(Route):
        name = "deleteProjectDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/dashboards/{dashboard_ID}/delete"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
| 34.097087 | 94 | 0.626708 | from HyperAPI.hdp_api.routes import Resource, Route
from HyperAPI.hdp_api.routes.base.version_management import available_since
class Dashboards(Resource):
    """Dashboard API routes (dataset-scoped, plus project-scoped since 3.1).

    Update and delete are both POST requests; delete uses a ``/delete``
    path suffix instead of the HTTP DELETE verb.
    """
    name = "Dashboards"
    class _Dashboards(Route):
        # GET: list dashboards of a dataset.
        name = "getDashboards"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID
        }
    @available_since('3.1')
    class _ProjectDashboards(Route):
        # GET: list dashboards of a project.
        name = "getProjectDashboards"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID
        }
    class _GetDashboard(Route):
        # GET: fetch one dataset dashboard.
        name = "getDashboard"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    @available_since('3.1')
    class _GetProjectDashboard(Route):
        # GET: fetch one project dashboard.
        name = "getProjectDashboard"
        httpMethod = Route.GET
        path = "/projects/{project_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    class _addDashboard(Route):
        # POST: create a dataset dashboard.
        name = "addDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID
        }
    @available_since('3.1')
    class _addProjectDashboard(Route):
        # POST: create a project dashboard.
        name = "addProjectDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/dashboards"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID
        }
    class _updateDashboard(Route):
        # POST: update a dataset dashboard.
        name = "updateDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    @available_since('3.1')
    class _updateProjectDashboard(Route):
        # POST: update a project dashboard.
        name = "updateProjectDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/dashboards/{dashboard_ID}"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    class _deleteDashboard(Route):
        # POST on /delete: remove a dataset dashboard.
        name = "deleteDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/datasets/{dataset_ID}/dashboards/{dashboard_ID}/delete"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dataset_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
    @available_since('3.1')
    class _deleteProjectDashboard(Route):
        # POST on /delete: remove a project dashboard.
        name = "deleteProjectDashboard"
        httpMethod = Route.POST
        path = "/projects/{project_ID}/dashboards/{dashboard_ID}/delete"
        _path_keys = {
            'project_ID': Route.VALIDATOR_OBJECTID,
            'dashboard_ID': Route.VALIDATOR_OBJECTID
        }
| true | true |
1c36013e2722c6858e1dc215105ee2c17bb2d760 | 1,744 | py | Python | config/wsgi.py | kanhirun/safe-transaction-service | 9bd6103be7d77469a337b6f02c8e0693e7951e4c | [
"MIT"
] | 5 | 2021-01-28T17:41:42.000Z | 2021-11-14T17:09:18.000Z | config/wsgi.py | kanhirun/safe-transaction-service | 9bd6103be7d77469a337b6f02c8e0693e7951e4c | [
"MIT"
] | 8 | 2022-03-15T18:39:45.000Z | 2022-03-28T01:28:13.000Z | config/wsgi.py | kanhirun/safe-transaction-service | 9bd6103be7d77469a337b6f02c8e0693e7951e4c | [
"MIT"
] | 5 | 2021-04-06T17:20:02.000Z | 2022-01-13T10:58:08.000Z | """
WSGI config for Gnosis Safe Transaction Service project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# safe_transaction_service directory.
app_path = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
# NOTE: mutates sys.path at import time; must run before the Django app loads.
sys.path.append(os.path.join(app_path, 'safe_transaction_service'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 41.52381 | 79 | 0.801032 | import os
import sys
from django.core.wsgi import get_wsgi_application
# Make the interior safe_transaction_service package importable.
app_path = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'safe_transaction_service'))
# Default to production settings unless the environment already set them.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# WSGI entry point used by servers via Django's WSGI_APPLICATION setting.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true | true |
1c36018823573bb44fb59b9e5a571b4427cfb7c9 | 2,513 | py | Python | research/serializers.py | wakaflorien/cst-research-api | 4c9dfe0892f2f9211904dd566347e1edd0e34853 | [
"MIT"
] | 1 | 2021-08-10T07:48:09.000Z | 2021-08-10T07:48:09.000Z | research/serializers.py | wakaflorien/cst-research-api | 4c9dfe0892f2f9211904dd566347e1edd0e34853 | [
"MIT"
] | null | null | null | research/serializers.py | wakaflorien/cst-research-api | 4c9dfe0892f2f9211904dd566347e1edd0e34853 | [
"MIT"
] | 1 | 2021-08-16T12:31:26.000Z | 2021-08-16T12:31:26.000Z | from rest_framework import serializers
from .models import *
class ResearchSerializer(serializers.ModelSerializer):
    """Serialize Tbl_Research; hidden `staff` is forced to the request user."""
    staff = serializers.HiddenField(
        default = serializers.CurrentUserDefault()
    )
    class Meta:
        model = Tbl_Research
        fields = '__all__'
class ConferenceSerializer(serializers.ModelSerializer):
    """Serialize Tbl_conference; hidden `staff` is forced to the request user."""
    staff = serializers.HiddenField(
        default = serializers.CurrentUserDefault()
    )
    class Meta:
        model = Tbl_conference
        fields = '__all__'
class ChapterBasedSerializer(serializers.ModelSerializer):
    """Serialize Tbl_chap_based (chapter-based publication) records.

    Exposes every model field; the old commented-out explicit field list
    (id/title/author/email) was dead code and has been removed.
    """
    class Meta:
        model = Tbl_chap_based
        fields = '__all__'
class JournalTbSerializer(serializers.ModelSerializer):
    """Serialize JournalTb; hidden `staff` is forced to the request user."""
    staff = serializers.HiddenField(
        default = serializers.CurrentUserDefault()
    )
    class Meta:
        model = JournalTb
        fields = "__all__"
class BookBasedSerializer(serializers.ModelSerializer):
    """Serialize BookBased (book publication) records with all fields."""
    class Meta:
        model = BookBased
        fields = "__all__"
class ColaboratorSerializer(serializers.ModelSerializer):
    """Serialize Colaborator records; hidden `staff` = request user.

    NOTE(review): "Colaborator" spelling matches the model name.
    """
    staff = serializers.HiddenField(default = serializers.CurrentUserDefault())
    class Meta:
        model = Colaborator
        fields = "__all__"
class CommunityEngagementSerializer(serializers.ModelSerializer):
    """Serialize CommunityEngagement; hidden `staff` = request user."""
    staff = serializers.HiddenField(default = serializers.CurrentUserDefault())
    class Meta:
        model = CommunityEngagement
        fields = '__all__'
class MentorshipSerializer(serializers.ModelSerializer):
    """Serialize all fields of ``MentorShip``; ``staff`` is hidden and auto-filled with the request user."""

    staff = serializers.HiddenField(default=serializers.CurrentUserDefault())

    class Meta:
        model = MentorShip
        fields = '__all__'
class PeerReviewedInternationalSerializer(serializers.ModelSerializer):
    """Serialize all fields of ``PeerReviewedInternational``."""

    class Meta:
        model = PeerReviewedInternational
        fields = '__all__'
class ResearchInnovationSerializer(serializers.ModelSerializer):
    """Serialize all fields of ``ResearchInnovation``."""

    class Meta:
        model = ResearchInnovation
        fields = '__all__'
class AuthorsSerializer(serializers.ModelSerializer):
    """Serialize all fields of ``Authors``."""

    class Meta:
        model = Authors
        fields = '__all__'
class CommunitySerializer(serializers.ModelSerializer):
    # Read/write serializer over all CommunityEngagement fields.
    # NOTE(review): near-duplicate of CommunityEngagementSerializer (same
    # model, same fields) but without the hidden ``staff`` default --
    # consider consolidating unless both variants are intentional.
    class Meta:
        model = CommunityEngagement
        fields = '__all__'
class MentorSerializer(serializers.ModelSerializer):
    # Read/write serializer over all MentorShip fields.
    # NOTE(review): near-duplicate of MentorshipSerializer (same model,
    # same fields) but without the hidden ``staff`` default -- consider
    # consolidating unless both variants are intentional.
    class Meta:
        model = MentorShip
        fields = '__all__'
| 25.907216 | 80 | 0.663351 | from rest_framework import serializers
from .models import *
class ResearchSerializer(serializers.ModelSerializer):
staff = serializers.HiddenField(
default = serializers.CurrentUserDefault()
)
class Meta:
model = Tbl_Research
fields = '__all__'
class ConferenceSerializer(serializers.ModelSerializer):
staff = serializers.HiddenField(
default = serializers.CurrentUserDefault()
)
class Meta:
model = Tbl_conference
fields = '__all__'
class ChapterBasedSerializer(serializers.ModelSerializer):
class Meta:
model = Tbl_chap_based
fields = '__all__'
class JournalTbSerializer(serializers.ModelSerializer):
staff = serializers.HiddenField(
default = serializers.CurrentUserDefault()
)
class Meta:
model = JournalTb
fields = "__all__"
class BookBasedSerializer(serializers.ModelSerializer):
class Meta:
model = BookBased
fields = "__all__"
class ColaboratorSerializer(serializers.ModelSerializer):
staff = serializers.HiddenField(default = serializers.CurrentUserDefault())
class Meta:
model = Colaborator
fields = "__all__"
class CommunityEngagementSerializer(serializers.ModelSerializer):
staff = serializers.HiddenField(default = serializers.CurrentUserDefault())
class Meta:
model = CommunityEngagement
fields = '__all__'
class MentorshipSerializer(serializers.ModelSerializer):
staff = serializers.HiddenField(default = serializers.CurrentUserDefault())
class Meta:
model = MentorShip
fields = '__all__'
class PeerReviewedInternationalSerializer(serializers.ModelSerializer):
class Meta:
model = PeerReviewedInternational
fields = '__all__'
class ResearchInnovationSerializer(serializers.ModelSerializer):
class Meta:
model = ResearchInnovation
fields = '__all__'
class AuthorsSerializer(serializers.ModelSerializer):
class Meta:
model = Authors
fields = '__all__'
class CommunitySerializer(serializers.ModelSerializer):
class Meta:
model = CommunityEngagement
fields = '__all__'
class MentorSerializer(serializers.ModelSerializer):
class Meta:
model = MentorShip
fields = '__all__'
| true | true |
1c3601ee6f560ca43bd1fe942944a722f2ab0737 | 391 | py | Python | counter/counter/asgi.py | Rajan917/Python-Django-Projects | e47ac342bf4a047931ed6a1784dbc3d4321aac59 | [
"MIT"
] | 3 | 2022-03-20T10:50:04.000Z | 2022-03-20T10:50:15.000Z | counter/counter/asgi.py | Rajan917/Python-Django-Projects | e47ac342bf4a047931ed6a1784dbc3d4321aac59 | [
"MIT"
] | null | null | null | counter/counter/asgi.py | Rajan917/Python-Django-Projects | e47ac342bf4a047931ed6a1784dbc3d4321aac59 | [
"MIT"
] | null | null | null | """
ASGI config for counter project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application

# Point Django at the project settings before building the application;
# setdefault() lets an externally exported DJANGO_SETTINGS_MODULE win.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'counter.settings')

# Module-level ASGI callable discovered by servers such as daphne/uvicorn.
application = get_asgi_application()
| 23 | 78 | 0.785166 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'counter.settings')
application = get_asgi_application()
| true | true |
1c360299626f54c716aa4aecbead375a80472123 | 2,013 | py | Python | DeepJanus-BeamNG/udacity_integration/take-free-screenshot.py | TrackerSB/deepjanus | f715072645b1e6f2cc0c51ec0c3a6296cfb14a1d | [
"MIT"
] | null | null | null | DeepJanus-BeamNG/udacity_integration/take-free-screenshot.py | TrackerSB/deepjanus | f715072645b1e6f2cc0c51ec0c3a6296cfb14a1d | [
"MIT"
] | null | null | null | DeepJanus-BeamNG/udacity_integration/take-free-screenshot.py | TrackerSB/deepjanus | f715072645b1e6f2cc0c51ec0c3a6296cfb14a1d | [
"MIT"
] | null | null | null | import random
import numpy as np
from beamng_brewer import BeamNGBrewer
from beamng_waypoint import BeamNGWaypoint
from road_storage import RoadStorage
from simulation_data_collector import SimulationDataCollector
from beamng_tig_maps import maps
# Fix both RNGs so road sampling / any stochastic setup is reproducible.
random.seed(42)
np.random.seed(42)
# Ensure the custom BeamNG map content is present before the scenario starts.
maps.install_map_if_needed()
def get_node_coords(node):
    """Return the (x, y, z) coordinates of a road node; extra entries are ignored."""
    x, y, z = node[0], node[1], node[2]
    return x, y, z


def distance(p1, p2):
    """Euclidean distance between the xyz coordinates of two nodes."""
    delta = np.subtract(get_node_coords(p1), get_node_coords(p2))
    return np.linalg.norm(delta)
# --- Scenario setup -------------------------------------------------------
# Load a stored road (index 2) and build the BeamNG scenario around it.
nodes = RoadStorage().get_road_nodes_by_index(2)
brewer = BeamNGBrewer(road_nodes=nodes)
# Goal waypoint placed at road node 40 (xyz only).
waypoint_goal = BeamNGWaypoint('waypoint_goal', nodes[40][:3])
# Write road + waypoint definitions into the generated map items file.
maps.beamng_map.generated().write_items(brewer.decal_road.to_json() + '\n' + waypoint_goal.to_json())
# Start the vehicle at the first road point.
brewer.vehicle_start_pose = brewer.road_points.vehicle_start_pose(road_point_index=0)
vehicle = brewer.setup_vehicle()
brewer.setup_scenario_camera()
beamng = brewer.beamng
steps = brewer.params.beamng_steps
sim_data_collector = SimulationDataCollector(vehicle, beamng, brewer.decal_road, brewer.params)
brewer.bring_up()
ai_aggression = None # 1.0 -- set to a float (e.g. 1.0) to enable AI driving
sim_save_path = 'screenshot'
# With an aggression value the built-in AI drives toward the goal waypoint;
# otherwise the AI is disabled (vehicle stays put for the screenshots).
if ai_aggression:
    vehicle.ai_set_aggression(ai_aggression)
    vehicle.ai_drive_in_lane(True)
    vehicle.ai_set_waypoint(waypoint_goal.name)
else:
    vehicle.ai_set_mode("disabled")
def start():
    # Step the simulation, collecting vehicle state each iteration; once the
    # vehicle is within 15 m of the goal waypoint, save three top-down
    # screenshots at decreasing camera heights and stop.
    # NOTE(review): `idx` is unused and the `pass` under the `if` is dead --
    # confirm this nesting matches the intended flow before refactoring.
    for idx in range(1000):
        sim_data_collector.collect_current_data()
        last_state = sim_data_collector.states[-1]
        if distance(last_state.pos, waypoint_goal.position) < 15.0:
            pass

            def shot(dir, h=-25):
                # Place the scenario camera above the vehicle's xy position
                # at height `h`, facing `dir`, and save one RGB frame.
                brewer.camera.pose.pos = tuple(last_state.pos[:2]) + (h,)
                brewer.camera.pose.rot = dir
                brewer.camera.get_rgb_image().save(f'shot{dir}_h{h}.png')

            shot((0, 0, -90), -25)
            shot((0, 0, -90), -20)
            shot((0, 0, -90), -15)
            # brewer.camera.resolution = (800,600)
            break

        beamng.step(steps)
# Always persist the collected simulation data and shut BeamNG down,
# even if the screenshot loop raises.
try:
    start()
finally:
    sim_data_collector.save()
    beamng.close()
| 26.84 | 101 | 0.717834 | import random
import numpy as np
from beamng_brewer import BeamNGBrewer
from beamng_waypoint import BeamNGWaypoint
from road_storage import RoadStorage
from simulation_data_collector import SimulationDataCollector
from beamng_tig_maps import maps
random.seed(42)
np.random.seed(42)
maps.install_map_if_needed()
def get_node_coords(node):
return node[0], node[1], node[2]
def distance(p1, p2):
return np.linalg.norm(np.subtract(get_node_coords(p1), get_node_coords(p2)))
nodes = RoadStorage().get_road_nodes_by_index(2)
brewer = BeamNGBrewer(road_nodes=nodes)
waypoint_goal = BeamNGWaypoint('waypoint_goal', nodes[40][:3])
maps.beamng_map.generated().write_items(brewer.decal_road.to_json() + '\n' + waypoint_goal.to_json())
brewer.vehicle_start_pose = brewer.road_points.vehicle_start_pose(road_point_index=0)
vehicle = brewer.setup_vehicle()
brewer.setup_scenario_camera()
beamng = brewer.beamng
steps = brewer.params.beamng_steps
sim_data_collector = SimulationDataCollector(vehicle, beamng, brewer.decal_road, brewer.params)
brewer.bring_up()
ai_aggression = None
sim_save_path = 'screenshot'
if ai_aggression:
vehicle.ai_set_aggression(ai_aggression)
vehicle.ai_drive_in_lane(True)
vehicle.ai_set_waypoint(waypoint_goal.name)
else:
vehicle.ai_set_mode("disabled")
def start():
for idx in range(1000):
sim_data_collector.collect_current_data()
last_state = sim_data_collector.states[-1]
if distance(last_state.pos, waypoint_goal.position) < 15.0:
pass
def shot(dir, h=-25):
brewer.camera.pose.pos = tuple(last_state.pos[:2]) + (h,)
brewer.camera.pose.rot = dir
brewer.camera.get_rgb_image().save(f'shot{dir}_h{h}.png')
shot((0, 0, -90), -25)
shot((0, 0, -90), -20)
shot((0, 0, -90), -15)
break
beamng.step(steps)
try:
start()
finally:
sim_data_collector.save()
beamng.close()
| true | true |
1c36030bb858ef30ae2b6036c5c9ce4b88c467f5 | 164 | py | Python | notes/gammatone_filters/resrc/plot.py | bingo-todd/bingo-todd.github.io | 87cafeb11e53b8633b6dd3226a2ca3d5b3b823e2 | [
"CC-BY-4.0"
] | null | null | null | notes/gammatone_filters/resrc/plot.py | bingo-todd/bingo-todd.github.io | 87cafeb11e53b8633b6dd3226a2ca3d5b3b823e2 | [
"CC-BY-4.0"
] | null | null | null | notes/gammatone_filters/resrc/plot.py | bingo-todd/bingo-todd.github.io | 87cafeb11e53b8633b6dd3226a2ca3d5b3b823e2 | [
"CC-BY-4.0"
] | null | null | null | import sys
sys.path.append('/home/st/Work_Space/module_st/Gammatone-filters/')
from APGTF import APGTF
#
gtf = APGTF(fs=16e3,freq_low=80,freq_high=5e3,N_band=30)
| 23.428571 | 67 | 0.77439 | import sys
sys.path.append('/home/st/Work_Space/module_st/Gammatone-filters/')
from APGTF import APGTF
gtf = APGTF(fs=16e3,freq_low=80,freq_high=5e3,N_band=30)
| true | true |
1c36037060904220c57750babc5d5512fe96cac1 | 1,538 | py | Python | clustering/fas_con_mat.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | 1 | 2020-09-23T00:57:20.000Z | 2020-09-23T00:57:20.000Z | clustering/fas_con_mat.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | null | null | null | clustering/fas_con_mat.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | null | null | null | from FT.weighted_tracts import *
from os.path import join as pjoin
from dipy.io.streamline import load_trk
if __name__ == '__main__':
subj = all_subj_folders
names = all_subj_names
index_to_text_file = r'C:\Users\hila\data\megaatlas\megaatlas2nii.txt'
fig_types = ['SLF','AF']
weight_by='1.5_2_AxPasi5'
for s, n in zip(subj, names):
folder_name = subj_folder + s
dir_name = folder_name + '\streamlines'
bvec_file = load_dwi_files(folder_name)[6]
lab_labels_index = nodes_by_index_mega(folder_name)[0]
file_list = os.listdir(dir_name)
file_list = [l for l in file_list if 'mct001rt20_4d' in l]
for fig_type in fig_types:
for file in file_list:
if fig_type in file and '.trk' in file and '_L' in file:
fascicle_file_name_l = pjoin(dir_name, file)
s_l = load_trk(fascicle_file_name_l, "same", bbox_valid_check=False)
s_l = s_l.streamlines
elif fig_type in file and '.trk' in file and '_R' in file:
fascicle_file_name_r = pjoin(dir_name, file)
s_r = load_trk(fascicle_file_name_r, "same", bbox_valid_check=False)
s_r = s_r.streamlines
s_l.extend(s_r)
streamlines = s_l
weighted_connectivity_matrix_mega(streamlines, folder_name, bvec_file, fig_type=fig_type,
weight_by=weight_by)
| 45.235294 | 102 | 0.60143 | from FT.weighted_tracts import *
from os.path import join as pjoin
from dipy.io.streamline import load_trk
if __name__ == '__main__':
subj = all_subj_folders
names = all_subj_names
index_to_text_file = r'C:\Users\hila\data\megaatlas\megaatlas2nii.txt'
fig_types = ['SLF','AF']
weight_by='1.5_2_AxPasi5'
for s, n in zip(subj, names):
folder_name = subj_folder + s
dir_name = folder_name + '\streamlines'
bvec_file = load_dwi_files(folder_name)[6]
lab_labels_index = nodes_by_index_mega(folder_name)[0]
file_list = os.listdir(dir_name)
file_list = [l for l in file_list if 'mct001rt20_4d' in l]
for fig_type in fig_types:
for file in file_list:
if fig_type in file and '.trk' in file and '_L' in file:
fascicle_file_name_l = pjoin(dir_name, file)
s_l = load_trk(fascicle_file_name_l, "same", bbox_valid_check=False)
s_l = s_l.streamlines
elif fig_type in file and '.trk' in file and '_R' in file:
fascicle_file_name_r = pjoin(dir_name, file)
s_r = load_trk(fascicle_file_name_r, "same", bbox_valid_check=False)
s_r = s_r.streamlines
s_l.extend(s_r)
streamlines = s_l
weighted_connectivity_matrix_mega(streamlines, folder_name, bvec_file, fig_type=fig_type,
weight_by=weight_by)
| true | true |
1c3604465198644c072359ff4d494d3b339b52f9 | 2,720 | py | Python | tools/bm_runtime/simple_pre_lag/ttypes.py | gustavo978/helpful | 59e3fd062cff4451c9bf8268df78a24f93ff67b7 | [
"Unlicense"
] | null | null | null | tools/bm_runtime/simple_pre_lag/ttypes.py | gustavo978/helpful | 59e3fd062cff4451c9bf8268df78a24f93ff67b7 | [
"Unlicense"
] | null | null | null | tools/bm_runtime/simple_pre_lag/ttypes.py | gustavo978/helpful | 59e3fd062cff4451c9bf8268df78a24f93ff67b7 | [
"Unlicense"
] | 2 | 2018-06-06T14:10:23.000Z | 2020-04-07T17:20:55.000Z | #
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class McOperationErrorCode:
  """Status codes for multicast (PRE) table operations (thrift-generated enum)."""
  TABLE_FULL = 1
  INVALID_MGID = 2
  INVALID_MGRP_HANDLE = 3
  INVALID_L1_HANDLE = 4
  ERROR = 5

  _VALUES_TO_NAMES = {
    1: "TABLE_FULL",
    2: "INVALID_MGID",
    3: "INVALID_MGRP_HANDLE",
    4: "INVALID_L1_HANDLE",
    5: "ERROR",
  }

  # Reverse lookup derived from the forward map so the two cannot drift apart.
  _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class InvalidMcOperation(TException):
  """Thrift exception raised when a multicast (PRE) operation fails.

  Generated by the Thrift compiler (0.9.2); do not hand-edit the wire
  encode/decode logic.

  Attributes:
   - code: an McOperationErrorCode value describing the failure
  """

  # Field metadata used by the accelerated (fastbinary) codec.
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'code', None, None, ), # 1
  )

  def __init__(self, code=None,):
    self.code = code

  def read(self, iprot):
    # Deserialize this struct from `iprot`. Use the C-accelerated decoder
    # when the protocol/transport support it, otherwise fall back to the
    # generic field-by-field loop (unknown fields are skipped).
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.code = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize this struct to `oprot`; a None field is simply omitted.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('InvalidMcOperation')
    if self.code is not None:
      oprot.writeFieldBegin('code', TType.I32, 1)
      oprot.writeI32(self.code)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields, nothing to validate.
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    # 17/31 polynomial hash over the single field.
    value = 17
    value = (value * 31) ^ hash(self.code)
    return value

  def __repr__(self):
    # NOTE(review): dict.iteritems() is Python 2 only -- this generated file
    # targets Python 2; regenerate with a newer Thrift for Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality over all instance attributes.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
| 24.727273 | 188 | 0.665441 |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class McOperationErrorCode:
TABLE_FULL = 1
INVALID_MGID = 2
INVALID_MGRP_HANDLE = 3
INVALID_L1_HANDLE = 4
ERROR = 5
_VALUES_TO_NAMES = {
1: "TABLE_FULL",
2: "INVALID_MGID",
3: "INVALID_MGRP_HANDLE",
4: "INVALID_L1_HANDLE",
5: "ERROR",
}
_NAMES_TO_VALUES = {
"TABLE_FULL": 1,
"INVALID_MGID": 2,
"INVALID_MGRP_HANDLE": 3,
"INVALID_L1_HANDLE": 4,
"ERROR": 5,
}
class InvalidMcOperation(TException):
thrift_spec = (
None,
(1, TType.I32, 'code', None, None, ),
)
def __init__(self, code=None,):
self.code = code
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.code = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('InvalidMcOperation')
if self.code is not None:
oprot.writeFieldBegin('code', TType.I32, 1)
oprot.writeI32(self.code)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.code)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| true | true |
1c3604c1e0dee5313de62253ae2dd0020284d2a7 | 17,512 | py | Python | docs/chapters/jep/jep-2/flow.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 2 | 2021-06-18T11:55:15.000Z | 2021-08-30T20:15:46.000Z | docs/chapters/jep/jep-2/flow.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | null | null | null | docs/chapters/jep/jep-2/flow.py | abreu4/jina | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | [
"Apache-2.0"
] | 1 | 2020-08-28T09:08:29.000Z | 2020-08-28T09:08:29.000Z | from typing import Union, List, Iterator
from .base import BaseFlow
from ..clients.base import InputFnType, CallbackFnType
from ..enums import DataInputType
from ..helper import deprecated_alias
# Imported only for type annotations; the branch never runs at runtime
# (pre-dates the typing.TYPE_CHECKING idiom).
if False:
    import numpy as np
class Flow(BaseFlow):
    """A :class:`BaseFlow` with sugary helpers for the CRUD request types
    (train / index / search / update / delete) plus convenience readers for
    numpy arrays, text lines and files.

    Every helper builds a client via ``self._get_client(**kwargs)`` and
    forwards the input iterator together with three optional callbacks:

    - ``on_done``: called when the :class:`Request` object is resolved
    - ``on_error``: called when the :class:`Request` object is rejected
    - ``on_always``: called when the :class:`Request` object is either
      resolved or rejected

    All helpers return whatever the underlying client call returns.
    """

    @deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
    def train(self, input_fn: InputFnType = None,
              on_done: CallbackFnType = None,
              on_error: CallbackFnType = None,
              on_always: CallbackFnType = None,
              **kwargs):
        """Do training on the current flow.

        It will start a :py:class:`CLIClient` and call :py:func:`train`.

        Example,

        .. highlight:: python
        .. code-block:: python

            with f:
                f.train(input_fn)
                ...

        One may also build a reader/generator on your own:

        .. highlight:: python
        .. code-block:: python

            def my_reader():
                for _ in range(10):
                    yield b'abcdfeg'  # each yield generates a document for training

            with f.build(runtime='thread') as flow:
                flow.train(bytes_gen=my_reader())

        :param input_fn: an iterator of bytes; if not given, then you have to specify it in ``kwargs``
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        return self._get_client(**kwargs).train(input_fn, on_done, on_error, on_always, **kwargs)

    @deprecated_alias(output_fn='on_done')
    def index_ndarray(self, array: 'np.ndarray', axis: int = 0, size: int = None, shuffle: bool = False,
                      on_done: CallbackFnType = None,
                      on_error: CallbackFnType = None,
                      on_always: CallbackFnType = None,
                      **kwargs):
        """Use a numpy ndarray as the index source for the current flow.

        :param array: the numpy ndarray data source
        :param axis: iterate over that axis
        :param size: the maximum number of the sub arrays
        :param shuffle: shuffle the numpy data source beforehand
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        from ..clients.sugary_io import _input_ndarray
        return self._get_client(**kwargs).index(_input_ndarray(array, axis, size, shuffle),
                                                on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)

    @deprecated_alias(output_fn='on_done')
    def search_ndarray(self, array: 'np.ndarray', axis: int = 0, size: int = None, shuffle: bool = False,
                       on_done: CallbackFnType = None,
                       on_error: CallbackFnType = None,
                       on_always: CallbackFnType = None,
                       **kwargs):
        """Use a numpy ndarray as the query source for searching on the current flow.

        :param array: the numpy ndarray data source
        :param axis: iterate over that axis
        :param size: the maximum number of the sub arrays
        :param shuffle: shuffle the numpy data source beforehand
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        from ..clients.sugary_io import _input_ndarray
        # Return the client result for consistency with the other helpers
        # (previously the result was silently dropped).
        return self._get_client(**kwargs).search(_input_ndarray(array, axis, size, shuffle),
                                                 on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)

    @deprecated_alias(output_fn='on_done')
    def index_lines(self, lines: Iterator[str] = None, filepath: str = None, size: int = None,
                    sampling_rate: float = None, read_mode='r',
                    on_done: CallbackFnType = None,
                    on_error: CallbackFnType = None,
                    on_always: CallbackFnType = None,
                    **kwargs):
        """Use a list of lines as the index source for indexing on the current flow.

        :param lines: a list of strings, each is considered as a document
        :param filepath: a text file that each line contains a document
        :param size: the maximum number of the documents
        :param sampling_rate: the sampling rate between [0, 1]
        :param read_mode: specifies the mode in which the file
                is opened. 'r' for reading in text mode, 'rb' for reading in binary
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        from ..clients.sugary_io import _input_lines
        return self._get_client(**kwargs).index(_input_lines(lines, filepath, size, sampling_rate, read_mode),
                                                on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)

    @deprecated_alias(output_fn='on_done')
    def index_files(self, patterns: Union[str, List[str]], recursive: bool = True,
                    size: int = None, sampling_rate: float = None, read_mode: str = None,
                    on_done: CallbackFnType = None,
                    on_error: CallbackFnType = None,
                    on_always: CallbackFnType = None,
                    **kwargs):
        r"""Use a set of files as the index source for indexing on the current flow.

        :param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]'
        :param recursive: If recursive is true, the pattern '**' will match any files and
                    zero or more directories and subdirectories.
        :param size: the maximum number of the files
        :param sampling_rate: the sampling rate between [0, 1]
        :param read_mode: specifies the mode in which the file
                is opened. 'r' for reading in text mode, 'rb' for reading in binary mode
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        from ..clients.sugary_io import _input_files
        return self._get_client(**kwargs).index(_input_files(patterns, recursive, size, sampling_rate, read_mode),
                                                on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)

    @deprecated_alias(output_fn='on_done')
    def search_files(self, patterns: Union[str, List[str]], recursive: bool = True,
                     size: int = None, sampling_rate: float = None, read_mode: str = None,
                     on_done: CallbackFnType = None,
                     on_error: CallbackFnType = None,
                     on_always: CallbackFnType = None,
                     **kwargs):
        r"""Use a set of files as the query source for searching on the current flow.

        :param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]'
        :param recursive: If recursive is true, the pattern '**' will match any files and
                    zero or more directories and subdirectories.
        :param size: the maximum number of the files
        :param sampling_rate: the sampling rate between [0, 1]
        :param read_mode: specifies the mode in which the file
                is opened. 'r' for reading in text mode, 'rb' for reading in
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        from ..clients.sugary_io import _input_files
        return self._get_client(**kwargs).search(_input_files(patterns, recursive, size, sampling_rate, read_mode),
                                                 on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)

    @deprecated_alias(output_fn='on_done')
    def search_lines(self, filepath: str = None, lines: Iterator[str] = None, size: int = None,
                     sampling_rate: float = None, read_mode='r',
                     on_done: CallbackFnType = None,
                     on_error: CallbackFnType = None,
                     on_always: CallbackFnType = None,
                     **kwargs):
        """Use a list of lines as the query source for searching on the current flow.

        :param filepath: a text file that each line contains a document
        :param lines: a list of strings, each is considered as a document
        :param size: the maximum number of the documents
        :param sampling_rate: the sampling rate between [0, 1]
        :param read_mode: specifies the mode in which the file
                is opened. 'r' for reading in text mode, 'rb' for reading in binary
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        from ..clients.sugary_io import _input_lines
        return self._get_client(**kwargs).search(_input_lines(lines, filepath, size, sampling_rate, read_mode),
                                                 on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)

    @deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
    def index(self, input_fn: InputFnType = None,
              on_done: CallbackFnType = None,
              on_error: CallbackFnType = None,
              on_always: CallbackFnType = None,
              **kwargs):
        """Do indexing on the current flow.

        It will start a :py:class:`CLIClient` and call :py:func:`index`.
        See :meth:`train` for usage examples with a custom reader/generator.

        :param input_fn: an iterator of bytes; if not given, then you have to specify it in ``kwargs``
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        return self._get_client(**kwargs).index(input_fn, on_done, on_error, on_always, **kwargs)

    @deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
    def update(self, input_fn: InputFnType = None,
               on_done: CallbackFnType = None,
               on_error: CallbackFnType = None,
               on_always: CallbackFnType = None,
               **kwargs):
        """Update documents on the current flow.

        It will start a :py:class:`CLIClient` and call :py:func:`update`.
        See :meth:`train` for usage examples with a custom reader/generator.

        :param input_fn: an iterator of bytes; if not given, then you have to specify it in ``kwargs``
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        # Return the client result for consistency with `index`/`search`
        # (previously the result was silently dropped).
        return self._get_client(**kwargs).update(input_fn, on_done, on_error, on_always, **kwargs)

    @deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
    def delete(self, input_fn: InputFnType = None,
               on_done: CallbackFnType = None,
               on_error: CallbackFnType = None,
               on_always: CallbackFnType = None,
               **kwargs):
        """Do deletion on the current flow.

        It will start a :py:class:`CLIClient` and call :py:func:`delete`.
        See :meth:`train` for usage examples with a custom reader/generator.

        :param input_fn: an iterator of bytes; if not given, then you have to specify it in ``kwargs``
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        # Return the client result for consistency with `index`/`search`
        # (previously the result was silently dropped).
        return self._get_client(**kwargs).delete(input_fn, on_done, on_error, on_always, **kwargs)

    @deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
    def search(self, input_fn: InputFnType = None,
               on_done: CallbackFnType = None,
               on_error: CallbackFnType = None,
               on_always: CallbackFnType = None,
               **kwargs):
        """Do searching on the current flow.

        It will start a :py:class:`CLIClient` and call :py:func:`search`.
        See :meth:`train` for usage examples with a custom reader/generator.

        :param input_fn: an iterator of bytes; if not given, then you have to specify it in ``kwargs``
        :param on_done: the function to be called when the :class:`Request` object is resolved.
        :param on_error: the function to be called when the :class:`Request` object is rejected.
        :param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
        :param kwargs: accepts all keyword arguments of `jina client` CLI
        """
        return self._get_client(**kwargs).search(input_fn, on_done, on_error, on_always, **kwargs)
| 57.042345 | 120 | 0.622887 | from typing import Union, List, Iterator
from .base import BaseFlow
from ..clients.base import InputFnType, CallbackFnType
from ..enums import DataInputType
from ..helper import deprecated_alias
if False:
import numpy as np
class Flow(BaseFlow):
@deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
def train(self, input_fn: InputFnType = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
return self._get_client(**kwargs).train(input_fn, on_done, on_error, on_always, **kwargs)
@deprecated_alias(output_fn='on_done')
def index_ndarray(self, array: 'np.ndarray', axis: int = 0, size: int = None, shuffle: bool = False,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
from ..clients.sugary_io import _input_ndarray
return self._get_client(**kwargs).index(_input_ndarray(array, axis, size, shuffle),
on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)
@deprecated_alias(output_fn='on_done')
def search_ndarray(self, array: 'np.ndarray', axis: int = 0, size: int = None, shuffle: bool = False,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
from ..clients.sugary_io import _input_ndarray
self._get_client(**kwargs).search(_input_ndarray(array, axis, size, shuffle),
on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)
@deprecated_alias(output_fn='on_done')
def index_lines(self, lines: Iterator[str] = None, filepath: str = None, size: int = None,
sampling_rate: float = None, read_mode='r',
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
from ..clients.sugary_io import _input_lines
return self._get_client(**kwargs).index(_input_lines(lines, filepath, size, sampling_rate, read_mode),
on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)
@deprecated_alias(output_fn='on_done')
def index_files(self, patterns: Union[str, List[str]], recursive: bool = True,
size: int = None, sampling_rate: float = None, read_mode: str = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
from ..clients.sugary_io import _input_files
return self._get_client(**kwargs).index(_input_files(patterns, recursive, size, sampling_rate, read_mode),
on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)
@deprecated_alias(output_fn='on_done')
def search_files(self, patterns: Union[str, List[str]], recursive: bool = True,
size: int = None, sampling_rate: float = None, read_mode: str = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
from ..clients.sugary_io import _input_files
return self._get_client(**kwargs).search(_input_files(patterns, recursive, size, sampling_rate, read_mode),
on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)
@deprecated_alias(output_fn='on_done')
def search_lines(self, filepath: str = None, lines: Iterator[str] = None, size: int = None,
sampling_rate: float = None, read_mode='r',
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
from ..clients.sugary_io import _input_lines
return self._get_client(**kwargs).search(_input_lines(lines, filepath, size, sampling_rate, read_mode),
on_done, on_error, on_always, data_type=DataInputType.CONTENT, **kwargs)
@deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
def index(self, input_fn: InputFnType = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
return self._get_client(**kwargs).index(input_fn, on_done, on_error, on_always, **kwargs)
@deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
def update(self, input_fn: InputFnType = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
self._get_client(**kwargs).update(input_fn, on_done, on_error, on_always, **kwargs)
@deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
def delete(self, input_fn: InputFnType = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
self._get_client(**kwargs).delete(input_fn, on_done, on_error, on_always, **kwargs)
@deprecated_alias(buffer='input_fn', callback='on_done', output_fn='on_done')
def search(self, input_fn: InputFnType = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs):
return self._get_client(**kwargs).search(input_fn, on_done, on_error, on_always, **kwargs)
| true | true |
1c36053e57cf3440c729741fa2ff29e67112c592 | 1,695 | py | Python | blobcity/utils/ProblemType.py | Bhumika0201/autoai | 8a1ba4453395798cf694c49ac481ba1d37989fb4 | [
"Apache-2.0"
] | null | null | null | blobcity/utils/ProblemType.py | Bhumika0201/autoai | 8a1ba4453395798cf694c49ac481ba1d37989fb4 | [
"Apache-2.0"
] | null | null | null | blobcity/utils/ProblemType.py | Bhumika0201/autoai | 8a1ba4453395798cf694c49ac481ba1d37989fb4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 BlobCity, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python file consists of Function to identify the problem type either Classification or Regression Type.
"""
import numpy as np
class ProType:
    """Decides whether a target column defines a classification or a regression task."""

    # Numeric dtypes that may still encode class labels when cardinality is low.
    _NUMERIC_DTYPES = ('int64', 'float64', 'int32', 'float32', 'int16', 'float16')

    def __init__(self):
        pass

    def checkType(self, data):
        """Classify the prediction task implied by ``data``.

        :param data: target column values (anything with a ``dtype`` attribute
            that ``np.unique`` accepts, e.g. a numpy array or pandas Series).
        :return: ``{'type': 'Classification'}`` when the dtype is object, or
            when the dtype is numeric with at most 10 distinct values;
            ``{'type': 'Regression'}`` otherwise.
        """
        if data.dtype == 'object':
            # String/object targets are always treated as class labels.
            return {'type': 'Classification'}
        distinct_values = len(np.unique(data))
        is_numeric = any(data.dtype == name for name in self._NUMERIC_DTYPES)
        if is_numeric and distinct_values <= 10:
            return {'type': 'Classification'}
        return {'type': 'Regression'}
| 37.666667 | 117 | 0.673156 |
import numpy as np
class ProType:
def __init__(self):
pass
def checkType(self,data):
if(data.dtype in ['object']): return dict({'type':'Classification'})
else:
target_length =len(np.unique(data))
if data.dtype in ['int64','float64','int32','float32','int16','float16'] and target_length<=10:
return dict({'type':'Classification'})
else:
return dict({'type':'Regression'})
| true | true |
1c360552de1eae163d3ff95488b65c547d6d11ea | 1,480 | py | Python | anonymonkey/schemas.py | hugoatease/anonymonkey | 56e6465a7b58238a43c1a46c7c9c77966054153d | [
"Apache-2.0"
] | null | null | null | anonymonkey/schemas.py | hugoatease/anonymonkey | 56e6465a7b58238a43c1a46c7c9c77966054153d | [
"Apache-2.0"
] | null | null | null | anonymonkey/schemas.py | hugoatease/anonymonkey | 56e6465a7b58238a43c1a46c7c9c77966054153d | [
"Apache-2.0"
] | null | null | null | from flask.ext.mongoengine import MongoEngine
db = MongoEngine()
class User(db.Document):
    # Account document keyed by ``sub`` (presumably the OpenID Connect
    # subject claim, given the stored ``id_token`` -- confirm against the
    # auth flow).  ``admin`` defaults to False for newly created users.
    sub = db.StringField(required=True, primary_key=True)
    id_token = db.StringField(required=True)
    email = db.StringField(required=True)
    admin = db.BooleanField(required=True, default=False)
    first_name = db.StringField(required=True)
    last_name = db.StringField(required=True)
class QuestionOption(db.EmbeddedDocument):
id = db.StringField(required=True)
name = db.StringField(required=True)
class Question(db.EmbeddedDocument):
    # A single survey question.  ``type`` is restricted to the five widget
    # kinds listed in ``choices``; ``options`` presumably only applies to
    # the radio/checkbox/select types -- confirm in the rendering code.
    # NOTE(review): the field name shadows the builtin ``type`` inside this
    # class body.
    id = db.StringField(required=True)
    name = db.StringField(required=True)
    description = db.StringField()
    type = db.StringField(required=True, choices=['text', 'paragraph', 'radio', 'checkbox', 'select'])
    options = db.EmbeddedDocumentListField(QuestionOption)
class Survey(db.Document):
author = db.ReferenceField(User, required=True)
name = db.StringField(required=True)
authority_url = db.StringField(required=True)
description = db.StringField()
questions = db.EmbeddedDocumentListField(Question)
class AnswerItem(db.EmbeddedDocument):
question = db.StringField(required=True)
answer = db.DynamicField()
class Answer(db.Document):
survey = db.ReferenceField(Survey, required=True)
answers = db.EmbeddedDocumentListField(AnswerItem)
class TokenBlacklist(db.Document):
survey = db.ReferenceField(Survey, required=True)
token = db.StringField(required=True)
| 30.204082 | 102 | 0.739189 | from flask.ext.mongoengine import MongoEngine
db = MongoEngine()
class User(db.Document):
sub = db.StringField(required=True, primary_key=True)
id_token = db.StringField(required=True)
email = db.StringField(required=True)
admin = db.BooleanField(required=True, default=False)
first_name = db.StringField(required=True)
last_name = db.StringField(required=True)
class QuestionOption(db.EmbeddedDocument):
id = db.StringField(required=True)
name = db.StringField(required=True)
class Question(db.EmbeddedDocument):
id = db.StringField(required=True)
name = db.StringField(required=True)
description = db.StringField()
type = db.StringField(required=True, choices=['text', 'paragraph', 'radio', 'checkbox', 'select'])
options = db.EmbeddedDocumentListField(QuestionOption)
class Survey(db.Document):
author = db.ReferenceField(User, required=True)
name = db.StringField(required=True)
authority_url = db.StringField(required=True)
description = db.StringField()
questions = db.EmbeddedDocumentListField(Question)
class AnswerItem(db.EmbeddedDocument):
question = db.StringField(required=True)
answer = db.DynamicField()
class Answer(db.Document):
survey = db.ReferenceField(Survey, required=True)
answers = db.EmbeddedDocumentListField(AnswerItem)
class TokenBlacklist(db.Document):
survey = db.ReferenceField(Survey, required=True)
token = db.StringField(required=True)
| true | true |
1c36061c9a038d7757b87b859b7bfc49c79a6ee3 | 184 | py | Python | Lib/fontTools/ttLib/tables/G_P_O_S_.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 240 | 2021-01-11T14:49:24.000Z | 2022-03-29T22:33:49.000Z | Lib/fontTools/ttLib/tables/G_P_O_S_.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 77 | 2021-01-12T20:23:30.000Z | 2022-03-28T12:14:34.000Z | Lib/fontTools/ttLib/tables/G_P_O_S_.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 28 | 2021-01-17T05:44:11.000Z | 2022-01-11T19:58:46.000Z | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from .otBase import BaseTTXConverter
class table_G_P_O_S_(BaseTTXConverter):
	"""The Glyph Positioning ('GPOS') table.

	No overrides: all compiling/decompiling and XML conversion behavior
	is inherited unchanged from BaseTTXConverter.
	"""
	pass
| 23 | 64 | 0.842391 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from .otBase import BaseTTXConverter
class table_G_P_O_S_(BaseTTXConverter):
pass
| true | true |
1c3606c75a682f31ea7158232187d398d2a86387 | 387 | py | Python | python/mcap/mcap0/opcode.py | jameskuszmaul-brt/mcap | bce6560d743359288a75c709ec16cd1c450069c9 | [
"Apache-2.0"
] | 46 | 2022-02-07T18:58:11.000Z | 2022-03-21T22:03:54.000Z | python/mcap/mcap0/opcode.py | jameskuszmaul-brt/mcap | bce6560d743359288a75c709ec16cd1c450069c9 | [
"Apache-2.0"
] | 135 | 2021-12-02T15:21:58.000Z | 2022-03-31T17:50:23.000Z | python/mcap/mcap0/opcode.py | jameskuszmaul-brt/mcap | bce6560d743359288a75c709ec16cd1c450069c9 | [
"Apache-2.0"
] | 4 | 2022-02-20T12:56:02.000Z | 2022-03-30T20:14:52.000Z | from enum import IntEnum, unique
# Record opcodes, kept in the original alphabetical declaration order.
# Built with the Enum functional API; ``unique`` still rejects any
# accidentally aliased values.
Opcode = unique(IntEnum('Opcode', [
    ('ATTACHMENT', 0x09),
    ('ATTACHMENT_INDEX', 0x0A),
    ('CHANNEL', 0x04),
    ('CHUNK', 0x06),
    ('CHUNK_INDEX', 0x08),
    ('DATA_END', 0x0F),
    ('FOOTER', 0x02),
    ('HEADER', 0x01),
    ('MESSAGE', 0x05),
    ('MESSAGE_INDEX', 0x07),
    ('METADATA', 0x0C),
    ('METADATA_INDEX', 0x0D),
    ('SCHEMA', 0x03),
    ('STATISTICS', 0x0B),
    ('SUMMARY_OFFSET', 0x0E),
]))
| 18.428571 | 32 | 0.651163 | from enum import IntEnum, unique
@unique
class Opcode(IntEnum):
ATTACHMENT = 0x09
ATTACHMENT_INDEX = 0x0A
CHANNEL = 0x04
CHUNK = 0x06
CHUNK_INDEX = 0x08
DATA_END = 0x0F
FOOTER = 0x02
HEADER = 0x01
MESSAGE = 0x05
MESSAGE_INDEX = 0x07
METADATA = 0x0C
METADATA_INDEX = 0x0D
SCHEMA = 0x03
STATISTICS = 0x0B
SUMMARY_OFFSET = 0x0E
| true | true |
1c3607c7119e707f4660ea805ec23c4e91e7f6cc | 451 | py | Python | src/membership/s.py | stdevAntiD2ta/fuzzy-control-system | c58b503846a2b46cd09c3457a411f58bcbad3ec7 | [
"MIT"
] | null | null | null | src/membership/s.py | stdevAntiD2ta/fuzzy-control-system | c58b503846a2b46cd09c3457a411f58bcbad3ec7 | [
"MIT"
] | null | null | null | src/membership/s.py | stdevAntiD2ta/fuzzy-control-system | c58b503846a2b46cd09c3457a411f58bcbad3ec7 | [
"MIT"
] | null | null | null | from .membership import Membership
class SMembership(Membership):
def __init__(self, a, c):
def func(x):
if x <= a:
return 0
if a < x and x <= (a + c) / 2:
return 2 * ((x - a) / (c - a)) ** 2
if (a + c) / 2 < x and x < c:
return 1 - 2 * ((c - x) / (c - a)) ** 2
return 1
super(SMembership, self).__init__(func, [a, (a + c) / 2, c])
| 28.1875 | 68 | 0.405765 | from .membership import Membership
class SMembership(Membership):
def __init__(self, a, c):
def func(x):
if x <= a:
return 0
if a < x and x <= (a + c) / 2:
return 2 * ((x - a) / (c - a)) ** 2
if (a + c) / 2 < x and x < c:
return 1 - 2 * ((c - x) / (c - a)) ** 2
return 1
super(SMembership, self).__init__(func, [a, (a + c) / 2, c])
| true | true |
1c360801b72157ce344e9fdfc868b4e6230f5227 | 430 | py | Python | app/core/migrations/0005_recipe_image.py | elivjo/recipe-app-api | d534601078d8359c2722c0c58ce749f6741a3826 | [
"MIT"
] | null | null | null | app/core/migrations/0005_recipe_image.py | elivjo/recipe-app-api | d534601078d8359c2722c0c58ce749f6741a3826 | [
"MIT"
] | null | null | null | app/core/migrations/0005_recipe_image.py | elivjo/recipe-app-api | d534601078d8359c2722c0c58ce749f6741a3826 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-06 14:42
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
| 21.5 | 93 | 0.62093 |
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
| true | true |
1c360883c8b9f66d32ea685e55532993e097f442 | 11,778 | py | Python | tensorflow/python/kernel_tests/matmul_op_test.py | Kinoo2/tensorflow | e334eb2f95bdece6f0df3eff0cf9c402078fe392 | [
"Apache-2.0"
] | 2 | 2021-06-17T21:26:38.000Z | 2021-06-20T18:25:57.000Z | tensorflow/python/kernel_tests/matmul_op_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 3 | 2021-08-25T15:10:14.000Z | 2022-02-10T04:33:14.000Z | tensorflow/python/kernel_tests/matmul_op_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 3 | 2021-09-26T22:20:25.000Z | 2021-09-26T23:07:13.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
# TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
# os.environ["TF_MATMUL_AUTOTUNE_ENABLE"] = "1" to enable it.
class MatMulMixedType(test_lib.TestCase):
"""Simple test for tf.matmul where Tout is different from T."""
def testBatchMatMulV3OutputType(self):
# TODO(shivaniagrawal): uint8 is not supported for mixed matmul type in XLA.
for (a_dtype, b_dtype) in [(np.int8, np.int8), (np.uint8, np.uint8)]:
a = np.array([[1, 2], [3, 4]], dtype=a_dtype)
b = np.array([[1, 2], [3, 4]], dtype=b_dtype)
c = math_ops.batch_mat_mul_v3(a, b, adj_y=True, Tout=np.int32)
self.assertAllEqual((2, 2), c.shape)
self.assertAllEqual([[5, 11], [11, 25]], c)
def testBatchMatMulV3MixedPrec(self):
# TODO(shivaniagrawal): uint8 is not supported for mixed matmul type in XLA.
np_bf16 = dtypes.bfloat16.as_numpy_dtype
a = np.array([[1, 2], [3, 4]], dtype=np.int8)
b = np.array([[1, 2], [3, 4]], dtype=np_bf16)
c = math_ops.batch_mat_mul_v3(a, b, adj_y=True, Tout=np_bf16)
self.assertAllEqual((2, 2), c.shape)
self.assertAllEqual([[5, 11], [11, 25]], c)
class MatVecTest(test_lib.TestCase):
  """Simple test for matvec, which is sugar on top of matmul."""

  def testTwoByTwoCase(self):
    # matvec contracts the last axis of the 2x2 matrix with the vector and
    # returns a rank-1 result, as the expected values below spell out.
    a = np.array([[1, 2], [3, 4]])
    b = np.array([5, 6])
    c = math_ops.matvec(a, b)
    self.assertAllEqual((2,), c.shape)
    self.assertAllEqual([5 + 2 * 6, 3 * 5 + 4 * 6], c)
def _AddTest(test, op_name, testcase_name, fn):
  """Attaches `fn` to class `test` as method test_<op_name>_<testcase_name>.

  The method is wrapped with `test_util.deprecated_graph_mode_only`; a
  duplicate name raises RuntimeError rather than silently overwriting.
  """
  test_name = "_".join(["test", op_name, testcase_name])
  if hasattr(test, test_name):
    raise RuntimeError("Test %s defined more than once" % test_name)
  setattr(test, test_name, test_util.deprecated_graph_mode_only(fn))
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
class MatMulTest(test_lib.TestCase):
pass # Filled in below
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
@test_util.run_without_tensor_float_32("Tests matmul")
def Test(self):
np_val = np.matrix(a_np_) * np.matrix(b_np_)
use_gpu = True
if a_np_.dtype is np.float16 and (
not test_util.GpuSupportsHalfMatMulAndConv()):
use_gpu = False
print("Built without fp16 matmul support for Cuda, running test on CPU.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
with self.cached_session() as sess, test_util.device(use_gpu):
if use_static_shape_:
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = self.evaluate(res)
else:
a = array_ops.placeholder(a_np_.dtype)
b = array_ops.placeholder(b_np_.dtype)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
self.assertAllCloseAccordingToType(
tf_val,
np_val,
float_rtol=3e-5,
float_atol=3e-5,
half_rtol=0.2,
half_atol=0.2)
return Test
class MatMulGradientTest(test_lib.TestCase):
pass # Will be filled in below.
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
self.skipTest("Skipping infeasible gradient test.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
epsilon = np.finfo(a_np_.dtype).eps
delta = epsilon**(1.0 / 3.0)
tol = 20 * delta
with self.session():
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
[effective_a_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
[effective_b_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
return Test
class MatMulStatsTest(test_lib.TestCase):
  """Verifies the "flops" statistic registered for MatMul graph nodes."""

  @test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
  def testSimpleStatistics(self):
    a = variables.Variable(random_ops.random_normal([25, 16]))
    b = variables.Variable(random_ops.random_normal([16, 9]))
    math_ops.matmul(a, b)
    g = ops.get_default_graph()
    for op in g.get_operations():
      flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
      if op.name == "MatMul":
        # [25, 16] x [16, 9]: 2 * 25 * 16 * 9 = 7200 flops.
        self.assertEqual(7200, flops)

  @test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
  def testTransposedStatistics(self):
    a = variables.Variable(random_ops.random_normal([16, 25]))
    b = variables.Variable(random_ops.random_normal([16, 9]))
    math_ops.matmul(a, b, transpose_a=True)
    g = ops.get_default_graph()
    for op in g.get_operations():
      flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
      if op.name == "MatMul":
        # Same contraction sizes as above, so the flop count is unchanged.
        self.assertEqual(7200, flops)
try:
  # @ operator supported since python 3.5.
  infix_matmul = operator.matmul
except AttributeError:

  # For earlier versions of python, emulate regular behavior.
  # Useful to build and test for 3.5+ on earlier versions.
  def infix_matmul(x, y):  # pylint: disable=invalid-name
    """Emulates the binary ``@`` operator via __matmul__/__rmatmul__."""
    try:
      r = type(x).__matmul__(x, y)
    except AttributeError:
      r = NotImplemented
    # Try the reflected operand only when the types differ, approximating
    # the interpreter's binary-operator dispatch.
    if r is NotImplemented and type(x) is not type(y):
      try:
        r = type(y).__rmatmul__(y, x)
      except AttributeError:
        r = NotImplemented
    if r is NotImplemented:
      raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
                      .format(type(x).__name__, type(y).__name__))
    return r
class MatMulInfixOperatorTest(test_lib.TestCase):
def testMismatchedShape(self):
with self.assertRaisesRegex(
Exception, (r"(In\[0\] and In\[1\] has different ndims|In\[0\] "
r"ndims must be >= 2|Shape must be rank 2 but is rank 1)")):
infix_matmul(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
def testMismatchedDimensions(self):
with self.assertRaisesRegex(
Exception,
r"(In\[0\] mismatch In\[1\] shape|Dimensions must be equal)"):
infix_matmul(
ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
@test_util.run_v1_only("Tensor.op is generally not applicable in TF 2")
def testInfixMatmulIsTfMatmul(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
self.assertEqual(c.op.type, "MatMul")
def testInfixMatmulDoesDotProduct(self):
a = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
b = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
c = infix_matmul(a, b)
d = math_ops.matmul(a, b)
self.assertAllEqual(c, d)
if __name__ == "__main__":
sizes = [1, 3, 5]
trans_options = [[False, False], [True, False], [False, True]]
dtypes_to_test = [
np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
np.complex128
]
# TF2 does not support placeholders under eager so we skip it
for use_static_shape in set([True, tf2.enabled()]):
for dtype in dtypes_to_test:
if test_util.is_xla_enabled() and (dtype == np.int32 or
dtype == np.int64):
# TODO(b/171924639): Enable this test when XLA DOT supports
# integer types.
continue
for m in sizes:
for n in sizes:
for k in sizes:
# Construct compatible random matrices a_np of size [m, k] and b_np
# of size [k, n].
a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
if dtype in (np.complex64, np.complex128):
a_np.imag = np.random.normal(-5, 5,
m * k).astype(dtype).reshape([m, k])
b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
if dtype in (np.complex64, np.complex128):
b_np.imag = np.random.normal(-5, 5,
k * n).astype(dtype).reshape([k, n])
for adjoint_a, transpose_a in trans_options:
for adjoint_b, transpose_b in trans_options:
name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
use_static_shape, dtype.__name__, m, n, k, adjoint_a,
transpose_a, adjoint_b, transpose_b)
_AddTest(MatMulTest, "MatMulTest", name,
_GetMatMulTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
_AddTest(MatMulGradientTest, "MatMulGradientTest", name,
_GetMatMulGradientTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
test_lib.main()
| 38.616393 | 80 | 0.639328 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
class MatMulMixedType(test_lib.TestCase):
def testBatchMatMulV3OutputType(self):
for (a_dtype, b_dtype) in [(np.int8, np.int8), (np.uint8, np.uint8)]:
a = np.array([[1, 2], [3, 4]], dtype=a_dtype)
b = np.array([[1, 2], [3, 4]], dtype=b_dtype)
c = math_ops.batch_mat_mul_v3(a, b, adj_y=True, Tout=np.int32)
self.assertAllEqual((2, 2), c.shape)
self.assertAllEqual([[5, 11], [11, 25]], c)
def testBatchMatMulV3MixedPrec(self):
np_bf16 = dtypes.bfloat16.as_numpy_dtype
a = np.array([[1, 2], [3, 4]], dtype=np.int8)
b = np.array([[1, 2], [3, 4]], dtype=np_bf16)
c = math_ops.batch_mat_mul_v3(a, b, adj_y=True, Tout=np_bf16)
self.assertAllEqual((2, 2), c.shape)
self.assertAllEqual([[5, 11], [11, 25]], c)
class MatVecTest(test_lib.TestCase):
def testTwoByTwoCase(self):
a = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])
c = math_ops.matvec(a, b)
self.assertAllEqual((2,), c.shape)
self.assertAllEqual([5 + 2 * 6, 3 * 5 + 4 * 6], c)
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, test_util.deprecated_graph_mode_only(fn))
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
class MatMulTest(test_lib.TestCase):
pass
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
@test_util.run_without_tensor_float_32("Tests matmul")
def Test(self):
np_val = np.matrix(a_np_) * np.matrix(b_np_)
use_gpu = True
if a_np_.dtype is np.float16 and (
not test_util.GpuSupportsHalfMatMulAndConv()):
use_gpu = False
print("Built without fp16 matmul support for Cuda, running test on CPU.")
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
with self.cached_session() as sess, test_util.device(use_gpu):
if use_static_shape_:
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = self.evaluate(res)
else:
a = array_ops.placeholder(a_np_.dtype)
b = array_ops.placeholder(b_np_.dtype)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
self.assertAllCloseAccordingToType(
tf_val,
np_val,
float_rtol=3e-5,
float_atol=3e-5,
half_rtol=0.2,
half_atol=0.2)
return Test
class MatMulGradientTest(test_lib.TestCase):
pass
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
def Test(self):
if not use_static_shape_ or a_np_.dtype in (np.int32, np.int64, np.float16):
self.skipTest("Skipping infeasible gradient test.")
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
epsilon = np.finfo(a_np_.dtype).eps
delta = epsilon**(1.0 / 3.0)
tol = 20 * delta
with self.session():
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
[effective_a_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda x: math_ops.matmul(effective_a_np, x, **kwargs_),
[effective_b_np],
delta=delta)
self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
return Test
class MatMulStatsTest(test_lib.TestCase):
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testSimpleStatistics(self):
a = variables.Variable(random_ops.random_normal([25, 16]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
@test_util.run_v1_only("Test requires a Graph and NodeDef inspection")
def testTransposedStatistics(self):
a = variables.Variable(random_ops.random_normal([16, 25]))
b = variables.Variable(random_ops.random_normal([16, 9]))
math_ops.matmul(a, b, transpose_a=True)
g = ops.get_default_graph()
for op in g.get_operations():
flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
if op.name == "MatMul":
self.assertEqual(7200, flops)
try:
infix_matmul = operator.matmul
except AttributeError:
def infix_matmul(x, y):
try:
r = type(x).__matmul__(x, y)
except AttributeError:
r = NotImplemented
if r is NotImplemented and type(x) is not type(y):
try:
r = type(y).__rmatmul__(y, x)
except AttributeError:
r = NotImplemented
if r is NotImplemented:
raise TypeError("unsupported operand type(s) for @: '{}' and '{}'"
.format(type(x).__name__, type(y).__name__))
return r
class MatMulInfixOperatorTest(test_lib.TestCase):
  """Exercises the ``@`` infix-operator path for TF matrix multiplication."""

  def testMismatchedShape(self):
    # A rank-1 lhs cannot be matrix-multiplied by a rank-2 rhs.
    with self.assertRaisesRegex(
        Exception, (r"(In\[0\] and In\[1\] has different ndims|In\[0\] "
                    r"ndims must be >= 2|Shape must be rank 2 but is rank 1)")):
      infix_matmul(
          ops.convert_to_tensor([10.0, 20.0, 30.0]),
          ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))

  def testMismatchedDimensions(self):
    # Inner dimensions (3 vs. 2) do not agree.
    with self.assertRaisesRegex(
        Exception,
        r"(In\[0\] mismatch In\[1\] shape|Dimensions must be equal)"):
      infix_matmul(
          ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
          ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))

  @test_util.run_v1_only("Tensor.op is generally not applicable in TF 2")
  def testInfixMatmulIsTfMatmul(self):
    lhs = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
    rhs = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
    product = infix_matmul(lhs, rhs)
    self.assertEqual(product.op.type, "MatMul")

  def testInfixMatmulDoesDotProduct(self):
    lhs = ops.convert_to_tensor([[10.0, 20.0, 30.0]])
    rhs = ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0], [80.0, 90.0]])
    via_operator = infix_matmul(lhs, rhs)
    via_api = math_ops.matmul(lhs, rhs)
    self.assertAllEqual(via_operator, via_api)
if __name__ == "__main__":
sizes = [1, 3, 5]
trans_options = [[False, False], [True, False], [False, True]]
dtypes_to_test = [
np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,
np.complex128
]
for use_static_shape in set([True, tf2.enabled()]):
for dtype in dtypes_to_test:
if test_util.is_xla_enabled() and (dtype == np.int32 or
dtype == np.int64):
continue
for m in sizes:
for n in sizes:
for k in sizes:
a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
if dtype in (np.complex64, np.complex128):
a_np.imag = np.random.normal(-5, 5,
m * k).astype(dtype).reshape([m, k])
b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
if dtype in (np.complex64, np.complex128):
b_np.imag = np.random.normal(-5, 5,
k * n).astype(dtype).reshape([k, n])
for adjoint_a, transpose_a in trans_options:
for adjoint_b, transpose_b in trans_options:
name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
use_static_shape, dtype.__name__, m, n, k, adjoint_a,
transpose_a, adjoint_b, transpose_b)
_AddTest(MatMulTest, "MatMulTest", name,
_GetMatMulTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
_AddTest(MatMulGradientTest, "MatMulGradientTest", name,
_GetMatMulGradientTest(
a_np,
b_np,
use_static_shape,
adjoint_a=adjoint_a,
transpose_a=transpose_a,
adjoint_b=adjoint_b,
transpose_b=transpose_b))
test_lib.main()
| true | true |
1c3608a863d2c4a6cb5953c3e40a40492fa6ec86 | 2,378 | py | Python | test/unit/objects/test_processorspec.py | pyGSTi-Developers/pyGSTi | bfedc1de4d604f14b0f958615776fb80ddb59e33 | [
"Apache-2.0"
] | null | null | null | test/unit/objects/test_processorspec.py | pyGSTi-Developers/pyGSTi | bfedc1de4d604f14b0f958615776fb80ddb59e33 | [
"Apache-2.0"
] | null | null | null | test/unit/objects/test_processorspec.py | pyGSTi-Developers/pyGSTi | bfedc1de4d604f14b0f958615776fb80ddb59e33 | [
"Apache-2.0"
] | null | null | null | import unittest
import numpy as np
import scipy
from pygsti.processors import QubitProcessorSpec
from pygsti.models import modelconstruction as mc
from pygsti.circuits import Circuit
from ..util import BaseCase, with_temp_path
def save_and_load(obj, pth):
    '''
    Round-trip ``obj`` through JSON serialization on disk and return the
    freshly deserialized QubitProcessorSpec.
    '''
    json_path = pth + ".json"
    obj.write(json_path)
    return QubitProcessorSpec.read(json_path)
class ProcessorSpecTester(BaseCase):
    """Tests QubitProcessorSpec construction and JSON round-tripping."""
    @unittest.skip("REMOVEME")
    def test_construct_with_nonstd_gate_unitary_factory(self):
        """Builds a pspec whose 'Ga' gate is supplied as a unitary factory."""
        nQubits = 2
        def fn(args):
            # Treat a missing argument tuple as angle 0.
            if args is None: args = (0,)
            a, = args
            sigmaZ = np.array([[1, 0], [0, -1]], 'd')
            # exp(i * a * sigma_z): a single-qubit Z-axis rotation by angle a.
            return scipy.linalg.expm(1j * float(a) * sigmaZ)
        ps = QubitProcessorSpec(nQubits, ('Gx', 'Gy', 'Gcnot', 'Ga'), nonstd_gate_unitaries={'Ga': fn})
        mdl = mc.create_crosstalk_free_model(ps)
        c = Circuit("Gx:1Ga;0.3:1Gx:1@(0,1)")
        p = mdl.probabilities(c)
        # Regression values for the a=0.3 circuit.
        self.assertAlmostEqual(p['00'], 0.08733219254516078)
        self.assertAlmostEqual(p['01'], 0.9126678074548386)
        c2 = Circuit("Gx:1Ga;0.78539816:1Gx:1@(0,1)")  # a clifford: 0.78539816 = pi/4
        p2 = mdl.probabilities(c2)
        self.assertAlmostEqual(p2['00'], 0.5)
        self.assertAlmostEqual(p2['01'], 0.5)
    @with_temp_path
    def test_with_spam(self, pth):
        """Round-trips pspecs with custom preps/POVMs through write/read."""
        pspec_defaults = QubitProcessorSpec(4, ['Gxpi2', 'Gypi2'], geometry='line')
        pspec_names = QubitProcessorSpec(4, ['Gxpi2', 'Gypi2'], geometry='line',
                                         prep_names=("rho1", "rho_1100"), povm_names=("Mz",))
        # Explicit state vector: amplitude 1 on basis index 4 (|0100>).
        prep_vec = np.zeros(2**4, complex)
        prep_vec[4] = 1.0
        # POVM effect vectors on basis indices 14 (|1110>) and 15 (|1111>).
        EA = np.zeros(2**4, complex)
        EA[14] = 1.0
        EB = np.zeros(2**4, complex)
        EB[15] = 1.0
        pspec_vecs = QubitProcessorSpec(4, ['Gxpi2', 'Gypi2'], geometry='line',
                                        prep_names=("rhoA", "rhoC"), povm_names=("Ma", "Mc"),
                                        nonstd_preps={'rhoA': "rho0", 'rhoC': prep_vec},
                                        nonstd_povms={'Ma': {'0': "0000", '1': EA},
                                                      'Mc': {'OutA': "0000", 'OutB': [EA, EB]}})
        # Each variant must survive a write -> read round trip without error.
        pspec_defaults = save_and_load(pspec_defaults, pth)
        pspec_names = save_and_load(pspec_names, pth)
        pspec_vecs = save_and_load(pspec_vecs, pth)
| 36.584615 | 103 | 0.56434 | import unittest
import numpy as np
import scipy
from pygsti.processors import QubitProcessorSpec
from pygsti.models import modelconstruction as mc
from pygsti.circuits import Circuit
from ..util import BaseCase, with_temp_path
def save_and_load(obj, pth):
obj.write(pth + ".json")
return QubitProcessorSpec.read(pth + '.json')
class ProcessorSpecTester(BaseCase):
@unittest.skip("REMOVEME")
def test_construct_with_nonstd_gate_unitary_factory(self):
nQubits = 2
def fn(args):
if args is None: args = (0,)
a, = args
sigmaZ = np.array([[1, 0], [0, -1]], 'd')
return scipy.linalg.expm(1j * float(a) * sigmaZ)
ps = QubitProcessorSpec(nQubits, ('Gx', 'Gy', 'Gcnot', 'Ga'), nonstd_gate_unitaries={'Ga': fn})
mdl = mc.create_crosstalk_free_model(ps)
c = Circuit("Gx:1Ga;0.3:1Gx:1@(0,1)")
p = mdl.probabilities(c)
self.assertAlmostEqual(p['00'], 0.08733219254516078)
self.assertAlmostEqual(p['01'], 0.9126678074548386)
c2 = Circuit("Gx:1Ga;0.78539816:1Gx:1@(0,1)")
p2 = mdl.probabilities(c2)
self.assertAlmostEqual(p2['00'], 0.5)
self.assertAlmostEqual(p2['01'], 0.5)
@with_temp_path
def test_with_spam(self, pth):
pspec_defaults = QubitProcessorSpec(4, ['Gxpi2', 'Gypi2'], geometry='line')
pspec_names = QubitProcessorSpec(4, ['Gxpi2', 'Gypi2'], geometry='line',
prep_names=("rho1", "rho_1100"), povm_names=("Mz",))
prep_vec = np.zeros(2**4, complex)
prep_vec[4] = 1.0
EA = np.zeros(2**4, complex)
EA[14] = 1.0
EB = np.zeros(2**4, complex)
EB[15] = 1.0
pspec_vecs = QubitProcessorSpec(4, ['Gxpi2', 'Gypi2'], geometry='line',
prep_names=("rhoA", "rhoC"), povm_names=("Ma", "Mc"),
nonstd_preps={'rhoA': "rho0", 'rhoC': prep_vec},
nonstd_povms={'Ma': {'0': "0000", '1': EA},
'Mc': {'OutA': "0000", 'OutB': [EA, EB]}})
pspec_defaults = save_and_load(pspec_defaults, pth)
pspec_names = save_and_load(pspec_names, pth)
pspec_vecs = save_and_load(pspec_vecs, pth)
| true | true |
1c360974e2cc8474ccbabb66f2c16d9b1f00c6a6 | 13,679 | py | Python | examples/error_analysis.py | OsmanMutlu/pytorch-pretrained-BERT | 837cde8fe82549dc5aeb34aa25b86c07c9448a46 | [
"Apache-2.0"
] | null | null | null | examples/error_analysis.py | OsmanMutlu/pytorch-pretrained-BERT | 837cde8fe82549dc5aeb34aa25b86c07c9448a46 | [
"Apache-2.0"
] | null | null | null | examples/error_analysis.py | OsmanMutlu/pytorch-pretrained-BERT | 837cde8fe82549dc5aeb34aa25b86c07c9448a46 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Random baseline for the PAN19 hyperpartisan news detection task"""
# Version: 2018-09-24
# Parameters:
# --inputDataset=<directory>
# Directory that contains the articles XML file with the articles for which a prediction should be made.
# --outputDir=<directory>
# Directory to which the predictions will be written. Will be created if it does not exist.
from __future__ import division
import os
import sys
from lxml import etree
import codecs
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
from pathlib import Path
import glob
import argparse
import logging
import datetime
import random
import math
import torch
from torch.utils.data import Dataset, DataLoader
from pytorch_pretrained_bert.tokenization import printable_text, BertTokenizer
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.optimization import BertAdam
logging.basicConfig(filename = '{}_log.txt'.format(datetime.datetime.now()),
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
runOutputFileName = "prediction.txt"
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Create an InputExample.

        Args:
          guid: Unique id for the example.
          text_a: string, untokenized text of the first sequence; always
            required.
          text_b: optional string, untokenized text of the second sequence
            (sequence-pair tasks only).
          label: optional string label; set for train/dev examples, left
            as None for unlabeled test examples.
        """
        self.guid, self.text_a = guid, text_a
        self.text_b, self.label = text_b, label
class InputFeatures(object):
    """Holds one example's BERT inputs: token ids, mask, segments, label."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # Stored verbatim; lengths are expected to equal max_seq_length
        # by the featurization code that constructs instances.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
class DataProcessor(object):
    """Abstract base for dataset-specific sequence-classification loaders."""

    def get_test_examples(self, inputFile):
        """Return a list of `InputExample`s read from ``inputFile``."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the list of label strings for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Load ``input_file`` as a pandas DataFrame (comma-separated).

        ``quotechar`` is accepted for API symmetry but unused.
        """
        return pd.read_csv(input_file)
class HyperProcessor(DataProcessor):
    """Processor for the Hyperpartisan data set."""

    def get_test_examples(self, inputFile):
        """See base class."""
        frame = self._read_tsv(inputFile)
        return self._create_examples(frame, "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build InputExamples from a DataFrame with text/hyperpartisan columns."""
        examples = []
        for row_index, row in lines.iterrows():
            examples.append(
                InputExample(guid=row_index,
                             text_a=str(row.text),
                             text_b=None,
                             label=str(int(row.hyperpartisan))))
        return examples
class EmwProcessor(DataProcessor):
    """Processor for the Emw data set (binary labels)."""

    def get_test_examples(self, inputFile):
        """See base class."""
        frame = self._read_tsv(inputFile)
        return self._create_examples(frame, "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Build InputExamples from a DataFrame with text/label columns."""
        examples = []
        for row_index, row in lines.iterrows():
            examples.append(
                InputExample(guid=row_index,
                             text_a=str(row.text),
                             text_b=None,
                             label=str(int(row.label))))
        return examples
class EmwProcessor2(DataProcessor):
    """Processor for the Emw data set (three-class variant).

    Identical to EmwProcessor except that get_labels exposes three classes.
    """
    def get_test_examples(self, inputFile):
        """See base class."""
        return self._create_examples(
            self._read_tsv(inputFile), "test")
    def get_labels(self):
        """See base class."""
        return ["0", "1", "2"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        ``set_type`` is accepted for interface symmetry but unused here.
        """
        examples = []
        for (i, line) in lines.iterrows():
            # The DataFrame row index doubles as the example's unique id.
            guid = i
            text_a = str(line.text)
            label = str(int(line.label))
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
def convert_examples_to_features(example, label_list, max_seq_length, tokenizer):
    """Convert a single InputExample into fixed-length BERT input features.

    Produces ``[CLS] tokens_a [SEP]`` (plus ``tokens_b [SEP]`` for pair
    inputs), zero-padded to ``max_seq_length``, with segment ids 0 for the
    first sequence and 1 for the second, and an attention mask that is 1
    on real tokens and 0 on padding. The [CLS] position's final hidden
    state serves as the sentence vector for classification.
    """
    label_map = {label: index for index, label in enumerate(label_list)}

    tokens_a = tokenizer.tokenize(example.text_a)
    tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

    if tokens_b:
        # Reserve room for [CLS], [SEP], [SEP]; trims the longer sequence.
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    elif len(tokens_a) > max_seq_length - 2:
        # Reserve room for [CLS] and [SEP].
        tokens_a = tokens_a[:max_seq_length - 2]

    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    if tokens_b:
        tokens += tokens_b + ["[SEP]"]
        segment_ids += [1] * (len(tokens_b) + 1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)

    # Zero-pad every sequence out to the fixed model length.
    pad_length = max_seq_length - len(input_ids)
    input_ids += [0] * pad_length
    input_mask += [0] * pad_length
    segment_ids += [0] * pad_length

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    return InputFeatures(input_ids=input_ids,
                         input_mask=input_mask,
                         segment_ids=segment_ids,
                         label_id=label_map[example.label])
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class HyperpartisanData(Dataset):
    """Torch Dataset that featurizes InputExamples lazily, one per item."""

    def __init__(self, examples, label_list, max_seq_length, tokenizer):
        self.examples = examples
        self.label_list = label_list
        self.max_seq_length = max_seq_length
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        example = self.examples[idx]
        features = convert_examples_to_features(
            example, self.label_list, self.max_seq_length, self.tokenizer)
        # Every tensor the model consumes is int64 (token/segment ids, mask,
        # label); the guid rides along untouched for bookkeeping.
        input_ids = torch.tensor(features.input_ids, dtype=torch.long)
        input_mask = torch.tensor(features.input_mask, dtype=torch.long)
        segment_ids = torch.tensor(features.segment_ids, dtype=torch.long)
        label_id = torch.tensor(features.label_id, dtype=torch.long)
        return input_ids, input_mask, segment_ids, label_id, example.guid
def main():
    """Load a fine-tuned BERT classifier and write per-row predictions.

    Reads a CSV of texts, runs the model specified by --model_load over
    every row, and writes the input CSV back out with an added
    ``prediction`` column.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--inputFile",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir")
    parser.add_argument("-o", "--outputFile", default=None, type=str,
                        help="Output file for predictions")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default="emw",
                        type=str,
                        help="The name of the task to train.")
    parser.add_argument("--model_load",
                        default="",
                        type=str,
                        required=True,
                        help="The path of model state.")
    parser.add_argument("--max_seq_length",
                        default=256,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--batch_size",
                        default=16,
                        type=int,
                        help="Batch size.")
    args = parser.parse_args()
    # Maps --task_name to the processor that knows the dataset's columns/labels.
    processors = {
        "hyperpartisan": HyperProcessor,
        "emw": EmwProcessor,
        "emw2": EmwProcessor2,
    }
    bert_model = args.bert_model
    max_seq_length = args.max_seq_length
    model_path = args.model_load
    batch_size = args.batch_size
    task_name = args.task_name.lower()
    processor = processors[task_name]()
    label_list = processor.get_labels()
    inputFile = args.inputFile
    outputFile = args.outputFile
    num_labels = len(label_list)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = BertTokenizer.from_pretrained(bert_model)
    model = BertForSequenceClassification.from_pretrained(bert_model, PYTORCH_PRETRAINED_BERT_CACHE, num_labels=num_labels)
    # NOTE(review): bare ``except`` below swallows every failure mode, not
    # just the DataParallel 'module.'-prefixed state_dict case it is meant
    # to handle -- consider catching RuntimeError specifically.
    try:
        model.load_state_dict(torch.load(model_path)) # , map_location='cpu' for only cpu
    except: #When model is parallel
        model = torch.nn.DataParallel(model)
        model.load_state_dict(torch.load(model_path)) # , map_location='cpu' for only cpu
    logger.info("Model state has been loaded.")
    model.to(device)
    test_examples = processor.get_test_examples(inputFile)
    random.shuffle(test_examples)
    test_dataloader = DataLoader(dataset=HyperpartisanData(test_examples, label_list, max_seq_length, tokenizer), batch_size=batch_size)
    # Predictions are written back into the original CSV row order via guid,
    # so the shuffle above does not affect the output alignment.
    df = pd.read_csv(inputFile)
    df["prediction"] = 0
    model.eval()
    for input_ids, input_mask, segment_ids, label_ids, doc_ids in test_dataloader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, segment_ids, input_mask)
        logits = logits.detach().cpu().numpy()
        labels = np.argmax(logits, axis=1)
        for i in range(len(labels)):
            # doc_ids are the DataFrame row indices (guids) -- place each
            # predicted class back on its source row.
            df.iloc[int(doc_ids[i].item()), df.columns.get_loc("prediction")] = int(labels[i])
    df.to_csv(outputFile, index=False)
    logger.info("The predictions have been written to the output folder.")
if __name__ == '__main__':
    main()
| 35.622396 | 136 | 0.626362 |
from __future__ import division
import os
import sys
from lxml import etree
import codecs
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
from pathlib import Path
import glob
import argparse
import logging
import datetime
import random
import math
import torch
from torch.utils.data import Dataset, DataLoader
from pytorch_pretrained_bert.tokenization import printable_text, BertTokenizer
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.optimization import BertAdam
logging.basicConfig(filename = '{}_log.txt'.format(datetime.datetime.now()),
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
runOutputFileName = "prediction.txt"
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
def get_test_examples(self, inputFile):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
lines = pd.read_csv(input_file)
return lines
class HyperProcessor(DataProcessor):
def get_test_examples(self, inputFile):
return self._create_examples(
self._read_tsv(inputFile), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in lines.iterrows():
guid = i
text_a = str(line.text)
label = str(int(line.hyperpartisan))
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class EmwProcessor(DataProcessor):
def get_test_examples(self, inputFile):
return self._create_examples(
self._read_tsv(inputFile), "test")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in lines.iterrows():
guid = i
text_a = str(line.text)
label = str(int(line.label))
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class EmwProcessor2(DataProcessor):
def get_test_examples(self, inputFile):
return self._create_examples(
self._read_tsv(inputFile), "test")
def get_labels(self):
return ["0", "1", "2"]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in lines.iterrows():
guid = i
text_a = str(line.text)
label = str(int(line.label))
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_examples_to_features(example, label_list, max_seq_length, tokenizer):
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
s.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class HyperpartisanData(Dataset):
def __init__(self, examples, label_list, max_seq_length, tokenizer):
self.examples = examples
self.label_list = label_list
self.max_seq_length = max_seq_length
self.tokenizer = tokenizer
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
ex = self.examples[idx]
feats = convert_examples_to_features(ex, self.label_list, self.max_seq_length, self.tokenizer)
input_ids = torch.tensor(feats.input_ids, dtype=torch.long)
input_mask = torch.tensor(feats.input_mask, dtype=torch.long)
segment_ids = torch.tensor(feats.segment_ids, dtype=torch.long)
label_id = torch.tensor(feats.label_id, dtype=torch.long)
return input_ids, input_mask, segment_ids, label_id, ex.guid
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--inputFile",
default=None,
type=str,
required=True,
help="The input data dir")
parser.add_argument("-o", "--outputFile", default=None, type=str,
help="Output file for predictions")
parser.add_argument("--bert_model", default="bert-base-uncased", type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default="emw",
type=str,
help="The name of the task to train.")
parser.add_argument("--model_load",
default="",
type=str,
required=True,
help="The path of model state.")
parser.add_argument("--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--batch_size",
default=16,
type=int,
help="Batch size.")
args = parser.parse_args()
processors = {
"hyperpartisan": HyperProcessor,
"emw": EmwProcessor,
"emw2": EmwProcessor2,
}
bert_model = args.bert_model
max_seq_length = args.max_seq_length
model_path = args.model_load
batch_size = args.batch_size
task_name = args.task_name.lower()
processor = processors[task_name]()
label_list = processor.get_labels()
inputFile = args.inputFile
outputFile = args.outputFile
num_labels = len(label_list)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained(bert_model)
model = BertForSequenceClassification.from_pretrained(bert_model, PYTORCH_PRETRAINED_BERT_CACHE, num_labels=num_labels)
try:
model.load_state_dict(torch.load(model_path)) # , map_location='cpu' for only cpu
except: #When model is parallel
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(model_path)) # , map_location='cpu' for only cpu
logger.info("Model state has been loaded.")
model.to(device)
test_examples = processor.get_test_examples(inputFile)
random.shuffle(test_examples)
test_dataloader = DataLoader(dataset=HyperpartisanData(test_examples, label_list, max_seq_length, tokenizer), batch_size=batch_size)
df = pd.read_csv(inputFile)
df["prediction"] = 0
model.eval()
for input_ids, input_mask, segment_ids, label_ids, doc_ids in test_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask)
logits = logits.detach().cpu().numpy()
labels = np.argmax(logits, axis=1)
for i in range(len(labels)):
df.iloc[int(doc_ids[i].item()), df.columns.get_loc("prediction")] = int(labels[i])
df.to_csv(outputFile, index=False)
logger.info("The predictions have been written to the output folder.")
if __name__ == '__main__':
main()
| true | true |
1c360b20042a5456b774f7aac67b4d7304b905ab | 1,975 | py | Python | plot_bar.py | lauromoraes/CapsNet-promoter | 9b08912648ff5d58a11ebb42225d9ad9851c61ac | [
"MIT"
] | 2 | 2021-11-08T16:21:56.000Z | 2022-03-07T01:49:26.000Z | plot_bar.py | lauromoraes/CapsNet-promoter | 9b08912648ff5d58a11ebb42225d9ad9851c61ac | [
"MIT"
] | null | null | null | plot_bar.py | lauromoraes/CapsNet-promoter | 9b08912648ff5d58a11ebb42225d9ad9851c61ac | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 4 13:32:01 2018
@author: fnord
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Per-dataset metric tuples (7 datasets, in the x-tick order below).
# The *_mean1/*_std1 vs *_mean2/*_std2 suffixes distinguish the two models
# being compared; presumably suffix 1 is model M2 and suffix 2 is model M1
# given the legend mapping further down -- TODO confirm with the author.
# sn = sensitivity, sp = specificity, mcc = Matthews correlation coefficient.
sn_a_mean1 = (.91, .97, .88, .87, .90, .88, .94)
sn_a_std1 = (.01, .02, .07, .04, .02, .02, .01)
sn_a_mean2 = (.94, .95, .91, .90, .90, .88, .97)
sn_a_std2 = (.0, .0, .0, .0, .0, .0, .0)
# =========================================
sp_a_mean1 = (.96, .98, .95, .97, .98, .95, .98)
sp_a_std1 = (.01, .01, .02, .01, .01, .01, .01)
sp_a_mean2 = (.94, .97, .95, .96, .98, .94, .97)
sp_a_std2 = (.0, .0, .0, .0, .0, .0, .0)
# =========================================
mcc_a_mean1 = (.88, .95, .83, .86, .90, .84, .92)
mcc_a_std1 = (.01, .01, .08, .02, .01, .01, .01)
mcc_a_mean2 = (.86, .91, .86, .84, .89, .83, .93)
mcc_a_std2 = (.0, .0, .0, .0, .0, .0, .0)
N = len(sn_a_mean1)
ind = np.arange(N)  # the x locations for the groups
width = 0.35       # the width of the bars
fig, ax = plt.subplots()
# Only the MCC series is plotted here; error bars come from the std tuples.
rects1 = ax.bar(ind, mcc_a_mean2, width, color='#D4D6D3', yerr=mcc_a_std2)
rects2 = ax.bar(ind + width, mcc_a_mean1, width, color='#E5BE83', yerr=mcc_a_std1)
# add some text for labels, title and axes ticks
ax.set_ylabel('Mcc')
ax.set_title(u'Comparação do Coeficiente de Matthews')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'))
# Dataset names are long -- rotate the tick labels vertically.
for tick in ax.get_xticklabels():
    tick.set_rotation(90)
# NOTE(review): legend pairs rects1 (mcc_a_mean2 data) with 'M1' and rects2
# (mcc_a_mean1 data) with 'M2' -- verify this model/series pairing is intended.
ax.legend((rects1[0], rects2[0]), ('M1', 'M2'), loc=3)
#def autolabel(rects):
#    """
#    Attach a text label above each bar displaying its height
#    """
#    for rect in rects:
#        height = rect.get_height()
#        ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
#                '%d' % int(height),
#                ha='center', va='bottom')
#
#autolabel(rects1)
#autolabel(rects2)
plt.tight_layout()
plt.savefig('comp_mcc.eps', format='eps', dpi=3000)
plt.show() | 29.477612 | 135 | 0.569114 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sn_a_mean1 = (.91, .97, .88, .87, .90, .88, .94)
sn_a_std1 = (.01, .02, .07, .04, .02, .02, .01)
sn_a_mean2 = (.94, .95, .91, .90, .90, .88, .97)
sn_a_std2 = (.0, .0, .0, .0, .0, .0, .0)
sp_a_mean1 = (.96, .98, .95, .97, .98, .95, .98)
sp_a_std1 = (.01, .01, .02, .01, .01, .01, .01)
sp_a_mean2 = (.94, .97, .95, .96, .98, .94, .97)
sp_a_std2 = (.0, .0, .0, .0, .0, .0, .0)
mcc_a_mean1 = (.88, .95, .83, .86, .90, .84, .92)
mcc_a_std1 = (.01, .01, .08, .02, .01, .01, .01)
mcc_a_mean2 = (.86, .91, .86, .84, .89, .83, .93)
mcc_a_std2 = (.0, .0, .0, .0, .0, .0, .0)
N = len(sn_a_mean1)
ind = np.arange(N)
width = 0.35
fig, ax = plt.subplots()
rects1 = ax.bar(ind, mcc_a_mean2, width, color='#D4D6D3', yerr=mcc_a_std2)
rects2 = ax.bar(ind + width, mcc_a_mean1, width, color='#E5BE83', yerr=mcc_a_std1)
ax.set_ylabel('Mcc')
ax.set_title(u'Comparação do Coeficiente de Matthews')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(('Arabidopsis_non_tata', 'Arabidopsis_tata', 'Bacillus', 'Ecoli', 'Human_non_tata', 'Mouse_non_tata', 'Mouse_tata'))
for tick in ax.get_xticklabels():
tick.set_rotation(90)
ax.legend((rects1[0], rects2[0]), ('M1', 'M2'), loc=3)
# Attach a text label above each bar displaying its height
# """
plt.tight_layout()
plt.savefig('comp_mcc.eps', format='eps', dpi=3000)
plt.show() | true | true |
1c360bb7ceb0f04bc920af733bb8e3733cdbaa47 | 947 | py | Python | thrift/compiler/py/generate/__init__.py | CacheboxInc/fbthrift | b894dd9192ea4684c0067c93bb2ba2b9547749ec | [
"Apache-2.0"
] | 2 | 2021-06-29T13:42:22.000Z | 2021-09-06T10:57:34.000Z | thrift/compiler/py/generate/__init__.py | CacheboxInc/fbthrift | b894dd9192ea4684c0067c93bb2ba2b9547749ec | [
"Apache-2.0"
] | null | null | null | thrift/compiler/py/generate/__init__.py | CacheboxInc/fbthrift | b894dd9192ea4684c0067c93bb2ba2b9547749ec | [
"Apache-2.0"
] | 5 | 2021-06-29T13:42:26.000Z | 2022-02-08T02:41:34.000Z | #!/usr/local/bin/python2.6 -tt
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import t_generator
import t_cpp_generator
import t_schema_generator

# __all__ must contain *names* (strings), not the imported module objects:
# ``from <package> import *`` raises a TypeError on non-string entries.
__all__ = ['t_cpp_generator', 't_schema_generator', 't_generator']
| 36.423077 | 61 | 0.782471 |
import t_generator
import t_cpp_generator
import t_schema_generator
__all__ = [t_cpp_generator, t_schema_generator, t_generator]
| true | true |
1c360bbc8de80b967f85f36dce48094b547089c2 | 3,644 | py | Python | bibliography/mdfiles.py | poely/website-content | 2ac8dd35a4bac5536c33cf01466728c606cfeb8e | [
"MIT"
] | null | null | null | bibliography/mdfiles.py | poely/website-content | 2ac8dd35a4bac5536c33cf01466728c606cfeb8e | [
"MIT"
] | null | null | null | bibliography/mdfiles.py | poely/website-content | 2ac8dd35a4bac5536c33cf01466728c606cfeb8e | [
"MIT"
] | null | null | null |
"""
MD files
"""
def save_md_file(output_path, md_content):
    '''
    Write ``md_content`` to ``output_path`` as UTF-8 text.

    Uses a context manager so the file handle is closed even when the
    write raises (the original left the handle open on failure).
    '''
    with open(output_path, 'w', encoding='utf-8') as md_file:
        md_file.write(md_content)
def create_author_md_files(author_bib_keys, list_researchers):
    '''
    Creates md file for every author in: './content/pages/publications/'
    '''
    for name, bib_keys in author_bib_keys.items():
        author_name = name.replace('_', ' ')
        display_name = list_researchers[name][2]
        groups = list_researchers[name][1]
        # Assemble the markdown front matter line by line; no trailing
        # newline after the final "bibkeys" entry.
        header_lines = [
            'title: Publications of ' + display_name,
            'template: publications-author',
            'author: ' + name.lower(),
            'author_name: ' + display_name,
            'groups: ' + ','.join(groups),
            'bibkeys: ' + ','.join(bib_keys),
        ]
        md_file_name = './content/pages/publications/' + name.lower() + '.md'
        save_md_file(md_file_name, '\n'.join(header_lines))
def create_publication_md(bib_items, author_bib_keys, list_researchers):
    '''
    Create md file for every publication in: './content/pages/publications/'

    bib_items: dict mapping bib key to the parsed bibliography entry (dict).
    author_bib_keys: dict mapping author key to that author's bib keys.
    list_researchers: dict mapping author key to a sequence where index 1
        holds the author's groups.
    '''
    for bib_key, bib_item in bib_items.items():
        # Collect in-house ('diag') authors of this publication and their groups
        diag_authors = []
        groups = set()
        for name, bib_keys in author_bib_keys.items():
            for bkey in bib_keys:
                if bib_key == bkey:
                    diag_authors.append(name)
                    for group in list_researchers[name][1]:
                        groups.add(group)
        # Front matter shared by all publication types
        md_string = 'title: ' + bib_item['title'] + '\n'
        md_string += 'authors: ' + bib_item['authors'] + '\n'
        md_string += 'has_pdf: True \n' if 'file' in bib_item else 'has_pdf: False \n'
        md_string += 'bibkey: ' + bib_key + '\n'
        md_string += 'groups: ' + ','.join(groups) + '\n'
        md_string += 'booktitle: NA \n' if 'booktitle' not in bib_item else 'booktitle: ' + \
            bib_item['booktitle'] + '\n'
        md_string += 'year: NA \n' if 'year' not in bib_item else 'year: ' + \
            bib_item['year'] + '\n'
        md_string += 'doi: NA \n' if 'doi' not in bib_item else 'doi: ' + \
            bib_item['doi'] + '\n'
        if bib_item['type'] == 'phdthesis':
            # Theses use a dedicated template with a cover image and extra fields
            md_string += 'template: publication-thesis\n'
            # TODO this is a hardcode capital first letter of bibkey
            cover_path = bib_key[0].title() + bib_key[1:] + '.png'
            md_string += 'coverpng: ' + cover_path + '\n'
            for k in 'promotor', 'copromotor', 'school', 'optmonth':
                if k in bib_item:
                    md_string += k + ': ' + bib_item[k] + '\n'
            if 'url' in bib_item:
                md_string += 'urlweb: ' + bib_item['url'] + '\n'
        else:
            md_string += 'template: publication\n'
            md_string += 'diag_authors: ' + \
                ','.join(diag_authors) + '\n'
            md_string += 'journal: NA \n' if 'journal' not in bib_item else 'journal: ' + \
                bib_item['journal'] + '\n'
        # Append the abstract text, if present, after the front matter
        md_string += '' if 'abstract' not in bib_item else bib_item['abstract']
        md_file_name = './content/pages/publications/' + bib_key + '.md'
        save_md_file(md_file_name, md_string)
def save_md_file(output_path, md_content):
file = open(output_path, 'w', encoding='utf-8')
file.write(md_content)
file.close()
def create_author_md_files(author_bib_keys, list_researchers):
for name, bib_keys in author_bib_keys.items():
author_name = name.replace('_', ' ')
groups = list_researchers[name][1]
md_string = 'title: Publications of ' + \
list_researchers[name][2] + '\n'
md_string += 'template: publications-author\n'
md_string += 'author: ' + name.lower() + '\n'
md_string += 'author_name: ' + list_researchers[name][2] + '\n'
md_string += 'groups: ' + ','.join(groups) + '\n'
md_string += 'bibkeys: ' + ','.join(bib_keys)
md_file_name = './content/pages/publications/' + name.lower() + '.md'
save_md_file(md_file_name, md_string)
def create_publication_md(bib_items, author_bib_keys, list_researchers):
for bib_key, bib_item in bib_items.items():
diag_authors = []
groups = set()
for name, bib_keys in author_bib_keys.items():
for bkey in bib_keys:
if bib_key == bkey:
diag_authors.append(name)
for group in list_researchers[name][1]:
groups.add(group)
md_string = 'title: ' + bib_item['title'] + '\n'
md_string += 'authors: ' + bib_item['authors'] + '\n'
md_string += 'has_pdf: True \n' if 'file' in bib_item else 'has_pdf: False \n'
md_string += 'bibkey: ' + bib_key + '\n'
md_string += 'groups: ' + ','.join(groups) + '\n'
md_string += 'booktitle: NA \n' if 'booktitle' not in bib_item else 'booktitle: ' + \
bib_item['booktitle'] + '\n'
md_string += 'year: NA \n' if 'year' not in bib_item else 'year: ' + \
bib_item['year'] + '\n'
md_string += 'doi: NA \n' if 'doi' not in bib_item else 'doi: ' + \
bib_item['doi'] + '\n'
if bib_item['type'] == 'phdthesis':
md_string += 'template: publication-thesis\n'
cover_path = bib_key[0].title() + bib_key[1:] + '.png'
md_string += 'coverpng: ' + cover_path + '\n'
for k in 'promotor', 'copromotor', 'school', 'optmonth':
if k in bib_item:
md_string += k + ': ' + bib_item[k] + '\n'
if 'url' in bib_item:
md_string += 'urlweb: ' + bib_item['url'] + '\n'
else:
md_string += 'template: publication\n'
md_string += 'diag_authors: ' + \
','.join(diag_authors) + '\n'
md_string += 'journal: NA \n' if 'journal' not in bib_item else 'journal: ' + \
bib_item['journal'] + '\n'
md_string += '' if 'abstract' not in bib_item else bib_item['abstract']
md_file_name = './content/pages/publications/' + bib_key + '.md'
save_md_file(md_file_name, md_string)
| true | true |
1c360c5395cb11877a6502a94516e1fbf35dddb6 | 842 | py | Python | bottender/bot/base.py | stegben/bottender-py | 07861e13a8c23507cbe7c783631d193d1d1951d9 | [
"MIT"
] | 1 | 2018-12-28T02:51:10.000Z | 2018-12-28T02:51:10.000Z | bottender/bot/base.py | stegben/bottender-py | 07861e13a8c23507cbe7c783631d193d1d1951d9 | [
"MIT"
] | null | null | null | bottender/bot/base.py | stegben/bottender-py | 07861e13a8c23507cbe7c783631d193d1d1951d9 | [
"MIT"
] | 1 | 2019-04-05T04:35:28.000Z | 2019-04-05T04:35:28.000Z | import abc
import asyncio as aio
class Bot(abc.ABC):
    """Base bot: maps incoming requests to contexts via a connector and
    dispatches each context to a user-registered coroutine handler."""

    _handler = None  # class-level default: no event handler registered yet

    def __init__(self, connector, session_store, loop=None):
        self._connector = connector
        self._session_store = session_store
        self._loop = loop

    def create_request_handler(self):
        """Return the internal request handler.

        An event handler must have been registered first via :meth:`on_event`.
        """
        if self._handler is not None:
            return self._request_handler
        raise RuntimeError("Handler has not been set yet. Use bot.on_event")

    async def _request_handler(self, request_body, request_context=None):
        """Dispatch every context derived from *request_body* to the handler."""
        for ctx in self._connector.map_request_to_contexts(request_body):
            await self._handler(ctx)

    def on_event(self, handler):
        """Register *handler* (must be a coroutine function) for events."""
        if aio.iscoroutinefunction(handler):
            self._handler = handler
        else:
            raise TypeError("Handler should be a coroutine function")
| 30.071429 | 80 | 0.684086 | import abc
import asyncio as aio
class Bot(abc.ABC):
_handler = None
def __init__(self, connector, session_store, loop=None):
self._connector = connector
self._session_store = session_store
self._loop = loop
def create_request_handler(self):
if self._handler is None:
raise RuntimeError("Handler has not been set yet. Use bot.on_event")
return self._request_handler
async def _request_handler(self, request_body, request_context=None):
contexts = self._connector.map_request_to_contexts(request_body)
for context in contexts:
await self._handler(context)
def on_event(self, handler):
if not aio.iscoroutinefunction(handler):
raise TypeError("Handler should be a coroutine function")
self._handler = handler
| true | true |
1c360ce0fb1f7dec70b6d95a8302baedce513186 | 2,878 | py | Python | examples/sparks.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | examples/sparks.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | examples/sparks.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | import pygame, pgeng
from random import uniform, randint
from pygame.locals import *
# pgeng particle demo: continuously emits Spark particles at the mouse cursor
# and spawns ShockWave rings on input events.
# Controls (as implemented in the event loop below):
#   left mouse (hold)  - curve sparks the other way (when curve mode is on)
#   right mouse        - toggle curve mode (haveangle)
#   SPACE              - toggle additive 'lighting' rendering (clears sparks)
#   ENTER              - toggle gravity mode
#   F11                - toggle fullscreen; ESC / window close - quit
pygame.init()
#pgeng.set_spark_attributes(side_length=0.4, back_length=2.5)# to change how it looks
screen = pgeng.Screen((640, 480), SCALED | RESIZABLE, vsync=0)
display = screen.get_display()
clock = pygame.time.Clock()
pygame.mouse.set_visible(False)
sparks = []
shockwaves = []
turn = False
lighting = False
turnnumber = 0.08
haveangle = False
gravity = False
large_font = pgeng.create_font((255, 255, 255))[1]
# Main loop: runs until pgeng.quit_game() is called from the event handlers
while True:
	display.fill((100, 100, 100))
	dt = pgeng.delta_time(clock, 60)
	mouse = pygame.mouse.get_pos()
	# Pick this frame's steering value; 0 when curve mode is off
	if turn and haveangle:
		turnnumber = -4.5
	elif haveangle:
		turnnumber = 4.5
	else:
		turnnumber = 0
	# Emit fewer sparks per frame while lighting mode is active
	for i in range(6 - lighting * 4):
		sparks.append(pgeng.Spark(mouse, randint(0, 360), uniform(3.5, 5), uniform(2, 3.5), (randint(100, 255), 0, randint(0, 100)))) #BLUE COLORS: (randint(0, 255), 255, 255)
	# Reverse index order so pop(i) does not shift unvisited entries
	for i, spark in sorted(enumerate(sparks), reverse=True):
		if gravity:
			spark.gravity(0.04, 1.4, dt)
			#spark.move(0.04, dt)
			#spark.angle_towards(90, 1.4, dt) #these lines do the same as spark.gravity()
		else:
			spark.move(0.04, dt, turnnumber)
		if lighting:
			spark.render(display, lighting_colour=(255, 0, 0), lighting_flag=BLEND_RGBA_ADD)
		else:
			spark.render(display)
		if not spark.alive:
			sparks.pop(i)
	for i, wave in sorted(enumerate(shockwaves), reverse=True):
		wave.move(1.5, 0.75, dt)
		wave.render(display)
		if not wave.alive:
			shockwaves.pop(i)
	for event in pygame.event.get():
		if event.type == QUIT:
			pgeng.quit_game()
		if event.type == KEYDOWN:
			if event.key == K_ESCAPE:
				pgeng.quit_game()
			if event.key == K_SPACE:
				shockwaves.append(pgeng.ShockWave(mouse, 20, 30, (255, 0, randint(171, 255))))
				lighting = not lighting
				if lighting:
					sparks = []
			if event.key == K_RETURN:
				shockwaves.append(pgeng.ShockWave(mouse, 20, 30, (255, 0, randint(86, 170))))
				gravity = not gravity
			if event.key == K_F11:
				screen.toggle_fullscreen()
		if event.type == MOUSEBUTTONDOWN:
			shockwaves.append(pgeng.ShockWave(mouse, 20, 30, (255, 0, randint(0, 85))))
			if event.button == 3:
				haveangle = not haveangle
			else:
				turn = True
		if event.type == MOUSEBUTTONUP:
			turn = False
	# HUD: current FPS in the top-left corner
	large_font.render(display, f'{round(clock.get_fps())}', (1, 1))
	pygame.display.update()
	clock.tick(144)
clock.tick(144) | 34.261905 | 176 | 0.563933 | import pygame, pgeng
from random import uniform, randint
from pygame.locals import *
pygame.init()
(640, 480), SCALED | RESIZABLE, vsync=0)
display = screen.get_display()
clock = pygame.time.Clock()
pygame.mouse.set_visible(False)
sparks = []
shockwaves = []
turn = False
lighting = False
turnnumber = 0.08
haveangle = False
gravity = False
large_font = pgeng.create_font((255, 255, 255))[1]
while True:
display.fill((100, 100, 100))
dt = pgeng.delta_time(clock, 60)
mouse = pygame.mouse.get_pos()
if turn and haveangle:
turnnumber = -4.5
elif haveangle:
turnnumber = 4.5
else:
turnnumber = 0
for i in range(6 - lighting * 4):
sparks.append(pgeng.Spark(mouse, randint(0, 360), uniform(3.5, 5), uniform(2, 3.5), (randint(100, 255), 0, randint(0, 100))))
for i, spark in sorted(enumerate(sparks), reverse=True):
if gravity:
spark.gravity(0.04, 1.4, dt)
dt, turnnumber)
if lighting:
spark.render(display, lighting_colour=(255, 0, 0), lighting_flag=BLEND_RGBA_ADD)
else:
spark.render(display)
if not spark.alive:
sparks.pop(i)
for i, wave in sorted(enumerate(shockwaves), reverse=True):
wave.move(1.5, 0.75, dt)
wave.render(display)
if not wave.alive:
shockwaves.pop(i)
for event in pygame.event.get():
if event.type == QUIT:
pgeng.quit_game()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
pgeng.quit_game()
if event.key == K_SPACE:
shockwaves.append(pgeng.ShockWave(mouse, 20, 30, (255, 0, randint(171, 255))))
lighting = not lighting
if lighting:
sparks = []
if event.key == K_RETURN:
shockwaves.append(pgeng.ShockWave(mouse, 20, 30, (255, 0, randint(86, 170))))
gravity = not gravity
if event.key == K_F11:
screen.toggle_fullscreen()
if event.type == MOUSEBUTTONDOWN:
shockwaves.append(pgeng.ShockWave(mouse, 20, 30, (255, 0, randint(0, 85))))
if event.button == 3:
haveangle = not haveangle
else:
turn = True
if event.type == MOUSEBUTTONUP:
turn = False
large_font.render(display, f'{round(clock.get_fps())}', (1, 1))
pygame.display.update()
clock.tick(144) | true | true |
1c360de1bbd0d8dd43183a8e7d04f990a1fe929a | 86,855 | py | Python | pyhdx/web/controllers.py | Jhsmit/PyHDX | 34bf653743008508bb14f24ccca21ee39b5b25e3 | [
"MIT"
] | 15 | 2020-10-14T14:15:54.000Z | 2022-03-31T17:55:36.000Z | pyhdx/web/controllers.py | Jhsmit/PyHDX | 34bf653743008508bb14f24ccca21ee39b5b25e3 | [
"MIT"
] | 145 | 2020-10-01T13:32:20.000Z | 2022-03-31T08:31:47.000Z | pyhdx/web/controllers.py | Jhsmit/PyHDX | 34bf653743008508bb14f24ccca21ee39b5b25e3 | [
"MIT"
] | 3 | 2021-03-03T10:57:05.000Z | 2021-05-24T09:11:49.000Z | import operator
import urllib.request
import zipfile
from collections import namedtuple
from io import StringIO, BytesIO
from pathlib import Path
import colorcet
import dask
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import panel as pn
import param
from numpy.lib.recfunctions import append_fields
from skimage.filters import threshold_multiotsu
from pyhdx import VERSION_STRING
from pyhdx.fileIO import read_dynamx, csv_to_protein, csv_to_dataframe, dataframe_to_stringio
from pyhdx.fitting import fit_rates_weighted_average, fit_rates_half_time_interpolate, get_bounds, fit_gibbs_global, \
fit_gibbs_global_batch, PATIENCE, STOP_LOSS, EPOCHS, R1, R2, optimizer_defaults
from pyhdx.models import PeptideMasterTable, HDXMeasurement, Protein, array_intersection
from pyhdx.web.base import ControlPanel, DEFAULT_COLORS, DEFAULT_CLASS_COLORS
from pyhdx.web.sources import DataSource, DataFrameSource
from pyhdx.web.transforms import ApplyCmapTransform
from pyhdx.web.widgets import ASyncProgressBar
from pyhdx.support import rgb_to_hex, hex_to_rgba, series_to_pymol
# Minimal stand-in for a fit result object: used where only the `.output`
# attribute of a fit result is read.
HalfLifeFitResult = namedtuple('HalfLifeFitResult', ['output'])
class MappingFileInputControl(ControlPanel):
    """
    This controller allows users to upload *.txt files where quantities (protection factors, Gibbs free energy, etc) are
    mapped to a linear sequence. The data is then used further downstream to generate binary comparisons between datasets.
    The column should be tab separated with on the last header line (starts with '#') the names of the columns. Columns
    should be tab-delimited.
    """
    header = 'File Input'
    input_file = param.Parameter(default=None, doc='Input file to add to available datasets')
    dataset_name = param.String(doc='Name for the dataset to add. Defaults to filename')
    offset = param.Integer(default=0, doc="Offset to add to the file's r_number column")
    add_dataset = param.Action(lambda self: self._action_add_dataset(),
                               doc='Add the dataset to available datasets')
    datasets_list = param.ListSelector(doc='Current datasets', label='Datasets')
    remove_dataset = param.Action(lambda self: self._action_remove_dataset(),
                                  doc='Remove selected datasets')
    def __init__(self, parent, **params):
        super(MappingFileInputControl, self).__init__(parent, **params)
        # Keep the datasets_list selector in sync with the parent's datasets dict
        self.parent.param.watch(self._datasets_updated, ['datasets'])
    def make_dict(self):
        """Return the mapping of parameter names to widget instances for this panel."""
        return self.generate_widgets(input_file=pn.widgets.FileInput)
    @param.depends('input_file', watch=True)
    def _input_file_updated(self):
        # Default the dataset name to the uploaded file's stem unless the user
        # already entered one
        self.dataset_name = self.dataset_name or Path(self.widget_dict['input_file'].filename).stem
    @property
    def protein(self):
        """The protein object from the currently selected file in the file widget.

        Returns None (and logs a message) when the uploaded bytes are not
        decodable as text.
        """
        try:
            sio = StringIO(self.input_file.decode())
        except UnicodeDecodeError:
            self.parent.logger.info('Invalid file type, supplied file is not a text file')
            return None
        try:
            sio.seek(0)
            # NOTE(review): txt_to_protein is not among this module's visible
            # imports -- confirm it is in scope or this path raises NameError.
            protein = txt_to_protein(sio)
        except KeyError:
            # Fall back to csv parsing when the txt reader rejects the format
            sio.seek(0)
            protein = csv_to_protein(sio)
        return protein
    def _add_dataset(self):
        # Register the parsed protein under the chosen dataset name
        self.parent.datasets[self.dataset_name] = self.protein
        #todo refactor dataset to protein_something
    def _action_add_dataset(self):
        """Validate the current inputs and register the parsed protein as a new dataset."""
        if self.dataset_name in self.parent.datasets.keys():
            self.parent.logger.info(f'Dataset {self.dataset_name} already added')
        elif not self.dataset_name:
            self.parent.logger.info('The added comparison needs to have a name')
        elif not self.input_file:
            self.parent.logger.info('Empty or no file selected')
        elif self.protein is not None:
            self._add_dataset()
            self.parent.param.trigger('datasets')
            # Reset the file widget and name field for the next upload
            self.widget_dict['input_file'].filename = ''
            self.widget_dict['input_file'].value = b''
            self.dataset_name = ''
    def _action_remove_dataset(self):
        """Remove all datasets currently selected in the datasets_list widget."""
        if self.datasets_list is not None:
            for dataset_name in self.datasets_list:
                self.parent.datasets.pop(dataset_name)
        self.parent.param.trigger('datasets')
    def _datasets_updated(self, events):
        # Refresh selectable options whenever the parent's datasets change
        self.param['datasets_list'].objects = list(self.parent.datasets.keys())
import itertools
# Endless cycle of colormap names; consumed by
# CSVFileInputControl._action_cmap_rect to rotate the rect plot's colormap.
cmap_cycle = itertools.cycle(['gray','PiYG', 'jet'])
class CSVFileInputControl(ControlPanel):
    """Development/debugging controller: loads a csv into a DataFrameSource and
    exposes 'temp_*' actions to poke data, colormaps and filters at runtime."""
    input_file = param.Parameter()
    load_file = param.Action(lambda self: self._action_load())
    temp_new_data = param.Action(lambda self: self._action_new_data())
    temp_new_cmap = param.Action(lambda self: self._action_new_cmap())
    temp_update_filter = param.Action(lambda self: self._action_exposure())
    temp_cmap_rect = param.Action(lambda self: self._action_cmap_rect())
    #cmap_obj = param.ObjectSelector(default='viridis', objects=['viridis', 'plasma', 'magma'])
    def make_dict(self):
        """Return the widget mapping; the file input only accepts csv/txt uploads."""
        return self.generate_widgets(input_file=pn.widgets.FileInput(accept='.csv,.txt'))
    def _action_load(self):
        # NOTE(review): the created source is neither stored nor registered;
        # this action looks like incomplete/leftover scaffolding.
        sio = StringIO(self.input_file.decode('UTF-8'))
        df = csv_to_dataframe(sio)
        source = DataFrameSource(df=df)
    def _action_new_data(self):
        # Overwrite the 'deltaG' column of the torch_fit table with random
        # values and push an update through the app
        source = self.parent.sources['torch_fit']
        table = source.get('torch_fit')
        size = len(table)
        new_data = 40e3*np.random.rand(size)
        table['deltaG'] = new_data
        self.parent.update()
    def _action_new_cmap(self):
        # Swap the cmap transform to a randomly chosen matplotlib colormap
        cmap_name = np.random.choice(['viridis', 'inferno', 'plasma'])
        cmap = mpl.cm.get_cmap(cmap_name)
        transform = self.parent.transforms['cmap']
        transform.cmap = cmap
        self.parent.update()
    def _action_exposure(self):
        # Reset the exposure filter widget to zero
        filter = self.parent.filters['exposure']
        filter.widget.value = 0.
        self.parent.update()
    def _action_cmap_rect(self):
        # Rotate the rect plot's colormap through the module-level cmap_cycle
        new_cmap = next(cmap_cycle)
        rect_view = self.parent.figure_panels['rect_plot']
        rect_view.opts['cmap'] = new_cmap
        self.parent.update()
        item = self.parent.rows['rect_plot'][0]
        #item.param.trigger('object')
class TestFileInputControl(ControlPanel):
    """Minimal test controller combining a csv file input with an external
    exposure-slider filter in its layout."""
    input_file = param.Parameter()
    load_file = param.Action(lambda self: self._action_load())
    _layout = {
        'self': None,
        'filters.exposure_slider': None
    }
    def __init__(self, parent, **params):
        super().__init__(parent, **params)
        # todo property and list of tuples
        # Instance-level copy shadows the class attribute of the same name
        self._layout = {
            'self': None,
            'filters.exposure_slider': None
        }
        self.update_box()
    def make_dict(self):
        """Return the widget mapping; the file input only accepts csv/txt uploads."""
        return self.generate_widgets(input_file=pn.widgets.FileInput(accept='.csv,.txt'))
    def _action_load(self):
        # NOTE(review): the created source is discarded; appears incomplete.
        sio = StringIO(self.input_file.decode('UTF-8'))
        df = csv_to_dataframe(sio)
        source = DataFrameSource(df=df)
class PeptideFileInputControl(ControlPanel):
    """
    This controller allows users to input .csv file (Currently only DynamX format) of 'state' peptide uptake data.
    Users can then choose how to correct for back-exchange and which 'state' and exposure times should be used for
    analysis.
    """
    header = 'Peptide Input'
    input_files = param.List()
    be_mode = param.Selector(doc='Select method of back exchange correction', label='Back exchange correction method', objects=['FD Sample', 'Flat percentage'])
    fd_state = param.Selector(doc='State used to normalize uptake', label='FD State')
    fd_exposure = param.Selector(doc='Exposure used to normalize uptake', label='FD Exposure')
    exp_state = param.Selector(doc='State for selected experiment', label='Experiment State')
    exp_exposures = param.ListSelector(default=[], objects=[''], label='Experiment Exposures'
                                       , doc='Selected exposure time to use')
    be_percent = param.Number(28., bounds=(0, 100), doc='Global percentage of back-exchange',
                              label='Back exchange percentage')
    drop_first = param.Integer(1, bounds=(0, None), doc='Select the number of N-terminal residues to ignore.')
    ignore_prolines = param.Boolean(True, constant=True, doc='Prolines are ignored as they do not exchange D.')
    d_percentage = param.Number(95., bounds=(0, 100), doc='Percentage of deuterium in the labelling buffer',
                                label='Deuterium percentage')
    #fd_percentage = param.Number(95., bounds=(0, 100), doc='Percentage of deuterium in the FD control sample buffer',
    #                          label='FD Deuterium percentage')
    temperature = param.Number(293.15, bounds=(0, 373.15), doc='Temperature of the D-labelling reaction',
                               label='Temperature (K)')
    pH = param.Number(7.5, doc='pH of the D-labelling reaction, as read from pH meter',
                      label='pH read')
    #load_button = param.Action(lambda self: self._action_load(), doc='Load the selected files', label='Load Files')
    n_term = param.Integer(1, doc='Index of the n terminal residue in the protein. Can be set to negative values to '
                                  'accommodate for purification tags. Used in the determination of intrinsic rate of exchange')
    c_term = param.Integer(0, bounds=(0, None),
                           doc='Index of the c terminal residue in the protein. Used for generating pymol export script'
                               'and determination of intrinsic rate of exchange for the C-terminal residue')
    sequence = param.String('', doc='Optional FASTA protein sequence')
    dataset_name = param.String()
    add_dataset_button = param.Action(lambda self: self._action_add_dataset(), label='Add dataset',
                                      doc='Parse selected peptides for further analysis and apply back-exchange correction')
    dataset_list = param.ObjectSelector(default=[], label='Datasets', doc='Lists available datasets')
    def __init__(self, parent, **params):
        super(PeptideFileInputControl, self).__init__(parent, **params)
        self.parent.param.watch(self._datasets_updated, ['data_objects'])
        # Hide the flat-percentage widget until that back-exchange mode is selected
        excluded = ['be_percent']
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()
        self._df = None # Raw input data from read_dynamx (None until files are read)
    @property
    def _layout(self):
        # Single panel showing only the currently applicable widgets
        return [('self', self.own_widget_names)]
    def make_dict(self):
        """Return the widget mapping for this panel with customized widget types."""
        text_area = pn.widgets.TextAreaInput(name='Sequence (optional)', placeholder='Enter sequence in FASTA format', max_length=10000,
                                             width=300, height=100, height_policy='fixed', width_policy='fixed')
        return self.generate_widgets(
            input_files=pn.widgets.FileInput(multiple=True, name='Input files'),
            temperature=pn.widgets.FloatInput,
            #be_mode=pn.widgets.RadioButtonGroup,
            be_percent=pn.widgets.FloatInput,
            d_percentage=pn.widgets.FloatInput,
            #fd_percentage=pn.widgets.FloatInput,
            sequence=text_area)
    def make_list(self):
        """Return the panel's widgets as a list, excluding the be_percent widget."""
        # NOTE(review): uses self.widget_dict where __init__ uses self.widgets --
        # confirm both exist on ControlPanel or unify the attribute name.
        excluded = ['be_percent']
        widget_list = [widget for name, widget, in self.widget_dict.items() if name not in excluded]
        return widget_list
    @param.depends('be_mode', watch=True)
    def _update_be_mode(self):
        # todo @tejas: Add test
        # Swap which widgets are visible depending on the correction method
        if self.be_mode == 'FD Sample':
            excluded = ['be_percent']
        elif self.be_mode == 'Flat percentage':
            excluded = ['fd_state', 'fd_exposure']
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        #self._layout = {'self': widgets}
        self.update_box()
    @param.depends('input_files', watch=True)
    def _read_files(self):
        """Parse the uploaded DynamX files and refresh all downstream selectors."""
        if self.input_files:
            combined_df = read_dynamx(*[StringIO(byte_content.decode('UTF-8')) for byte_content in self.input_files])
            self._df = combined_df
            self.parent.logger.info(
                f'Loaded {len(self.input_files)} file{"s" if len(self.input_files) > 1 else ""} with a total '
                f'of {len(self._df)} peptides')
        else:
            self._df = None
        # Cascade updates through the state/exposure selector chain
        self._update_fd_state()
        self._update_fd_exposure()
        self._update_exp_state()
        self._update_exp_exposure()
    def _update_fd_state(self):
        # Offer all states found in the data as FD control candidates
        if self._df is not None:
            states = list(self._df['state'].unique())
            self.param['fd_state'].objects = states
            self.fd_state = states[0]
        else:
            self.param['fd_state'].objects = []
    @param.depends('fd_state', watch=True)
    def _update_fd_exposure(self):
        # Exposure options are limited to those measured for the chosen FD state
        if self._df is not None:
            fd_entries = self._df[self._df['state'] == self.fd_state]
            exposures = list(np.unique(fd_entries['exposure']))
        else:
            exposures = []
        self.param['fd_exposure'].objects = exposures
        if exposures:
            self.fd_exposure = exposures[0]
    @param.depends('fd_state', 'fd_exposure', watch=True)
    def _update_exp_state(self):
        # Experiment states are those whose peptides intersect with the control
        if self._df is not None:
            # Booleans of data entries which are in the selected control
            control_bools = np.logical_and(self._df['state'] == self.fd_state, self._df['exposure'] == self.fd_exposure)
            control_data = self._df[control_bools].to_records()
            other_data = self._df[~control_bools].to_records()
            intersection = array_intersection([control_data, other_data], fields=['start', 'end']) # sequence?
            states = list(np.unique(intersection[1]['state']))
        else:
            states = []
        self.param['exp_state'].objects = states
        if states:
            self.exp_state = states[0] if not self.exp_state else self.exp_state
    @param.depends('exp_state', watch=True)
    def _update_exp_exposure(self):
        # Refresh exposure choices and defaults when the experiment state changes
        if self._df is not None:
            exp_entries = self._df[self._df['state'] == self.exp_state]
            exposures = list(np.unique(exp_entries['exposure']))
            exposures.sort()
        else:
            exposures = []
        self.param['exp_exposures'].objects = exposures
        self.exp_exposures = exposures
        # Default the dataset name to the state name unless the user set one
        if not self.dataset_name or self.dataset_name in self.param['exp_state'].objects:
            self.dataset_name = self.exp_state
        # Default c_term to the last residue seen in the data
        if not self.c_term and exposures:
            self.c_term = int(np.max(exp_entries['end']))
    def _datasets_updated(self, events):
        # Update datasets widget as datasets on parents change
        objects = list(self.parent.data_objects.keys())
        self.param['dataset_list'].objects = objects
    def _action_add_dataset(self):
        """Apply controls to :class:`~pyhdx.models.PeptideMasterTable` and set :class:`~pyhdx.models.HDXMeasurement`"""
        if self._df is None:
            self.parent.logger.info("No data loaded")
            return
        elif self.dataset_list and self.dataset_name in self.dataset_list:
            self.parent.logger.info(f"Dataset name {self.dataset_name} already in use")
            return
        peptides = PeptideMasterTable(self._df, d_percentage=self.d_percentage,
                                      drop_first=self.drop_first, ignore_prolines=self.ignore_prolines)
        # Back-exchange correction: normalize against the FD control sample or
        # apply a flat user-supplied percentage
        if self.be_mode == 'FD Sample':
            control_0 = None # = (self.zero_state, self.zero_exposure) if self.zero_state != 'None' else None
            peptides.set_control((self.fd_state, self.fd_exposure), control_0=control_0)
        elif self.be_mode == 'Flat percentage':
            # todo @tejas: Add test
            peptides.set_backexchange(self.be_percent)
        data = peptides.get_state(self.exp_state)
        exp_bools = data['exposure'].isin(self.exp_exposures)
        data = data[exp_bools]
        #todo temperature ph kwarg for series
        hdxm = HDXMeasurement(data, c_term=self.c_term, n_term=self.n_term, sequence=self.sequence,
                              name=self.dataset_name, temperature=self.temperature, pH=self.pH)
        self.parent.data_objects[self.dataset_name] = hdxm
        self.parent.param.trigger('data_objects') # Trigger update
        # Publish the peptide table for plotting
        df = hdxm.data
        df['start_end'] = [str(s) + '_' + str(e) for s, e in zip(df['start'], df['end'])]
        df['id'] = df.index % hdxm.Np
        target_source = self.parent.sources['dataframe']
        target_source.add_df(df, 'peptides', self.dataset_name)
        # Publish the per-residue RFU table (r_number index, one column per timepoint)
        index = pd.Index(hdxm.coverage.r_number, name='r_number')
        df = pd.DataFrame(hdxm.rfu_residues, index=index, columns=hdxm.timepoints)
        target_source = self.parent.sources['dataframe']
        target_source.add_df(df, 'rfu', self.dataset_name)
        self.dataset_list.append(self.dataset_name)
        self.parent.logger.info(f'Loaded dataset {self.dataset_name} with experiment state {self.exp_state} '
                                f'({len(hdxm)} timepoints, {len(hdxm.coverage)} peptides each)')
        self.parent.logger.info(f'Average coverage: {hdxm.coverage.percent_coverage:.3}%, '
                                f'Redundancy: {hdxm.coverage.redundancy:.2}')
    def _action_remove_datasets(self):
        # NOTE(review): everything below the raise is unreachable dead code
        raise NotImplementedError('Removing datasets not implemented')
        for name in self.dataset_list:
            self.parent.datasets.pop(name)
        self.parent.param.trigger('datasets') # Manual trigger as key assignment does not trigger the param
# todo class DataManagerControl()
class CoverageControl(ControlPanel):
    """Control panel for the peptide coverage figure.

    Declares no widgets of its own; its layout exposes only the coverage
    colormap options ('opts.cmap').
    """
    header = 'Coverage'
    #temp_new_data = param.Action(lambda self: self._action_new_data())
    def __init__(self, parent, **params):
        super().__init__(parent, **params)
        self.update_box()
    @property
    def _layout(self):
        # Layout spec as (accessor, widget names) tuples; presumably None means
        # all widgets of the referenced object -- TODO confirm in ControlPanel
        return [
            # ('filters.coverage_state_name', None),
            # ('filters.coverage_exposure', None),
            ('opts.cmap', None),
            #('self', None)
        ]
class InitialGuessControl(ControlPanel):
    """
    This controller allows users to derive initial guesses for D-exchange rate from peptide uptake data.
    """
    #todo remove lambda symbol although its really really funny
    header = 'Initial Guesses'
    fitting_model = param.Selector(default='Half-life (λ)', objects=['Half-life (λ)', 'Association'],
                                   doc='Choose method for determining initial guesses.')
    dataset = param.Selector(default='', doc='Dataset to apply bounds to', label='Dataset (for bounds)')
    global_bounds = param.Boolean(default=False, doc='Set bounds globally across all datasets')
    lower_bound = param.Number(0., doc='Lower bound for association model fitting')
    upper_bound = param.Number(0., doc='Upper bound for association model fitting')
    guess_name = param.String(default='Guess_1', doc='Name for the initial guesses')
    do_fit1 = param.Action(lambda self: self._action_fit(), label='Calculate Guesses', doc='Start initial guess fitting',
                           constant=True)
    bounds = param.Dict({}, doc='Dictionary which stores rate fitting bounds', precedence=-1)
    def __init__(self, parent, **params):
        self.pbar1 = ASyncProgressBar() #tqdm? https://github.com/holoviz/panel/pull/2079
        self.pbar2 = ASyncProgressBar()
        super(InitialGuessControl, self).__init__(parent, **params)
        self.parent.param.watch(self._parent_datasets_updated, ['data_objects']) #todo refactor
        # Bounds widgets stay hidden until a rate-equation model is selected
        excluded = ['lower_bound', 'upper_bound', 'global_bounds', 'dataset']
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()
        self._guess_names = {}  # maps submitted dask future keys to user-chosen guess names
    @property
    def _layout(self):
        return [
            ('self', self.own_widget_names),
            # ('filters.select_index_rates_lv1', None),
            # ('filters.select_index_rates_lv2', None),
        ]
    def make_dict(self):
        """Return the widget mapping for this panel, including both progress bars."""
        widgets = self.generate_widgets(lower_bound=pn.widgets.FloatInput, upper_bound=pn.widgets.FloatInput)
        widgets.update(pbar1=self.pbar1.view, pbar2=self.pbar2.view)
        return widgets
    @param.depends('fitting_model', watch=True)
    def _fitting_model_updated(self):
        # Show the bounds widgets only for the rate-equation models
        if self.fitting_model == 'Half-life (λ)':
            excluded = ['dataset', 'lower_bound', 'upper_bound', 'global_bounds']
        elif self.fitting_model in ['Association', 'Dissociation']:
            excluded = []
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()
    @param.depends('global_bounds', watch=True)
    def _global_bounds_updated(self):
        # With global bounds the per-dataset selector is locked
        if self.global_bounds:
            self.param['dataset'].constant = True
        else:
            self.param['dataset'].constant = False
    @param.depends('dataset', watch=True)
    def _dataset_updated(self):
        # Load the stored bounds of the newly selected dataset into the widgets
        lower, upper = self.bounds[self.dataset]
        self.lower_bound = lower
        self.upper_bound = upper
    @param.depends('lower_bound', 'upper_bound', watch=True)
    def _bounds_updated(self):
        # if self.global_bounds:
        #     for k in self.bounds.keys():
        #         self.bounds[k] = (self.lower_bound, self.upper_bound)
        # Persist edited bounds per dataset; global bounds are read directly
        # from the widgets in _action_fit
        if not self.global_bounds:
            self.bounds[self.dataset] = (self.lower_bound, self.upper_bound)
    def _parent_datasets_updated(self, events):
        """Sync the fit button, bounds dict and dataset selector with parent data objects."""
        if len(self.parent.data_objects) > 0:
            self.param['do_fit1'].constant = False
        # keys to remove:
        for k in self.bounds.keys() - self.parent.data_objects.keys():
            self.bounds.pop(k)
        # keys to add:
        for k in self.parent.data_objects.keys() - self.bounds.keys():
            self.bounds[k] = get_bounds(self.parent.data_objects[k].timepoints)
        options = list(self.parent.data_objects.keys())
        self.param['dataset'].objects = options
        # NOTE(review): raises IndexError when options is empty and no dataset
        # is selected -- confirm dataset removal cannot trigger this path.
        if not self.dataset:
            self.dataset = options[0]
    def add_fit_result(self, future):
        """Callback for a finished guess fit: store results and publish the rates table."""
        name = self._guess_names.pop(future.key)
        results = future.result()
        dfs = [result.output for result in results]
        combined_results = pd.concat(dfs, axis=1,
                                     keys=list(self.parent.data_objects.keys()),
                                     names=['state_name', 'quantity'])
        # NOTE(review): other methods access sources via self.parent.sources --
        # confirm ControlPanel exposes a 'sources' shortcut.
        self.sources['dataframe'].add_df(combined_results, 'rates', name)
        self.parent.fit_results[name] = {k: v for k, v in zip(self.parent.data_objects.keys(), results)}
        self.parent.param.trigger('data_objects')  # Informs other fittings that initial guesses are now available
        self.param['do_fit1'].constant = False
    def _action_fit(self):
        """Submit initial-guess fits for all loaded datasets to the dask client."""
        if len(self.parent.data_objects) == 0:
            self.parent.logger.info('No datasets loaded')
            return
        if self.guess_name in itertools.chain(self.parent.fit_results.keys(), self._guess_names.values()):
            self.parent.logger.info(f"Guess with name {self.guess_name} already in use")
            return
        self.parent.logger.debug('Start initial guess fit')
        self.param['do_fit1'].constant = True
        num_samples = len(self.parent.data_objects)
        if self.fitting_model.lower() in ['association', 'dissociation']:
            if self.global_bounds:
                bounds = [(self.lower_bound, self.upper_bound)]*num_samples
            else:
                bounds = self.bounds.values()
            futures = self.parent.client.map(fit_rates_weighted_average,
                                             self.parent.data_objects.values(), bounds, client='worker_client')
        elif self.fitting_model == 'Half-life (λ)': # this is practically instantaneous and does not require dask
            futures = self.parent.client.map(fit_rates_half_time_interpolate, self.parent.data_objects.values())
        dask_future = self.parent.client.submit(lambda args: args, futures) #combine multiple futures into one future
        self._guess_names[dask_future.key] = self.guess_name
        self.parent.future_queue.append((dask_future, self.add_fit_result))
class FitControl(ControlPanel):
    """
    This controller allows users to execute PyTorch fitting of the global data set.

    Currently, repeated fitting overrides the old result.
    """

    header = 'Fitting'

    initial_guess = param.Selector(doc='Name of dataset to use for initial guesses.')
    fit_mode = param.Selector(default='Batch', objects=['Batch', 'Single'])
    stop_loss = param.Number(STOP_LOSS, bounds=(0, None),
                             doc='Threshold loss difference below which to stop fitting.')
    stop_patience = param.Integer(PATIENCE, bounds=(1, None),
                                  doc='Number of epochs where stop loss should be satisfied before stopping.')
    learning_rate = param.Number(optimizer_defaults['SGD']['lr'], bounds=(0, None),
                                 doc='Learning rate parameter for optimization.')
    momentum = param.Number(optimizer_defaults['SGD']['momentum'], bounds=(0, None),
                            doc='Stochastic Gradient Descent momentum')
    nesterov = param.Boolean(optimizer_defaults['SGD']['nesterov'],
                             doc='Use Nesterov type of momentum for SGD')
    epochs = param.Integer(EPOCHS, bounds=(1, None),
                           doc='Maximum number of epochs (iterations).')
    r1 = param.Number(R1, bounds=(0, None), label='Regularizer 1 (peptide axis)',
                      doc='Value of the regularizer along residue axis.')
    r2 = param.Number(R2, bounds=(0, None), label='Regularizer 2 (sample axis)',
                      doc='Value of the regularizer along sample axis.', constant=True)
    fit_name = param.String("Gibbs_fit_1", doc="Name for the fit result")
    do_fit = param.Action(lambda self: self._action_fit(), constant=True, label='Do Fitting',
                          doc='Start global fitting')

    def __init__(self, parent, **params):
        self.pbar1 = ASyncProgressBar()  # tqdm?
        super().__init__(parent, **params)
        source = self.parent.sources['dataframe']
        source.param.watch(self._source_updated, ['updated'])
        self._current_jobs = 0
        self._max_jobs = 2  # todo config
        # Maps dask future keys to the user-chosen names of jobs in flight
        self._fit_names = {}

    def _source_updated(self, *events):
        """Enable fitting and refresh initial-guess options when rates data arrives."""
        table = self.parent.sources['dataframe'].get('rates')
        objects = list(table.columns.levels[0])
        if objects:
            self.param['do_fit'].constant = False
            self._fit_mode_updated()

        self.param['initial_guess'].objects = objects
        if not self.initial_guess and objects:
            self.initial_guess = objects[0]

    @param.depends('fit_mode', watch=True)
    def _fit_mode_updated(self):
        # The sample-axis regularizer r2 only applies to batch fits of multiple samples
        if self.fit_mode == 'Batch' and len(self.parent.data_objects) > 1:
            self.param['r2'].constant = False
        else:
            self.param['r2'].constant = True

    def add_fit_result(self, future):
        """Callback invoked when a PyTorch fit future completes.

        Builds the fit output, per-peptide MSE, calculated-D and losses-per-epoch
        dataframes from the result (either a list of single-fit results or one
        batch-fit result) and publishes them to the dataframe source.
        """
        # todo perhaps all these dfs should be in the future?
        name = self._fit_names.pop(future.key)
        result = future.result()
        self._current_jobs -= 1
        if self._current_jobs < self._max_jobs:
            # Fix: re-enable the fit button once a job slot frees up; previously it
            # stayed disabled forever after _max_jobs was reached.
            self.widgets['do_fit'].constant = False
        self.parent.logger.info(f'Finished PyTorch fit: {name}')

        # List of single fit results
        if isinstance(result, list):
            self.parent.fit_results[name] = list(result)
            # Fit output (deltaG etc) per state
            output_dfs = {fit_result.hdxm_set.name: fit_result.output for fit_result in result}
            df = pd.concat(output_dfs.values(), keys=output_dfs.keys(), axis=1)

            # Create mse losses dataframe
            # ---------------------------
            dfs = {}
            for single_result in result:
                # Determine mean squared errors per peptide, summed over timepoints
                mse = single_result.get_mse()
                mse_sum = np.sum(mse, axis=1)
                peptide_data = single_result.hdxm_set[0].data
                data_dict = {'start': peptide_data['start'], 'end': peptide_data['end'], 'total_mse': mse_sum}
                dfs[single_result.hdxm_set.name] = pd.DataFrame(data_dict)
            mse_df = pd.concat(dfs.values(), keys=dfs.keys(), axis=1)

            # Create d_calc dataframe
            # -----------------------
            # todo needs cleaning up
            state_dfs = {}
            for single_result in result:
                # Log-spaced time vector spanning the measured (nonzero) timepoints
                tp_flat = single_result.hdxm_set.timepoints
                elem = tp_flat[np.nonzero(tp_flat)]
                time_vec = np.logspace(np.log10(elem.min()) - 1, np.log10(elem.max()), num=100, endpoint=True)
                d_calc_state = single_result(time_vec)  # shape Np x Nt
                hdxm = single_result.hdxm_set
                peptide_dfs = []
                pm_data = hdxm[0].data
                for d_peptide, pm_row in zip(d_calc_state, pm_data):
                    peptide_id = f"{pm_row['start']}_{pm_row['end']}"
                    data_dict = {'timepoints': time_vec, 'd_calc': d_peptide, 'start_end': [peptide_id] * len(time_vec)}
                    peptide_dfs.append(pd.DataFrame(data_dict))
                state_dfs[hdxm.name] = pd.concat(peptide_dfs, axis=0, ignore_index=True)
            d_calc_df = pd.concat(state_dfs.values(), keys=state_dfs.keys(), axis=1)

            # Create losses/epoch dataframe
            # -----------------------------
            losses_dfs = {fit_result.hdxm_set.name: fit_result.losses for fit_result in result}
            losses_df = pd.concat(losses_dfs.values(), keys=losses_dfs.keys(), axis=1)

        else:  # one batchfit result
            self.parent.fit_results[name] = result  # todo this name can be changed by the time this is executed
            df = result.output

            # Create MSE losses df (per peptide, summed over timepoints)
            # ----------------------------------------------------------
            mse = result.get_mse()
            dfs = {}
            for mse_sample, hdxm in zip(mse, result.hdxm_set):
                peptide_data = hdxm[0].data
                mse_sum = np.sum(mse_sample, axis=1)
                # Indexing of mse_sum with Np to account for zero-padding
                data_dict = {'start': peptide_data['start'], 'end': peptide_data['end'], 'total_mse': mse_sum[:hdxm.Np]}
                dfs[hdxm.name] = pd.DataFrame(data_dict)
            mse_df = pd.concat(dfs.values(), keys=dfs.keys(), axis=1)

            # Create d_calc dataframe
            # -----------------------
            tp_flat = result.hdxm_set.timepoints.flatten()
            elem = tp_flat[np.nonzero(tp_flat)]
            time_vec = np.logspace(np.log10(elem.min()) - 1, np.log10(elem.max()), num=100, endpoint=True)
            stacked = np.stack([time_vec for i in range(result.hdxm_set.Ns)])
            d_calc = result(stacked)
            state_dfs = {}
            for hdxm, d_calc_state in zip(result.hdxm_set, d_calc):
                peptide_dfs = []
                pm_data = hdxm[0].data
                for d_peptide, idx in zip(d_calc_state, pm_data.index):
                    peptide_id = f"{pm_data.loc[idx, 'start']}_{pm_data.loc[idx, 'end']}"
                    data_dict = {'timepoints': time_vec, 'd_calc': d_peptide, 'start_end': [peptide_id] * len(time_vec)}
                    peptide_dfs.append(pd.DataFrame(data_dict))
                state_dfs[hdxm.name] = pd.concat(peptide_dfs, axis=0, ignore_index=True)
            d_calc_df = pd.concat(state_dfs.values(), keys=state_dfs.keys(), axis=1)

            # Create losses/epoch dataframe
            # -----------------------------
            losses_df = result.losses.copy()
            losses_df.columns = pd.MultiIndex.from_product(
                [['All states'], losses_df.columns],
                names=['state_name', 'quantity']
            )

            self.parent.logger.info(
                f"Finished fitting in {len(result.losses)} epochs, final mean squared residuals is {result.mse_loss:.2f}")
            self.parent.logger.info(f"Total loss: {result.total_loss:.2f}, regularization loss: {result.reg_loss:.2f} "
                                    f"({result.regularization_percentage:.1f}%)")

        self.parent.sources['dataframe'].add_df(df, 'global_fit', names=[name])
        self.parent.sources['dataframe'].add_df(mse_df, 'peptides_mse', names=[name])
        self.parent.sources['dataframe'].add_df(d_calc_df, 'd_calc', names=[name])
        self.parent.sources['dataframe'].add_df(losses_df, 'losses', names=[name])
        self.parent.param.trigger('fit_results')

    def _action_fit(self):
        """Submit a global Gibbs fit (batch or per-sample single fits) to the dask client."""
        if self.fit_name in itertools.chain(self.parent.fit_results.keys(), self._fit_names.values()):
            self.parent.logger.info(f"Fit result with name {self.fit_name} already in use")
            return
        self.parent.logger.info('Started PyTorch fit')
        self._current_jobs += 1
        if self._current_jobs >= self._max_jobs:
            self.widgets['do_fit'].constant = True

        self.parent.logger.info(f'Current number of active jobs: {self._current_jobs}')
        if self.fit_mode == 'Batch':
            hdx_set = self.parent.hdx_set
            rates_df = self.sources['dataframe'].get('rates', fit_ID=self.initial_guess)
            rates_guess = [rates_df[state]['rate'] for state in hdx_set.names]
            gibbs_guess = hdx_set.guess_deltaG(rates_guess)
            dask_future = self.parent.client.submit(fit_gibbs_global_batch, hdx_set, gibbs_guess, **self.fit_kwargs)
        else:
            data_objs = self.parent.data_objects.values()
            rates_df = self.sources['dataframe'].get('rates', fit_ID=self.initial_guess)
            gibbs_guesses = [data_obj.guess_deltaG(rates_df[data_obj.name]['rate']) for data_obj in data_objs]
            futures = self.parent.client.map(fit_gibbs_global, data_objs, gibbs_guesses, **self.fit_kwargs)
            # Combine list of futures into one future object
            # See https://github.com/dask/distributed/pull/560
            dask_future = self.parent.client.submit(lambda args: args, futures)

        self._fit_names[dask_future.key] = self.fit_name
        self.parent.future_queue.append((dask_future, self.add_fit_result))

    @property
    def fit_kwargs(self):
        """dict: keyword arguments for the fitting routines, built from the current settings."""
        fit_kwargs = dict(r1=self.r1, lr=self.learning_rate, momentum=self.momentum, nesterov=self.nesterov,
                          epochs=self.epochs, patience=self.stop_patience, stop_loss=self.stop_loss)
        if self.fit_mode == 'Batch':
            fit_kwargs['r2'] = self.r2

        return fit_kwargs
class ClassificationControl(ControlPanel):
    """
    This controller allows users classify 'mapping' datasets and assign them colors.

    Coloring can be either in discrete categories or as a continuous custom color map.
    """

    header = 'Classification'

    # format ['tag1', ('tag2a', 'tag2b') ] = tag1 OR (tag2a AND tag2b)
    # todo unify name for target field (target_data set)
    # When coupling param with the same name together there should be an option to exclude this behaviour
    table = param.Selector(label='Target table')

    mode = param.Selector(default='Discrete', objects=['Discrete', 'Continuous', 'Color map'],
                          doc='Choose color mode (interpolation between selected colors).')
    num_colors = param.Integer(3, bounds=(1, 10), label='Number of colours',
                               doc='Number of classification colors.')
    library = param.Selector(default='matplotlib', objects=['matplotlib', 'colorcet'])
    color_map = param.Selector()
    otsu_thd = param.Action(lambda self: self._action_otsu(), label='Otsu',
                            doc="Automatically perform thresholding based on Otsu's method.")
    linear_thd = param.Action(lambda self: self._action_linear(), label='Linear',
                              doc='Automatically perform thresholding by creating equally spaced sections.')
    log_space = param.Boolean(False,
                              doc='Boolean to set whether to apply colors in log space or not.')
    no_coverage = param.Color(default='#8c8c8c', doc='Color to use for regions of no coverage')
    color_set_name = param.String('', doc='Name for the color dataset to add')
    add_colorset = param.Action(lambda self: self._action_add_colorset())

    # Current threshold values and colors; hidden from the auto-generated widgets
    values = param.List(default=[], precedence=-1)
    colors = param.List(default=[], precedence=-1)

    def __init__(self, parent, **param):
        super().__init__(parent, **param)
        # https://discourse.holoviz.org/t/based-on-a-select-widget-update-a-second-select-widget-then-how-to-link-the-latter-to-a-reactive-plot/917/8
        cc_cmaps = sorted(colorcet.cm.keys())
        # Exclude matplotlib's mirrored 'cet_' colormaps so the two libraries don't overlap
        mpl_cmaps = sorted(set(plt.colormaps()) - set('cet_' + cmap for cmap in cc_cmaps))
        self.cmaps = {'matplotlib': mpl_cmaps, 'colorcet': cc_cmaps}
        self.param['color_map'].objects = mpl_cmaps

        self._update_num_colors()
        self._update_num_values()
        self.excluded = ['library', 'color_map']  # excluded widgets based on choice of `mode`

        # Only views that apply a colormap transform are valid coloring targets
        views = [view for view in self.views.values() if any(isinstance(trs, ApplyCmapTransform) for trs in view.transforms)]
        options = [view.table for view in views]
        for view in views:
            view.source.param.watch(self._sources_updated, 'updated')
        self.param['table'].objects = options
        if not self.table and options:
            self.table = options[0]

        self._table_updated()  # also updates box

    @property
    def own_widget_names(self):
        """returns a list of names of widgets in self.widgets to be laid out in controller card"""
        initial_widgets = []
        for name in self.param:
            precedence = self.param[name].precedence
            if (precedence is None or precedence > 0) and name not in self.excluded + ['name']:
                initial_widgets.append(name)

        # Insert the dynamically created 'select_*' widgets right after the first widget
        select_widgets = [name for name in self.widgets.keys() if name.startswith('select')]
        initial_widgets[1:1] = select_widgets

        widget_names = initial_widgets + [f'value_{i}' for i in range(len(self.values))]
        if self.mode != 'Color map':
            widget_names += [f'color_{i}' for i in range(len(self.colors))]
        return widget_names

    def make_dict(self):
        return self.generate_widgets(num_colors=pn.widgets.IntInput)

    @property
    def _layout(self):
        return [
            ('self', self.own_widget_names),
        ]

    def _sources_updated(self, *events):
        self._table_updated()

    @param.depends('table', watch=True)
    def _table_updated(self):
        """Rebuild the 'select_*' widgets to match the column levels of the selected table."""
        df = self.get_data()
        # todo also get schema and check if this table is compatible (ie has numbers, not colors only)
        if df.empty:
            return

        names = df.columns.names

        # Remove old select widgets (plain loop instead of side-effect comprehension)
        old_widget_names = [key for key in self.widgets.keys() if key.startswith('select')]
        for key in old_widget_names:
            self.widgets.pop(key)

        widget_dict = {}
        for i, (name, options) in enumerate(zip(names, df.columns.levels)):
            # All levels except the last may be wildcarded with '*'
            _opts = ['*'] + list(options) if i != len(names) - 1 else list(options)
            # todo make function to determine defaults
            if i == 0:
                default = _opts[-1]
            else:
                default = 'deltaG' if 'deltaG' in _opts else _opts[0]
            widget = pn.widgets.Select(name=name, options=_opts, value=default)
            widget_dict[f'select_{i}'] = widget

        self.widgets.update(widget_dict)
        self.update_box()

    def get_data(self):
        """object pandas dataframe: returns current multindex dataframe"""
        source = self.sources['dataframe']
        df = source.get(self.table)
        return df

    def get_selected_data(self):
        """Return the subset of the current table matching the 'select_*' widget choices."""
        # todo move method to data source?
        df = self.get_data()

        selected_fields = [widget.value for name, widget in self.widgets.items() if name.startswith('select')]
        bools_list = [df.columns.get_level_values(i) == value for i, value in enumerate(selected_fields) if
                      value != '*']

        if len(bools_list) == 0:
            bools = np.ones(len(df.columns)).astype(bool)
        elif len(bools_list) == 1:
            bools = np.array(bools_list).flatten()
        else:
            bools_array = np.array(bools_list)
            # np.prod: np.product was removed in NumPy 2.0
            bools = np.prod(bools_array, axis=0).astype(bool)

        selected_df = df.iloc[:, bools]
        return selected_df

    def get_values(self):
        """return numpy array with only the values from selected dataframe, nan omitted"""
        array = self.get_selected_data().to_numpy().flatten()
        values = array[~np.isnan(array)]
        return values

    def _action_otsu(self):
        """Set thresholds by multi-Otsu classification of the selected values."""
        if self.num_colors <= 1:
            return
        values = self.get_values()  # todo check for no values
        if not values.size:
            return
        func = np.log if self.log_space else lambda x: x  # this can have NaN when in log space
        thds = threshold_multiotsu(func(values), classes=self.num_colors)
        widgets = [widget for name, widget in self.widgets.items() if name.startswith('value')]
        for thd, widget in zip(thds[::-1], widgets):  # Values from high to low
            widget.start = None
            widget.end = None
            widget.value = np.exp(thd) if self.log_space else thd
        self._update_bounds()

    def _action_linear(self):
        """Set thresholds equally (or log-equally) spaced over the value range."""
        i = 1 if self.mode == 'Discrete' else 0
        values = self.get_values()
        if not values.size:
            return
        if self.log_space:
            thds = np.logspace(np.log(np.min(values)), np.log(np.max(values)),
                               num=self.num_colors + i, endpoint=True, base=np.e)
        else:
            thds = np.linspace(np.min(values), np.max(values), num=self.num_colors + i, endpoint=True)

        widgets = [widget for name, widget in self.widgets.items() if name.startswith('value')]
        for thd, widget in zip(thds[i:self.num_colors][::-1], widgets):
            # Remove bounds, set values, update bounds
            widget.start = None
            widget.end = None
            widget.value = thd
        self._update_bounds()

    def _action_add_colorset(self):
        """Apply the current colormap to the selected data and publish it as a color table."""
        if not self.color_set_name:
            self.parent.logger.info('No name given to the colorset')
            return
        source = self.sources['dataframe']
        if self.color_set_name in source.tables.keys():  # todo update
            self.parent.logger.info(f'Colorset with name {self.color_set_name} already present')
            return

        selected_df = self.get_selected_data()
        cmap, norm = self.get_cmap_and_norm()

        array = cmap(norm(selected_df), bytes=True)
        colors_hex = rgb_to_hex(array.reshape(-1, 4))
        output = colors_hex.reshape(array.shape[:-1])
        output_df = pd.DataFrame(output, index=selected_df.index, columns=selected_df.columns)
        if output_df.index.name == 'r_number':  # The selected dataset is a protein mappable
            # Pad the residue index to span the full n_term..c_term range with no-coverage color
            c_term = max([data_obj.coverage.protein.c_term for data_obj in self.parent.data_objects.values()])
            n_term = min([data_obj.coverage.protein.n_term for data_obj in self.parent.data_objects.values()])
            new_index = pd.RangeIndex(start=n_term, stop=c_term, name='r_number')
            output_df = output_df.reindex(index=new_index, fill_value=self.no_coverage.upper())

        output_df.rename_axis(columns={'fit_ID': 'color_ID'}, inplace=True)
        output_df.columns = output_df.columns.set_levels([self.color_set_name], level=0)

        source.add_df(output_df, 'colors')

    @param.depends('color_map', 'values', 'colors', watch=True)
    def _action_apply(self):
        """Push the current colormap and norm into the cmap transform."""
        cmap, norm = self.get_cmap_and_norm()
        if cmap and norm:
            # this needs to be updated to more generalized
            transform = self.transforms['cmap_transform']
            transform.cmap = cmap
            transform.norm = norm

    def get_cmap_and_norm(self):
        """Return the (cmap, norm) pair for the current mode, or (None, None) if incomplete."""
        norm_klass = mpl.colors.Normalize if not self.log_space else mpl.colors.LogNorm
        if len(self.values) < 2:
            return None, None

        if self.mode == 'Discrete':
            if len(self.values) != len(self.colors) - 1:
                return None, None
            cmap = mpl.colors.ListedColormap(self.colors)
            norm = mpl.colors.BoundaryNorm(self.values[::-1], self.num_colors, extend='both')  # todo refactor values to thd_values
        elif self.mode == 'Continuous':
            norm = norm_klass(vmin=np.min(self.values), vmax=np.max(self.values), clip=True)
            positions = norm(self.values[::-1])
            cmap = mpl.colors.LinearSegmentedColormap.from_list('custom_cmap', list(zip(positions, self.colors)))
        elif self.mode == 'Color map':
            norm = norm_klass(vmin=np.min(self.values), vmax=np.max(self.values), clip=True)
            if self.library == 'matplotlib':
                cmap = mpl.cm.get_cmap(self.color_map)
            elif self.library == 'colorcet':
                cmap = getattr(colorcet, 'm_' + self.color_map)

        cmap.set_bad(self.no_coverage)
        return cmap, norm

    @param.depends('library', watch=True)
    def _update_library(self):
        options = self.cmaps[self.library]
        self.param['color_map'].objects = options

    @param.depends('mode', watch=True)
    def _mode_updated(self):
        # Hide the widgets irrelevant to the selected color mode
        if self.mode == 'Discrete':
            self.excluded = ['library', 'color_map']
        elif self.mode == 'Continuous':
            self.excluded = ['library', 'color_map', 'otsu_thd']
        elif self.mode == 'Color map':
            self.excluded = ['otsu_thd', 'num_colors']
            self.num_colors = 2

        # todo adjust add/ remove color widgets methods
        self.param.trigger('num_colors')
        self.update_box()

    @param.depends('num_colors', watch=True)
    def _update_num_colors(self):
        # Grow/shrink the color widget list to match num_colors
        while len(self.colors) != self.num_colors:
            if len(self.colors) > self.num_colors:
                self._remove_color()
            elif len(self.colors) < self.num_colors:
                self._add_color()
        self.param.trigger('colors')

    @param.depends('num_colors', watch=True)
    def _update_num_values(self):
        # Discrete mode needs one threshold fewer than colors; other modes need one per color
        diff = 1 if self.mode == 'Discrete' else 0
        while len(self.values) != self.num_colors - diff:
            if len(self.values) > self.num_colors - diff:
                self._remove_value()
            elif len(self.values) < self.num_colors - diff:
                self._add_value()

        self._update_bounds()
        self.param.trigger('values')
        self.update_box()

    def _add_value(self):
        # value widgets are ordered in decreasing order, ergo next value widget
        # starts with default value of previous value -1
        try:
            first_value = self.values[-1]
        except IndexError:
            first_value = 0

        default = float(first_value - 1)
        self.values.append(default)

        name = f'Threshold {len(self.values)}'
        key = f'value_{len(self.values) - 1}'  # values already populated, first name starts at 1
        widget = pn.widgets.FloatInput(name=name, value=default)
        self.widgets[key] = widget
        widget.param.watch(self._value_event, ['value'])

    def _remove_value(self):
        key = f'value_{len(self.values) - 1}'
        widget = self.widgets.pop(key)
        self.values.pop()

        # Iterate over a copy: unwatch mutates the _watchers list while we loop
        for watcher in list(widget.param._watchers):
            widget.param.unwatch(watcher)
        del widget

    def _add_color(self):
        try:
            default = DEFAULT_CLASS_COLORS[len(self.colors)]
        except IndexError:
            # Out of preset colors: fall back to a random hex color
            default = "#"+''.join(np.random.choice(list('0123456789abcdef'), 6))

        self.colors.append(default)

        key = f'color_{len(self.colors) - 1}'
        widget = pn.widgets.ColorPicker(value=default)
        self.widgets[key] = widget

        widget.param.watch(self._color_event, ['value'])

    def _remove_color(self):
        key = f'color_{len(self.colors) - 1}'
        widget = self.widgets.pop(key)
        self.colors.pop()
        # Iterate over a copy: unwatch mutates the _watchers list while we loop
        for watcher in list(widget.param._watchers):
            widget.param.unwatch(watcher)
        del widget

    def _color_event(self, *events):
        """Sync a single color-picker change back into the colors list."""
        for event in events:
            idx = list(self.widgets.values()).index(event.obj)
            key = list(self.widgets.keys())[idx]
            widget_index = int(key.split('_')[1])
            self.colors[widget_index] = event.new

        self.param.trigger('colors')

    def _value_event(self, *events):
        """triggers when a single value gets changed"""
        for event in events:
            idx = list(self.widgets.values()).index(event.obj)
            key = list(self.widgets.keys())[idx]
            widget_index = int(key.split('_')[1])
            self.values[widget_index] = event.new

        self._update_bounds()
        self.param.trigger('values')

    def _update_bounds(self):
        """Constrain each threshold widget to stay strictly between its neighbours."""
        for i in range(len(self.values)):
            widget = self.widgets[f'value_{i}']
            if i > 0:
                key = f'value_{i-1}'
                prev_value = float(self.widgets[key].value)
                widget.end = np.nextafter(prev_value, prev_value - 1)
            else:
                widget.end = None

            if i < len(self.values) - 1:
                key = f'value_{i+1}'
                next_value = float(self.widgets[key].value)
                widget.start = np.nextafter(next_value, next_value + 1)
            else:
                widget.start = None
class ProteinControl(ControlPanel):
    """Controller for loading a protein structure into the NGL protein view,
    either from an uploaded .pdb file or downloaded from the RCSB PDB."""

    header = 'Protein Control'

    input_mode = param.Selector(doc='Method of protein structure input', objects=['PDB File', 'RCSB Download'])
    file_binary = param.Parameter()
    rcsb_id = param.String(doc='RCSB ID of protein to download')
    load_structure = param.Action(lambda self: self._action_load_structure())

    def __init__(self, parent, **params):
        super().__init__(parent, **params)
        excluded = ['rcsb_id']
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()

    @property
    def _layout(self):
        return [('self', self.own_widget_names),
                ('filters.ngl_color_id', None),
                ('filters.ngl_state_name', None),
                ]

    def make_dict(self):
        return self.generate_widgets(file_binary=pn.widgets.FileInput(multiple=False, accept='.pdb'))

    @param.depends('input_mode', watch=True)
    def _update_input_mode(self):
        # Show only the widgets relevant for the chosen input method
        if self.input_mode == 'PDB File':
            excluded = ['rcsb_id']
        elif self.input_mode == 'RCSB Download':
            excluded = ['file_binary']

        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()

    def _action_load_structure(self):
        """Load the structure from the file upload or RCSB download into the protein view."""
        view = self.views['protein']
        if self.input_mode == 'PDB File':
            pdb_string = self.file_binary.decode()
            view.ngl_view.pdb_string = pdb_string
        elif self.input_mode == 'RCSB Download':
            if len(self.rcsb_id) != 4:
                self.parent.logger.info(f"Invalid RCSB pdb id: {self.rcsb_id}")
                return

            # Use https: RCSB file downloads redirect/deprecate plain http
            url = f'https://files.rcsb.org/download/{self.rcsb_id}.pdb'
            with urllib.request.urlopen(url) as response:
                pdb_string = response.read().decode()
            view.ngl_view.pdb_string = pdb_string
class GraphControl(ControlPanel):
    """Controller coupling the global fit/state/peptide selectors to the per-view
    filters so that all graphs update together when one selector changes."""

    header = 'Graph Control'

    spin = param.Boolean(default=False, doc='Spin the protein object')
    state_name = param.Selector(doc="Name of the currently selected state")
    fit_id = param.Selector(doc="Name of the currently selected fit ID")
    peptide_index = param.Selector(doc="Index of the currently selected peptide")

    def __init__(self, parent, **params):
        super(GraphControl, self).__init__(parent, **params)
        source = self.sources['dataframe']
        source.param.watch(self._source_updated, 'updated')

    def make_dict(self):
        # Markdown section headers interleaved with the auto-generated parameter widgets
        widgets = {
            'general': pn.pane.Markdown('### General'),
            'coverage': pn.pane.Markdown('### Coverage'),
            'peptide': pn.pane.Markdown('### Peptide'),
            'losses': pn.pane.Markdown('### Losses'),
            'debugging': pn.pane.Markdown('### Debugging'),
        }

        return {**widgets, **self.generate_widgets()}

    def _source_updated(self, *events):
        """Refresh the fit ID and state name selector options from the source tables."""
        source = self.sources['dataframe']
        table = source.get('global_fit')
        fit_id_options = list(table.columns.get_level_values(0).unique())
        self.param['fit_id'].objects = fit_id_options
        if not self.fit_id and fit_id_options:
            self.fit_id = fit_id_options[0]

        table = source.get('peptides')
        state_name_options = list(table.columns.get_level_values(0).unique())
        self.param['state_name'].objects = state_name_options
        if not self.state_name and state_name_options:
            self.state_name = state_name_options[0]

    @param.depends('state_name', watch=True)
    def _update_state_name(self):
        """Propagate the selected state name to all state-dependent filters."""
        # https://param.holoviz.org/reference.html#param.parameterized.batch_watch
        dwarfs = ['coverage_state_name', 'coverage_mse_state_name', 'peptide_d_exp_state_name', 'peptide_d_calc_state_name',
                  'deltaG_state_name', 'rates_state_name', 'ngl_state_name']  # there really are 7
        # one filter to rule them all, one filter to find them,
        # one filter to bring them all, and in the darkness bind them;
        # in the Land of Mordor where the shadows lie.
        for dwarf in dwarfs:
            filt = self.filters[dwarf]
            filt.value = self.state_name

        # If current fit result was done as single, also update the state for the losses graph
        losses_filt = self.filters['losses_state_name']
        if self.state_name in losses_filt.param['value'].objects:
            losses_filt.value = self.state_name

        # Update possible choices for peptide selection depending on selected state
        source = self.sources['dataframe']
        table = source.get('peptides')
        unique_vals = table[self.state_name]['start_end'].unique()
        peptide_options = list(range(len(unique_vals)))
        self.param['peptide_index'].objects = peptide_options
        # NOTE(review): this resets the peptide selection to the first peptide on every
        # state change when one is already selected; if the intent was to only set a
        # default when unset (like fit_id/state_name above), this should be `is None` — confirm.
        if self.peptide_index is not None and peptide_options:
            self.peptide_index = peptide_options[0]

    @param.depends('fit_id', watch=True)
    def _update_fit_id(self):
        """Propagate the selected fit ID to all fit-dependent filters."""
        elves = ['coverage_mse_fit_id', 'peptide_d_calc_fit_id', 'deltaG_fit_id', 'losses_fit_id']
        for elf in elves:
            filt = self.filters[elf]
            filt.value = self.fit_id

        # perhaps this is faster?
        # widget = self.widget.clone()
        # self.widget.link(widget, value='value', bidirectional=True)

    @param.depends('peptide_index', watch=True)
    def _update_peptide_index(self):
        """Propagate the selected peptide index to the peptide D-uptake filters."""
        hobbits = ['peptide_d_exp_select', 'peptide_d_calc_select']
        for hobbit in hobbits:
            filt = self.filters[hobbit]
            filt.value = self.peptide_index

    @property
    def _layout(self):
        # Ordered (owner, widget-names) pairs describing the controller card layout
        return [
            # ('self', ['coverage']),
            # ('filters.select_index', None),
            # ('filters.exposure_slider', None),
            # ('opts.cmap', None),
            ('self', ['general']),
            ('self', ['fit_id', 'state_name']),
            ('self', ['coverage']),
            ('filters.coverage_exposure', None),
            ('self', ['peptide', 'peptide_index']),
            ('self', ['losses']),
            ('filters.losses_state_name', None),
            # ('self', ['debugging']),
            # ('filters.deltaG_fit_id', None),
            # ('filters.coverage_mse_fit_id', None),
        ]

    @param.depends('spin', watch=True)
    def _spin_updated(self):
        # Toggle the NGL view's automatic rotation
        view = self.views['protein']
        view.ngl_view.spin = self.spin
class FileExportControl(ControlPanel):
    # todo check if docstring is true
    """
    This controller allows users to export and download datasets.

    All datasets can be exported as .txt tables.
    'Mappable' datasets (with r_number column) can be exported as .pml pymol script, which colors protein structures
    based on their 'color' column.
    """

    header = "File Export"
    table = param.Selector(label='Target dataset', doc='Name of the dataset to export')
    export_format = param.Selector(default='csv', objects=['csv', 'pprint'],
                                   doc="Format of the exported tables."
                                       "'csv' is machine-readable, 'pprint' is human-readable format")

    # todo add color param an dlink with protein viewer color

    def __init__(self, parent, **param):
        super(FileExportControl, self).__init__(parent, **param)
        objects = list(self.sources['dataframe'].tables.keys())
        self.param['table'].objects = objects
        self.table = objects[0]
        self.sources['dataframe'].param.watch(self._source_updated, 'updated')

    def make_dict(self):
        widgets = self.generate_widgets()

        # Download buttons whose callbacks lazily produce the file contents on click
        widgets['export_tables'] = pn.widgets.FileDownload(
            label='Download table',
            callback=self.table_export_callback
        )
        widgets['export_pml'] = pn.widgets.FileDownload(label='Download pml scripts',
                                                        callback=self.pml_export_callback,
                                                        )

        return widgets

    @property
    def _layout(self):
        return [
            ('self', None)
        ]

    def _source_updated(self, *events):
        # Refresh the list of exportable tables when the source changes
        self.param['table'].objects = list(self.sources['dataframe'].tables.keys())
        self._table_updated()

    @param.depends('table', 'export_format', watch=True)
    def _table_updated(self):
        """Cache the selected table and update download filenames / pml availability."""
        self.df = self.sources['dataframe'].get(self.table)

        ext = '.csv' if self.export_format == 'csv' else '.txt'
        self.widgets['export_tables'].filename = self.table + ext
        # Only the 'colors' table can be converted to pymol coloring scripts
        if self.table == 'colors':
            self.widgets['export_pml'].disabled = False
            self.widgets['export_pml'].filename = self.table + '_pml_scripts.zip'
        else:
            self.widgets['export_pml'].disabled = True

    @pn.depends('table')
    def pml_export_callback(self):
        """Return a BytesIO zip archive with one pymol script per color column."""
        if self.table:
            # todo check if table is valid for pml conversion
            bio = BytesIO()
            with zipfile.ZipFile(bio, 'w') as pml_zip:
                for col_name in self.df.columns:
                    name = col_name if isinstance(col_name, str) else '_'.join(col_name)
                    colors = self.df[col_name]
                    pml_script = series_to_pymol(colors)  # todo refactor pd_series_to_pymol?
                    pml_zip.writestr(name + '.pml', pml_script)
            bio.seek(0)
            return bio

    @pn.depends('table')  # param.depends?
    def table_export_callback(self):
        """Return a StringIO of the current table in the chosen format, or None if no table."""
        if self.table:
            io = dataframe_to_stringio(self.df, fmt=self.export_format)
            return io
        else:
            return None
class SingleMappingFileInputControl(MappingFileInputControl):
    """
    Controller for uploading *.txt files in which quantities (protection factors,
    Gibbs free energy, etc) are mapped onto a linear sequence.

    Columns are tab-delimited; the last header line (prefixed with '#') carries
    the column names.
    """

    def _action_add_dataset(self):
        """Add the uploaded dataset, then publish any not-yet-published datasets as sources."""
        super()._action_add_dataset()
        # Datasets known to the parent but not yet exposed as data sources
        pending = self.parent.datasets.keys() - self.parent.sources.keys()
        for name in pending:
            source = DataSource(self.parent.datasets[name].to_records(),
                                tags=['comparison', 'mapping'], x='r_number',
                                renderer='circle', size=10)
            self.parent.publish_data(name, source)
class MatrixMappingFileInputControl(SingleMappingFileInputControl):
    """Mapping-file input controller that additionally publishes an image data source."""

    # Numeric (timepoint-like) columns of the uploaded file to keep in the matrix
    datapoints = param.ListSelector(doc='Select datapoints to include in the matrix')

    def _action_add_dataset(self):
        super()._action_add_dataset()

        # NOTE(review): the image below is a generated RGBA gradient with random
        # 'scores' values, not derived from the loaded data — presumably placeholder
        # code for the matrix image view; confirm before relying on its contents.
        N = 20
        img = np.empty((N, N), dtype=np.uint32)
        view = img.view(dtype=np.uint8).reshape((N, N, 4))  # per-channel byte view of the RGBA ints
        for i in range(N):
            for j in range(N):
                view[i, j, 0] = int(i / N * 255)
                view[i, j, 1] = 158
                view[i, j, 2] = int(j / N * 255)
                view[i, j, 3] = 255

        values = np.random.random(img.shape)
        img_ds_dict = {'img': [img], 'scores': [values]}
        data_source = DataSource(img_ds_dict, tags=['image'], name='scores_image', x=0, y=0)
        self.parent.publish_data('scores_image', data_source)

    def make_list(self):
        # Move the (last-generated) 'datapoints' widget up to position 3 in the list
        widget_list = super().make_list()
        datapoints_widget = widget_list.pop()
        widget_list.insert(3, datapoints_widget)
        return widget_list

    def _add_dataset(self):
        # Keep only the selected datapoint columns, plus the residue number index
        full_dict = self.protein.to_dict()
        data_dict = {k: v for k, v in full_dict.items() if k in self.datapoints}
        data_dict['r_number'] = self.protein.index
        protein = Protein(data_dict, index='r_number')
        self.parent.datasets[self.dataset_name] = protein

    @param.depends('input_file', watch=True)
    def _input_file_updated(self):
        super()._input_file_updated()
        if self.input_file:
            # Columns whose names parse as (decimal) numbers are offered as datapoints
            header_fields = self.protein.df.columns
            float_fields = [f for f in header_fields if f.replace('.', '', 1).isdigit()]
            self.param['datapoints'].objects = float_fields
            self.datapoints = float_fields

            # self.dataset_name = self.dataset_name or Path(self.widget_dict['input_file'].filename).stem
class MatrixImageControl(ControlPanel):
    """
    This controller takes an input loaded matrix and converts it to an (rgba) interpolated rendered image

    NOTE(review): currently an empty stub — it inherits all behaviour from ControlPanel.
    """
class FDPeptideFileInputControl(PeptideFileInputControl):
    """Peptide file input for a fully-deuterated (FD) control sample.

    Reuses the machinery of :class:`PeptideFileInputControl` but only exposes
    the widgets relevant for selecting and parsing the FD control, and stores
    the result as ``parent.series``.
    """
    # todo @tejas: Add test
    # This requires making a test function with the full_deuteration_app in apps.py

    def make_list(self):
        # Restrict the widget list to the FD-control relevant widgets.
        parameters = ['add_button', 'clear_button', 'drop_first', 'load_button', 'd_percentage',
                      'fd_state', 'fd_exposure', 'parse_button']
        first_widgets = list([self.widget_dict[par] for par in parameters])
        return self.file_selectors + first_widgets

    def _action_parse(self):
        """Apply controls to :class:`~pyhdx.models.PeptideMasterTable` and set :class:`~pyhdx.models.HDXMeasurement`"""
        pmt = self.parent.peptides
        data_states = pmt.data[pmt.data['state'] == self.fd_state]
        data_exposure = data_states[data_states['exposure'] == self.fd_exposure]
        # Uptake expressed as a percentage of the number of exchangeable residues
        scores = 100 * data_exposure['uptake'] / data_exposure['ex_residues']
        data_final = append_fields(data_exposure, 'scores', data=scores, usemask=False)
        # pmt.set_control((fd_state, fd_exposure))
        series = HDXMeasurement(data_final)

        self.parent.series = series
        # Bugfix: log the FD state that was actually parsed (was self.exp_state,
        # which is unrelated to this control sample).
        self.parent.logger.info(f"Loaded FD control '{self.fd_state}' with {len(series.coverage)} peptides")
        self.parent.logger.info(f'Mean deuteration is {scores.mean()}%, std {scores.std()}%')
class PeptideFoldingFileInputControl(PeptideFileInputControl):
    """Peptide file input for folding experiments.

    Adds a 0% (zero-uptake) control on top of the regular 100% (FD) control
    and uses both to normalize uptake before building the HDXMeasurement.
    """
    # todo @tejas: Add test
    # This requires making a test function with the folding in apps.py
    be_mode = param.Selector(doc='Select method of normalization', label='Norm mode', objects=['Exp', 'Theory']
                             , precedence=-1)
    fd_state = param.Selector(doc='State used to normalize uptake', label='100% Control State')
    fd_exposure = param.Selector(doc='Exposure used to normalize uptake', label='100% Control Exposure')
    zero_state = param.Selector(doc='State used to zero uptake', label='0% Control State')
    zero_exposure = param.Selector(doc='Exposure used to zero uptake', label='0% Control Exposure')

    def make_dict(self):
        return self.generate_widgets()

    def make_list(self):
        parameters = ['add_button', 'clear_button', 'drop_first', 'ignore_prolines', 'load_button',
                      'fd_state', 'fd_exposure', 'zero_state', 'zero_exposure', 'exp_state',
                      'exp_exposures', 'parse_button']
        first_widgets = list([self.widget_dict[par] for par in parameters])
        return self.file_selectors + first_widgets

    def _action_load(self):
        # After loading, populate the zero-control state selector.
        super()._action_load()
        states = list(np.unique(self.parent.peptides.data['state']))
        self.param['zero_state'].objects = states
        self.zero_state = states[0]

    @param.depends('fd_state', 'fd_exposure', watch=True)
    def _update_experiment(self):
        #TODO THIS needs to be updated to also incorporate the zero (?)
        pm_dict = self.parent.peptides.return_by_name(self.fd_state, self.fd_exposure)
        states = list(np.unique([v.state for v in pm_dict.values()]))
        self.param['exp_state'].objects = states
        self.exp_state = states[0] if not self.exp_state else self.exp_state

    @param.depends('zero_state', watch=True)
    def _update_zero_exposure(self):
        # Restrict exposure choices to those present for the selected zero state.
        b = self.parent.peptides.data['state'] == self.zero_state
        data = self.parent.peptides.data[b]
        exposures = list(np.unique(data['exposure']))
        self.param['zero_exposure'].objects = exposures
        if exposures:
            # Bugfix: was assigning to nonexistent attribute `control_exposure`;
            # the selector being populated here is `zero_exposure`.
            self.zero_exposure = exposures[0]

    def _action_parse(self):
        """Apply controls to :class:`~pyhdx.models.PeptideMasterTable` and set :class:`~pyhdx.models.HDXMeasurement`"""
        control_0 = self.zero_state, self.zero_exposure
        self.parent.peptides.set_control((self.fd_state, self.fd_exposure), control_0=control_0)
        data_states = self.parent.peptides.data[self.parent.peptides.data['state'] == self.exp_state]
        data = data_states[np.isin(data_states['exposure'], self.exp_exposures)]

        series = HDXMeasurement(data)
        self.parent.series = series
        self._publish_scores()

        self.parent.logger.info(f'Loaded experiment state {self.exp_state} '
                                f'({len(series)} timepoints, {len(series.coverage)} peptides each)')
class DifferenceControl(ControlPanel):
    """
    This controller allows users to select two datasets from available datasets, choose a quantity to compare between,
    and choose the type of operation between quantities (Subtract/Divide).
    """
    header = 'Differences'

    dataset_1 = param.Selector(doc='First dataset to compare')
    dataset_2 = param.Selector(doc='Second dataset to compare')

    comparison_name = param.String()
    operation = param.Selector(default='Subtract', objects=['Subtract', 'Divide'],
                               doc='Select the operation to perform between the two datasets')

    comparison_quantity = param.Selector(doc="Select a quantity to compare (column from input txt file)")
    add_comparison = param.Action(lambda self: self._action_add_comparison(),
                                  doc='Click to add this comparison to available comparisons')
    comparison_list = param.ListSelector(doc='Lists available comparisons')
    remove_comparison = param.Action(lambda self: self._action_remove_comparison(),
                                     doc='Remove selected comparisons from the list')

    def __init__(self, parent, **params):
        super(DifferenceControl, self).__init__(parent, **params)
        # Keep the two dataset selectors in sync with datasets on the parent.
        self.parent.param.watch(self._datasets_updated, ['datasets'])

    def _datasets_updated(self, events):
        objects = list(self.parent.datasets.keys())
        self.param['dataset_1'].objects = objects
        if not self.dataset_1:
            self.dataset_1 = objects[0]
        self.param['dataset_2'].objects = objects
        if not self.dataset_2:# or self.dataset_2 == objects[0]:  # dataset2 default to second dataset? toggle user modify?
            self.dataset_2 = objects[0]

    @param.depends('dataset_1', 'dataset_2', watch=True)
    def _selection_updated(self):
        # Offer only numeric columns present in BOTH selected datasets.
        if self.datasets:
            unique_names = set.intersection(*[{name for name in protein.df.dtypes.index} for protein in self.datasets])
            objects = [name for name in unique_names if np.issubdtype(self.protein_1[name].dtype, np.number)]
            objects.sort()
            # todo check for scalar dtype
            self.param['comparison_quantity'].objects = objects
            if self.comparison_quantity is None:
                self.comparison_quantity = objects[0]

    @property
    def protein_1(self):
        """:class:`~pyhdx.models.Protein`: Protein object of dataset 1 (None if not selected/available)"""
        try:
            return self.parent.datasets[self.dataset_1]
        except KeyError:
            return None

    @property
    def protein_2(self):
        """:class:`~pyhdx.models.Protein`: Protein object of dataset 2 (None if not selected/available)"""
        try:
            return self.parent.datasets[self.dataset_2]
        except KeyError:
            return None

    @property
    def datasets(self):
        """:obj:`tuple`: Tuple with `(protein_1, protein_2)`, or None when either is missing"""
        datasets = (self.protein_1, self.protein_2)
        if None in datasets:
            return None
        else:
            return datasets

    def _action_add_comparison(self):
        # Validate inputs, apply the selected binary operation column-wise and
        # publish the result (plus both operands) as a new mapping data source.
        if not self.comparison_name:
            self.parent.logger.info('The added comparison needs to have a name')
            return
        if self.datasets is None:
            return

        op = {'Subtract': operator.sub, 'Divide': operator.truediv}[self.operation]
        comparison = op(*[p[self.comparison_quantity] for p in self.datasets]).rename('comparison')
        value1 = self.protein_1[self.comparison_quantity].rename('value1')
        value2 = self.protein_2[self.comparison_quantity].rename('value2')

        df = pd.concat([comparison, value1, value2], axis=1)
        output = df.to_records()
        data_source = DataSource(output, tags=['comparison', 'mapping'], x='r_number', y='comparison',
                                 renderer='circle', size=10)
        self.parent.publish_data(self.comparison_name, data_source)  # Triggers parent.sources param
        self.comparison_name = ''

    def _action_remove_comparison(self):
        for comparison in self.comparison_list:
            self.parent.sources.pop(comparison)  #Popping from dicts does not trigger param
        self.parent.param.trigger('sources')

    @param.depends('parent.sources', watch=True)
    def _update_comparison_list(self):
        objects = [name for name, d in self.parent.sources.items() if 'comparison' in d.tags]
        self.param['comparison_list'].objects = objects
class SingleControl(ControlPanel):
    # todo @tejas: Add test
    """
    This controller allows users to select a dataset from available datasets, and choose a quantity to classify/visualize,
    and add this quantity to the available datasets.
    """
    #todo subclass with DifferenceControl
    #rename dataset_name
    header = 'Datasets'

    dataset = param.Selector(doc='Dataset')
    dataset_name = param.String(doc='Name of the dataset to add')
    quantity = param.Selector(doc="Select a quantity to plot (column from input txt file)")

    add_dataset = param.Action(lambda self: self._action_add_dataset(),
                               doc='Click to add this comparison to available comparisons')
    dataset_list = param.ListSelector(doc='Lists available comparisons')
    remove_dataset = param.Action(lambda self: self._action_remove_comparison(),
                                  doc='Remove selected datasets from available datasets')

    def __init__(self, parent, **params):
        super(SingleControl, self).__init__(parent, **params)
        # Keep the dataset selector in sync with datasets on the parent.
        self.parent.param.watch(self._datasets_updated, ['datasets'])

    def _datasets_updated(self, events):
        objects = list(self.parent.datasets.keys())
        self.param['dataset'].objects = objects
        if not self.dataset:
            self.dataset = objects[0]

    @param.depends('dataset', watch=True)
    def _selection_updated(self):
        # Offer all columns of the selected dataset except the residue index.
        if self.dataset:
            dataset = self.parent.datasets[self.dataset]
            names = dataset.dtype.names
            objects = [name for name in names if name != 'r_number']
            self.param['quantity'].objects = objects
            if self.quantity is None:
                self.quantity = objects[0]

    def _action_add_dataset(self):
        # Publish the selected dataset/quantity as a new mapping data source.
        if not self.dataset_name:
            # Bugfix: message referred to 'comparison' (copy-paste from DifferenceControl)
            self.parent.logger.info('The added dataset needs to have a name')
            return
        if not self.dataset:
            return

        array = self.parent.datasets[self.dataset]
        data_source = DataSource(array, tags=['comparison', 'mapping'], x='r_number', y=self.quantity,
                                 renderer='circle', size=10)
        self.parent.publish_data(self.dataset_name, data_source)  # Triggers parent.sources param
        # Bugfix: reset `dataset_name` (was `comparison_name`, which does not
        # exist on this class and therefore left the name field populated).
        self.dataset_name = ''

    def _action_remove_comparison(self):
        for ds in self.dataset_list:
            self.parent.sources.pop(ds)  #Popping from dicts does not trigger param
        self.parent.param.trigger('sources')

    @param.depends('parent.sources', watch=True)
    def _update_dataset_list(self):
        objects = [name for name, d in self.parent.sources.items()]
        self.param['dataset_list'].objects = objects
class FDCoverageControl(CoverageControl):
    """Coverage control for the FD app; omits the trailing widget."""

    def make_list(self):
        # super(CoverageControl, self) deliberately skips CoverageControl's own
        # make_list and calls the grandparent implementation.
        widgets = super(CoverageControl, self).make_list()
        trimmed = widgets[:len(widgets) - 1]
        return trimmed
class FoldingFitting(InitialGuessControl):
    """Initial-guess control for folding data; only the 'Dissociation' model is offered."""

    fitting_model = param.Selector(default='Dissociation', objects=['Dissociation'],
                                   doc='Choose method for determining initial guesses.')

    def make_list(self):
        # Register the progress-bar views so they can be looked up by name.
        self.widget_dict.update(pbar1=self.pbar1.view, pbar2=self.pbar2.view)
        names = ['fitting_model', 'lower_bound', 'upper_bound', 'do_fit1', 'pbar1']
        return [self.widget_dict[name] for name in names]
class FitResultControl(ControlPanel):
    # @tejas skip test, currently bugged, issue #182
    """
    This controller allows users to view the fit result and how it describes the uptake of every peptide.
    """
    header = 'Fit Results'

    peptide_index = param.Integer(0, bounds=(0, None),
                                  doc='Index of the peptide to display.')
    x_axis_type = param.Selector(default='Log', objects=['Linear', 'Log'],
                                 doc='Choose whether to plot the x axis as Logarithmic axis or Linear.')

    def __init__(self, parent, **param):
        super(FitResultControl, self).__init__(parent, **param)

        self.d_uptake = {}  ## Dictionary of arrays (N_p, N_t) with results of fit result model calls
        #todo why does this still exist, should it not just be dataobjects??
        # --> because they need to be calculated only once and then dataobjects are generated per index
        # can be improved probably (by putting all data in data source a priori?)
        self.parent.param.watch(self._series_updated, ['datasets'])  #todo refactor
        self.parent.param.watch(self._fit_results_updated, ['fit_results'])

    def _series_updated(self, *events):
        # Currently a no-op; the previous implementation (kept below) updated
        # the peptide_index bounds and the corrected uptake array.
        pass
        #
        # self.param['peptide_index'].bounds = (0, len(self.parent.series.coverage.data) - 1)
        # self.d_uptake['uptake_corrected'] = self.parent.series.uptake_corrected.T
        # self._update_sources()

    @property
    def fit_timepoints(self):
        # Log-spaced evaluation grid from 1e-2 up to the last measured
        # timepoint, with t=0 prepended.
        time = np.logspace(-2, np.log10(self.parent.series.timepoints.max()), num=250)
        time = np.insert(time, 0, 0.)
        return time

    def _fit_results_updated(self, *events):
        accepted_fitresults = ['fr_pfact']
        #todo wrappertje which checks with a cached previous version of this particular param what the changes are even it a manual trigger
        for name, fit_result in self.parent.fit_results.items():
            if name in accepted_fitresults:
                # Evaluate the fit-result model on the dense time grid
                D_upt = fit_result(self.fit_timepoints)
                self.d_uptake[name] = D_upt
            else:
                continue
        # push results to graph
        self._update_sources()

    @param.depends('peptide_index', watch=True)
    def _update_sources(self):
        # Publish one uptake-curve data source per entry in d_uptake for the
        # currently selected peptide.
        for name, array in self.d_uptake.items():
            if name == 'uptake_corrected':  ## this is the raw data
                timepoints = self.parent.series.timepoints
                renderer = 'circle'
                color = '#000000'
            else:
                timepoints = self.fit_timepoints
                renderer = 'line'
                color = '#bd0d1f'  #todo css / default color cycle per Figure Panel?

            dic = {'time': timepoints, 'uptake': array[self.peptide_index, :]}
            data_source = DataSource(dic, x='time', y='uptake', tags=['uptake_curve'], renderer=renderer, color=color)
            self.parent.publish_data(name, data_source)
class ColoringControl(ClassificationControl):
    # WIP class, skip tests
    # Renders classification colors onto the 'scores_image' data source as an
    # interpolated RGBA image (time vs residue number).
    def make_dict(self):
        # The 'quantity' widget is not used by this panel.
        widgets_dict = super().make_dict()
        widgets_dict.pop('quantity')
        return widgets_dict

    @param.depends('values', 'colors', 'target', 'quantity', watch=True)
    def _get_colors(self):
        # todo this part is repeated
        # Guard clauses: require non-zero, strictly descending threshold values,
        # a selected target and an existing image source.
        if np.all(self.values == 0):
            return
        elif np.any(np.diff(self.values) > 0):  # Skip applying colors when not strictly monotonic descending
            return
        elif not self.target:
            return
        elif 'scores_image' not in self.parent.sources.keys():
            return

        tgt_source = self.parent.sources[self.target]  # full array including nan entries
        r_number = tgt_source.source.data['r_number']
        # Requires consecutive residue numbers (unit spacing)
        assert np.all(np.diff(r_number) == 1)
        # Numeric column names are taken as exposure timepoints; sort numerically
        headers = [f for f in tgt_source.source.data.keys() if f.replace('.', '', 1).isdigit()]
        headers.sort(key=float)
        timepoints = np.array([float(f) for f in headers])

        N_interpolate = 500
        interp_timepoints = np.linspace(0, timepoints.max(), num=N_interpolate, endpoint=True)
        data_array = np.stack([tgt_source.source.data[k] for k in headers])
        # Interpolate each residue's time trace onto the dense grid
        array = np.stack([np.interp(interp_timepoints, timepoints, data) for data in data_array.T]).T

        colors_hex = self._calc_colors(array.flatten())  # colors are in hex format
        if colors_hex is None:  # this is the colors not between 0 and 1 bug / error
            return
        colors_hex[colors_hex == 'nan'] = '#8c8c8c'
        colors_rgba = np.array([hex_to_rgba(h) for h in colors_hex])

        # Pack RGBA bytes into a uint32 image via a uint8 view
        shape = (N_interpolate, len(r_number))
        img = np.empty(shape, dtype=np.uint32)
        view = img.view(dtype=np.uint8).reshape(*shape, 4)
        view[:] = colors_rgba.reshape(*shape, 4)

        img_source = self.parent.sources['scores_image']
        img_source.render_kwargs['dw'] = r_number.max()
        img_source.render_kwargs['dh'] = timepoints.max()
        img_source.source.data.update(img=[img], scores=[array])
        #self.parent.sources[self.target].source.data['color'] = colors
class DifferenceFileExportControl(FileExportControl):
    """
    This controller allows users to export and download datasets.

    'Mappable' datasets (with r_number column) can be exported as .pml pymol script, which colors protein structures
    based on their 'color' column.
    """
    accepted_tags = ['mapping']

    #todo include comparison info (x vs y) in output
    def _sources_updated(self, *events):  #refactor _parent_sources_updated on classificationcontrol
        # Offer only sources tagged as 'mapping' as export targets.
        data_sources = [k for k, src in self.parent.sources.items() if src.resolve_tags(self.accepted_tags)]
        self.param['target'].objects = list(data_sources)

        # Set target if its not set already
        if not self.target and data_sources:
            self.target = data_sources[-1]

    # NOTE(review): uses pn.depends while sibling panels use param.depends — confirm intended.
    @pn.depends('target', watch=True)
    def _update_filename(self):
        # Derive download filenames from the selected target.
        self.export_linear_download.filename = self.target + '_linear.txt'
        if 'r_number' in self.export_dict.keys():
            self.pml_script_download.filename = self.target + '_pymol.pml'
class OptionsControl(ControlPanel):
    """The controller is used for various settings."""

    header = 'Options'

    #todo this should be a component (mixin?) for apps who dont have these figures
    link_xrange = param.Boolean(True, doc='Link the X range of the coverage figure and other linear mapping figures.', constant=False)
    log_level = param.Selector(default='DEBUG', objects=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL', 'OFF', 'TRACE'],
                               doc='Set the logging level.')

    def __init__(self, parent, **param):
        super(OptionsControl, self).__init__(parent, **param)

    @property
    def enabled(self):
        # Linking is only possible when both the master and client figures exist.
        return self.master_figure is not None and self.client_figures is not None

    @param.depends('link_xrange', watch=True)
    def _update_link(self):
        if self.enabled:
            if self.link_xrange:
                self._link()
            else:
                self._unlink()

    @property
    def client_figures(self):
        # Figures whose x-range follows the master (coverage) figure.
        client_names = ['RateFigure', 'PFactFigure']
        return [self.parent.figure_panels[name].figure for name in client_names]

    @property
    def master_figure(self):
        return self.parent.figure_panels['CoverageFigure'].figure

    @property
    def figures(self):
        return [self.master_figure] + self.client_figures

    def _unlink(self):
        # Remove the js callbacks installed by _link from every figure's x_range.
        for fig in self.figures:
            fig.x_range.js_property_callbacks.pop('change:start')
            fig.x_range.js_property_callbacks.pop('change:end')

    def _link(self):
        # Bidirectionally link each client x-range to the master figure's x-range.
        for client in self.client_figures:
            self.master_figure.x_range.js_link('start', client.x_range, 'start')
            self.master_figure.x_range.js_link('end', client.x_range, 'end')

            client.x_range.js_link('start', self.master_figure.x_range, 'start')
            client.x_range.js_link('end', self.master_figure.x_range, 'end')
class DeveloperControl(ControlPanel):
    """Controller with debugging options"""

    header = 'Developer Options'

    test_logging = param.Action(lambda self: self._action_test_logging())
    breakpoint_btn = param.Action(lambda self: self._action_break())
    test_btn = param.Action(lambda self: self._action_test())
    trigger_btn = param.Action(lambda self: self._action_trigger())
    print_btn = param.Action(lambda self: self._action_print())
    runtime_warning = param.Action(lambda self: self._action_runtime())

    def __init__(self, parent, **params):
        super(DeveloperControl, self).__init__(parent, **params)

    def _action_test_logging(self):
        # Emit a burst of log records to exercise the log panel.
        print(self.parent.logger)
        self.parent.logger.debug('TEST DEBUG MESSAGE')
        #logging.info('THis is some info')
        for i in range(20):
            self.parent.logger.info('dit is een test123')

    def _action_print(self):
        hdx_set = self.parent.hdx_set
        print(hdx_set.names)
        guess = self.parent.control_panels['FitControl']
        # NOTE(review): sibling panels access sources via self.parent.sources;
        # confirm ControlPanel actually exposes `self.sources`.
        rates_df = self.sources['dataframe'].get('rates', fit_ID=guess.initial_guess)
        print(guess.initial_guess)
        print(rates_df)
        rates_guess = [rates_df[state]['rate'] for state in hdx_set.names]
        gibbs_guess = hdx_set.guess_deltaG(rates_guess)

    def _action_break(self):
        # Dump the data behind the coverage views; convenient breakpoint target.
        main_ctrl = self.parent
        control_panels = main_ctrl.control_panels
        views = main_ctrl.views
        sources = main_ctrl.sources

        mse_view = views['coverage_mse']
        data = mse_view.get_data()
        print('mse')
        print(data)

        coverage_view = views['coverage']
        data = coverage_view.get_data()
        print('coverage')
        print(data)

        print('Time for a break')

    def _action_test(self):
        # Loads a fit result from a hard-coded local path (developer machine only).
        # NOTE(review): txt_to_np is not in the visible import block — confirm.
        src_file = r'C:\Users\jhsmi\pp\PyHDX\tests\test_data\ecSecB_torch_fit.txt'
        array = txt_to_np(src_file)
        data_dict = {name: array[name] for name in array.dtype.names}
        data_dict['color'] = np.full_like(array, fill_value=DEFAULT_COLORS['pfact'], dtype='<U7')

        data_source = DataSource(data_dict, x='r_number', tags=['mapping', 'pfact', 'deltaG'],
                                 renderer='circle', size=10, name='global_fit')
        self.parent.publish_data('global_fit', data_source)

    def _action_trigger(self):
        deltaG_figure = self.parent.figure_panels['DeltaGFigure']
        deltaG_figure.bk_pane.param.trigger('object')

    def _action_runtime(self):
        # Deliberately triggers a numpy RuntimeWarning (mean of empty slice).
        result = np.mean([])
import urllib.request
import zipfile
from collections import namedtuple
from io import StringIO, BytesIO
from pathlib import Path
import colorcet
import dask
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import panel as pn
import param
from numpy.lib.recfunctions import append_fields
from skimage.filters import threshold_multiotsu
from pyhdx import VERSION_STRING
from pyhdx.fileIO import read_dynamx, csv_to_protein, csv_to_dataframe, dataframe_to_stringio
from pyhdx.fitting import fit_rates_weighted_average, fit_rates_half_time_interpolate, get_bounds, fit_gibbs_global, \
fit_gibbs_global_batch, PATIENCE, STOP_LOSS, EPOCHS, R1, R2, optimizer_defaults
from pyhdx.models import PeptideMasterTable, HDXMeasurement, Protein, array_intersection
from pyhdx.web.base import ControlPanel, DEFAULT_COLORS, DEFAULT_CLASS_COLORS
from pyhdx.web.sources import DataSource, DataFrameSource
from pyhdx.web.transforms import ApplyCmapTransform
from pyhdx.web.widgets import ASyncProgressBar
from pyhdx.support import rgb_to_hex, hex_to_rgba, series_to_pymol
# Lightweight stand-in mimicking a fit-result object: only exposes `.output`.
HalfLifeFitResult = namedtuple('HalfLifeFitResult', ['output'])
class MappingFileInputControl(ControlPanel):
    """File-input panel for adding r_number-indexed (mapping) datasets.

    Uploaded text/csv files are parsed into :class:`~pyhdx.models.Protein`
    objects and stored in ``parent.datasets``.
    """
    header = 'File Input'

    input_file = param.Parameter(default=None, doc='Input file to add to available datasets')
    dataset_name = param.String(doc='Name for the dataset to add. Defaults to filename')
    offset = param.Integer(default=0, doc="Offset to add to the file's r_number column")
    add_dataset = param.Action(lambda self: self._action_add_dataset(),
                               doc='Add the dataset to available datasets')
    datasets_list = param.ListSelector(doc='Current datasets', label='Datasets')
    remove_dataset = param.Action(lambda self: self._action_remove_dataset(),
                                  doc='Remove selected datasets')

    def __init__(self, parent, **params):
        super(MappingFileInputControl, self).__init__(parent, **params)
        self.parent.param.watch(self._datasets_updated, ['datasets'])

    def make_dict(self):
        return self.generate_widgets(input_file=pn.widgets.FileInput)

    @param.depends('input_file', watch=True)
    def _input_file_updated(self):
        # Default the dataset name to the uploaded file's stem.
        self.dataset_name = self.dataset_name or Path(self.widget_dict['input_file'].filename).stem

    @property
    def protein(self):
        """Parsed :class:`~pyhdx.models.Protein`, or None when the file cannot be decoded as text."""
        try:
            sio = StringIO(self.input_file.decode())
        except UnicodeDecodeError:
            self.parent.logger.info('Invalid file type, supplied file is not a text file')
            return None
        try:
            sio.seek(0)
            protein = txt_to_protein(sio)
        except (KeyError, NameError):
            # Bugfix: `txt_to_protein` is not imported in this module, so the
            # legacy-format attempt raises NameError rather than KeyError;
            # fall back to the csv parser in both cases.
            sio.seek(0)
            protein = csv_to_protein(sio)
        return protein

    def _add_dataset(self):
        self.parent.datasets[self.dataset_name] = self.protein
        #todo refactor dataset to protein_something

    def _action_add_dataset(self):
        # Validate name and file content before adding; reset widget state afterwards.
        if self.dataset_name in self.parent.datasets.keys():
            self.parent.logger.info(f'Dataset {self.dataset_name} already added')
        elif not self.dataset_name:
            self.parent.logger.info('The added comparison needs to have a name')
        elif not self.input_file:
            self.parent.logger.info('Empty or no file selected')
        elif self.protein is not None:
            self._add_dataset()
            self.parent.param.trigger('datasets')
            self.widget_dict['input_file'].filename = ''
            self.widget_dict['input_file'].value = b''
            self.dataset_name = ''

    def _action_remove_dataset(self):
        if self.datasets_list is not None:
            for dataset_name in self.datasets_list:
                self.parent.datasets.pop(dataset_name)
            self.parent.param.trigger('datasets')

    def _datasets_updated(self, events):
        # Sync the list widget with the parent's datasets dict.
        self.param['datasets_list'].objects = list(self.parent.datasets.keys())
# NOTE(review): mid-file import; cycling colormap consumed by
# CSVFileInputControl._action_cmap_rect — consider moving to the top imports.
import itertools
cmap_cycle = itertools.cycle(['gray','PiYG', 'jet'])
class CSVFileInputControl(ControlPanel):
    # Developer/testing panel: loads a csv file and provides buttons that poke
    # the sources/transforms/filters update machinery of the parent app.
    input_file = param.Parameter()
    load_file = param.Action(lambda self: self._action_load())

    temp_new_data = param.Action(lambda self: self._action_new_data())
    temp_new_cmap = param.Action(lambda self: self._action_new_cmap())
    temp_update_filter = param.Action(lambda self: self._action_exposure())
    temp_cmap_rect = param.Action(lambda self: self._action_cmap_rect())
    #cmap_obj = param.ObjectSelector(default='viridis', objects=['viridis', 'plasma', 'magma'])

    def make_dict(self):
        return self.generate_widgets(input_file=pn.widgets.FileInput(accept='.csv,.txt'))

    def _action_load(self):
        sio = StringIO(self.input_file.decode('UTF-8'))
        df = csv_to_dataframe(sio)
        # NOTE(review): `source` is created but never registered — dead code?
        source = DataFrameSource(df=df)

    def _action_new_data(self):
        # Overwrite the deltaG column with random values and refresh views.
        source = self.parent.sources['torch_fit']
        table = source.get('torch_fit')
        size = len(table)
        new_data = 40e3*np.random.rand(size)
        table['deltaG'] = new_data
        self.parent.update()

    def _action_new_cmap(self):
        # Swap the cmap transform to a randomly chosen matplotlib colormap.
        cmap_name = np.random.choice(['viridis', 'inferno', 'plasma'])
        cmap = mpl.cm.get_cmap(cmap_name)
        transform = self.parent.transforms['cmap']
        transform.cmap = cmap
        self.parent.update()

    def _action_exposure(self):
        # Reset the exposure filter widget and refresh views.
        filter = self.parent.filters['exposure']
        filter.widget.value = 0.
        self.parent.update()

    def _action_cmap_rect(self):
        # Cycle the rect plot's colormap through the module-level cmap_cycle.
        new_cmap = next(cmap_cycle)
        rect_view = self.parent.figure_panels['rect_plot']
        rect_view.opts['cmap'] = new_cmap
        self.parent.update()
        item = self.parent.rows['rect_plot'][0]
        #item.param.trigger('object')
class TestFileInputControl(ControlPanel):
    # Minimal test panel exercising the layout/update_box machinery.
    input_file = param.Parameter()
    load_file = param.Action(lambda self: self._action_load())

    # Class-level default layout; re-assigned per instance in __init__.
    _layout = {
        'self': None,
        'filters.exposure_slider': None
    }

    def __init__(self, parent, **params):
        super().__init__(parent, **params)
        # todo property and list of tuples
        self._layout = {
            'self': None,
            'filters.exposure_slider': None
        }
        self.update_box()

    def make_dict(self):
        return self.generate_widgets(input_file=pn.widgets.FileInput(accept='.csv,.txt'))

    def _action_load(self):
        sio = StringIO(self.input_file.decode('UTF-8'))
        df = csv_to_dataframe(sio)
        # NOTE(review): `source` is created but never used — confirm intended.
        source = DataFrameSource(df=df)
class PeptideFileInputControl(ControlPanel):
    # Main peptide input panel: loads DynamX files, applies back-exchange
    # correction and creates HDXMeasurement objects stored on the parent.
    header = 'Peptide Input'

    input_files = param.List()
    be_mode = param.Selector(doc='Select method of back exchange correction', label='Back exchange correction method', objects=['FD Sample', 'Flat percentage'])
    fd_state = param.Selector(doc='State used to normalize uptake', label='FD State')
    fd_exposure = param.Selector(doc='Exposure used to normalize uptake', label='FD Exposure')
    exp_state = param.Selector(doc='State for selected experiment', label='Experiment State')
    exp_exposures = param.ListSelector(default=[], objects=[''], label='Experiment Exposures'
                                       , doc='Selected exposure time to use')
    be_percent = param.Number(28., bounds=(0, 100), doc='Global percentage of back-exchange',
                              label='Back exchange percentage')
    drop_first = param.Integer(1, bounds=(0, None), doc='Select the number of N-terminal residues to ignore.')
    ignore_prolines = param.Boolean(True, constant=True, doc='Prolines are ignored as they do not exchange D.')
    d_percentage = param.Number(95., bounds=(0, 100), doc='Percentage of deuterium in the labelling buffer',
                                label='Deuterium percentage')
    #fd_percentage = param.Number(95., bounds=(0, 100), doc='Percentage of deuterium in the FD control sample buffer',
    #                             label='FD Deuterium percentage')
    temperature = param.Number(293.15, bounds=(0, 373.15), doc='Temperature of the D-labelling reaction',
                               label='Temperature (K)')
    pH = param.Number(7.5, doc='pH of the D-labelling reaction, as read from pH meter',
                      label='pH read')
    #load_button = param.Action(lambda self: self._action_load(), doc='Load the selected files', label='Load Files')
    n_term = param.Integer(1, doc='Index of the n terminal residue in the protein. Can be set to negative values to '
                                  'accommodate for purification tags. Used in the determination of intrinsic rate of exchange')
    c_term = param.Integer(0, bounds=(0, None),
                           doc='Index of the c terminal residue in the protein. Used for generating pymol export script'
                               'and determination of intrinsic rate of exchange for the C-terminal residue')
    sequence = param.String('', doc='Optional FASTA protein sequence')
    dataset_name = param.String()
    add_dataset_button = param.Action(lambda self: self._action_add_dataset(), label='Add dataset',
                                      doc='Parse selected peptides for further analysis and apply back-exchange correction')
    dataset_list = param.ObjectSelector(default=[], label='Datasets', doc='Lists available datasets')

    def __init__(self, parent, **params):
        super(PeptideFileInputControl, self).__init__(parent, **params)
        self.parent.param.watch(self._datasets_updated, ['data_objects'])
        # be_percent is hidden until 'Flat percentage' mode is chosen
        excluded = ['be_percent']
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()
        self._df = None  # Combined dataframe of raw input data, set by _read_files

    @property
    def _layout(self):
        return [('self', self.own_widget_names)]

    def make_dict(self):
        text_area = pn.widgets.TextAreaInput(name='Sequence (optional)', placeholder='Enter sequence in FASTA format', max_length=10000,
                                             width=300, height=100, height_policy='fixed', width_policy='fixed')
        return self.generate_widgets(
            input_files=pn.widgets.FileInput(multiple=True, name='Input files'),
            temperature=pn.widgets.FloatInput,
            #be_mode=pn.widgets.RadioButtonGroup,
            be_percent=pn.widgets.FloatInput,
            d_percentage=pn.widgets.FloatInput,
            #fd_percentage=pn.widgets.FloatInput,
            sequence=text_area)

    def make_list(self):
        excluded = ['be_percent']
        widget_list = [widget for name, widget, in self.widget_dict.items() if name not in excluded]
        return widget_list

    @param.depends('be_mode', watch=True)
    def _update_be_mode(self):
        # todo @tejas: Add test
        # Show only the widgets relevant to the chosen back-exchange mode.
        if self.be_mode == 'FD Sample':
            excluded = ['be_percent']
        elif self.be_mode == 'Flat percentage':
            excluded = ['fd_state', 'fd_exposure']

        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        #self._layout = {'self': widgets}
        self.update_box()

    @param.depends('input_files', watch=True)
    def _read_files(self):
        # Parse all uploaded DynamX files into one combined dataframe, then
        # refresh every dependent selector.
        if self.input_files:
            combined_df = read_dynamx(*[StringIO(byte_content.decode('UTF-8')) for byte_content in self.input_files])
            self._df = combined_df

            self.parent.logger.info(
                f'Loaded {len(self.input_files)} file{"s" if len(self.input_files) > 1 else ""} with a total '
                f'of {len(self._df)} peptides')
        else:
            self._df = None

        self._update_fd_state()
        self._update_fd_exposure()
        self._update_exp_state()
        self._update_exp_exposure()

    def _update_fd_state(self):
        if self._df is not None:
            states = list(self._df['state'].unique())
            self.param['fd_state'].objects = states
            self.fd_state = states[0]
        else:
            self.param['fd_state'].objects = []

    @param.depends('fd_state', watch=True)
    def _update_fd_exposure(self):
        # Restrict FD exposure choices to those measured for the FD state.
        if self._df is not None:
            fd_entries = self._df[self._df['state'] == self.fd_state]
            exposures = list(np.unique(fd_entries['exposure']))
        else:
            exposures = []
        self.param['fd_exposure'].objects = exposures
        if exposures:
            self.fd_exposure = exposures[0]

    @param.depends('fd_state', 'fd_exposure', watch=True)
    def _update_exp_state(self):
        # Offer only experiment states whose peptides overlap with the control.
        if self._df is not None:
            # Booleans of data entries which are in the selected control
            control_bools = np.logical_and(self._df['state'] == self.fd_state, self._df['exposure'] == self.fd_exposure)

            control_data = self._df[control_bools].to_records()
            other_data = self._df[~control_bools].to_records()

            intersection = array_intersection([control_data, other_data], fields=['start', 'end'])  # sequence?
            states = list(np.unique(intersection[1]['state']))
        else:
            states = []

        self.param['exp_state'].objects = states
        if states:
            self.exp_state = states[0] if not self.exp_state else self.exp_state

    @param.depends('exp_state', watch=True)
    def _update_exp_exposure(self):
        if self._df is not None:
            exp_entries = self._df[self._df['state'] == self.exp_state]
            exposures = list(np.unique(exp_entries['exposure']))
            exposures.sort()
        else:
            exposures = []

        self.param['exp_exposures'].objects = exposures
        self.exp_exposures = exposures

        if not self.dataset_name or self.dataset_name in self.param['exp_state'].objects:
            self.dataset_name = self.exp_state

        # `exp_entries` exists whenever `exposures` is non-empty (same branch).
        if not self.c_term and exposures:
            self.c_term = int(np.max(exp_entries['end']))

    def _datasets_updated(self, events):
        # Update datasets widget as datasets on parents change
        objects = list(self.parent.data_objects.keys())
        self.param['dataset_list'].objects = objects

    def _action_add_dataset(self):
        # Build a PeptideMasterTable from the raw data, apply back-exchange
        # correction, construct the HDXMeasurement and publish its tables.
        if self._df is None:
            self.parent.logger.info("No data loaded")
            return
        elif self.dataset_list and self.dataset_name in self.dataset_list:
            self.parent.logger.info(f"Dataset name {self.dataset_name} already in use")
            return

        peptides = PeptideMasterTable(self._df, d_percentage=self.d_percentage,
                                      drop_first=self.drop_first, ignore_prolines=self.ignore_prolines)
        if self.be_mode == 'FD Sample':
            control_0 = None  # = (self.zero_state, self.zero_exposure) if self.zero_state != 'None' else None
            peptides.set_control((self.fd_state, self.fd_exposure), control_0=control_0)
        elif self.be_mode == 'Flat percentage':
            # todo @tejas: Add test
            peptides.set_backexchange(self.be_percent)

        data = peptides.get_state(self.exp_state)
        exp_bools = data['exposure'].isin(self.exp_exposures)
        data = data[exp_bools]

        #todo temperature ph kwarg for series
        hdxm = HDXMeasurement(data, c_term=self.c_term, n_term=self.n_term, sequence=self.sequence,
                              name=self.dataset_name, temperature=self.temperature, pH=self.pH)

        self.parent.data_objects[self.dataset_name] = hdxm
        self.parent.param.trigger('data_objects')  # Trigger update

        df = hdxm.data
        # Auxiliary columns used by the coverage figure
        df['start_end'] = [str(s) + '_' + str(e) for s, e in zip(df['start'], df['end'])]
        df['id'] = df.index % hdxm.Np

        target_source = self.parent.sources['dataframe']
        target_source.add_df(df, 'peptides', self.dataset_name)

        # Residue-level relative fractional uptake table (residues x timepoints)
        index = pd.Index(hdxm.coverage.r_number, name='r_number')
        df = pd.DataFrame(hdxm.rfu_residues, index=index, columns=hdxm.timepoints)
        target_source = self.parent.sources['dataframe']
        target_source.add_df(df, 'rfu', self.dataset_name)

        self.dataset_list.append(self.dataset_name)

        self.parent.logger.info(f'Loaded dataset {self.dataset_name} with experiment state {self.exp_state} '
                                f'({len(hdxm)} timepoints, {len(hdxm.coverage)} peptides each)')
        self.parent.logger.info(f'Average coverage: {hdxm.coverage.percent_coverage:.3}%, '
                                f'Redundancy: {hdxm.coverage.redundancy:.2}')

    def _action_remove_datasets(self):
        raise NotImplementedError('Removing datasets not implemented')
        # NOTE(review): code below the raise is unreachable.
        for name in self.dataset_list:
            self.parent.datasets.pop(name)

        self.parent.param.trigger('datasets')  # Manual trigger as key assignment does not trigger the param
# todo class DataManagerControl()
class CoverageControl(ControlPanel):
    """Control panel for the peptide coverage figure."""

    header = 'Coverage'

    def __init__(self, parent, **params):
        super().__init__(parent, **params)
        self.update_box()

    @property
    def _layout(self):
        # Only the colormap option is exposed in this panel.
        layout_entries = [('opts.cmap', None)]
        return layout_entries
class InitialGuessControl(ControlPanel):
    """Control panel for computing initial guesses of exchange rates.

    Guesses are obtained either from half-life interpolation (near-instant) or
    by fitting a weighted-average association model (submitted to the dask
    client). Completed results are published to the 'rates' dataframe table.
    """

    #todo remove lambda symbol although its really really funny
    header = 'Initial Guesses'
    fitting_model = param.Selector(default='Half-life (λ)', objects=['Half-life (λ)', 'Association'],
                                   doc='Choose method for determining initial guesses.')
    dataset = param.Selector(default='', doc='Dataset to apply bounds to', label='Dataset (for bounds)')
    global_bounds = param.Boolean(default=False, doc='Set bounds globally across all datasets')
    lower_bound = param.Number(0., doc='Lower bound for association model fitting')
    upper_bound = param.Number(0., doc='Upper bound for association model fitting')
    guess_name = param.String(default='Guess_1', doc='Name for the initial guesses')
    # 'constant=True' keeps the button disabled until at least one dataset is loaded
    do_fit1 = param.Action(lambda self: self._action_fit(), label='Calculate Guesses', doc='Start initial guess fitting',
                           constant=True)
    bounds = param.Dict({}, doc='Dictionary which stores rate fitting bounds', precedence=-1)

    def __init__(self, parent, **params):
        # Progress bars for async fitting; see https://github.com/holoviz/panel/pull/2079 (#tqdm?)
        self.pbar1 = ASyncProgressBar()
        self.pbar2 = ASyncProgressBar()
        super(InitialGuessControl, self).__init__(parent, **params)
        self.parent.param.watch(self._parent_datasets_updated, ['data_objects'])  #todo refactor
        # Bounds widgets are hidden initially (default model is half-life)
        excluded = ['lower_bound', 'upper_bound', 'global_bounds', 'dataset']
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()
        # Maps pending dask future keys to the user-chosen guess names
        self._guess_names = {}

    @property
    def _layout(self):
        return [
            ('self', self.own_widget_names),
            # ('filters.select_index_rates_lv1', None),
            # ('filters.select_index_rates_lv2', None),
        ]

    def make_dict(self):
        """Generate widgets; bounds use typed FloatInput, plus the progress-bar views."""
        widgets = self.generate_widgets(lower_bound=pn.widgets.FloatInput, upper_bound=pn.widgets.FloatInput)
        widgets.update(pbar1=self.pbar1.view, pbar2=self.pbar2.view)
        return widgets

    @param.depends('fitting_model', watch=True)
    def _fitting_model_updated(self):
        """Show/hide the bounds widgets depending on the chosen fitting model."""
        if self.fitting_model == 'Half-life (λ)':
            excluded = ['dataset', 'lower_bound', 'upper_bound', 'global_bounds']
        elif self.fitting_model in ['Association', 'Dissociation']:
            excluded = []
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()

    @param.depends('global_bounds', watch=True)
    def _global_bounds_updated(self):
        # Per-dataset selection is meaningless when bounds apply globally
        if self.global_bounds:
            self.param['dataset'].constant = True
        else:
            self.param['dataset'].constant = False

    @param.depends('dataset', watch=True)
    def _dataset_updated(self):
        """Load the stored bounds of the newly selected dataset into the widgets."""
        lower, upper = self.bounds[self.dataset]
        self.lower_bound = lower
        self.upper_bound = upper

    @param.depends('lower_bound', 'upper_bound', watch=True)
    def _bounds_updated(self):
        # if self.global_bounds:
        #     for k in self.bounds.keys():
        #         self.bounds[k] = (self.lower_bound, self.upper_bound)
        # Only persist per-dataset bounds; global bounds are applied at fit time
        if not self.global_bounds:
            self.bounds[self.dataset] = (self.lower_bound, self.upper_bound)

    def _parent_datasets_updated(self, events):
        """Sync the per-dataset bounds dict and dataset selector with loaded data."""
        if len(self.parent.data_objects) > 0:
            self.param['do_fit1'].constant = False

        # keys to remove:
        for k in self.bounds.keys() - self.parent.data_objects.keys():
            self.bounds.pop(k)
        # keys to add:
        for k in self.parent.data_objects.keys() - self.bounds.keys():
            self.bounds[k] = get_bounds(self.parent.data_objects[k].timepoints)

        options = list(self.parent.data_objects.keys())
        self.param['dataset'].objects = options
        if not self.dataset:
            self.dataset = options[0]

    def add_fit_result(self, future):
        """Callback for completed guess futures: store results, publish 'rates' table."""
        name = self._guess_names.pop(future.key)
        results = future.result()

        dfs = [result.output for result in results]
        combined_results = pd.concat(dfs, axis=1,
                                     keys=list(self.parent.data_objects.keys()),
                                     names=['state_name', 'quantity'])
        self.sources['dataframe'].add_df(combined_results, 'rates', name)
        # Keep individual fit results keyed by dataset name
        self.parent.fit_results[name] = {k: v for k, v in zip(self.parent.data_objects.keys(), results)}
        self.parent.param.trigger('data_objects')  # Informs other fittings that initial guesses are now available
        self.param['do_fit1'].constant = False

    def _action_fit(self):
        """Submit initial-guess fitting for all loaded datasets to the dask client."""
        if len(self.parent.data_objects) == 0:
            self.parent.logger.info('No datasets loaded')
            return
        # Reject duplicate names against both finished and still-pending fits
        if self.guess_name in itertools.chain(self.parent.fit_results.keys(), self._guess_names.values()):
            self.parent.logger.info(f"Guess with name {self.guess_name} already in use")
            return
        self.parent.logger.debug('Start initial guess fit')
        self.param['do_fit1'].constant = True

        num_samples = len(self.parent.data_objects)
        if self.fitting_model.lower() in ['association', 'dissociation']:
            if self.global_bounds:
                bounds = [(self.lower_bound, self.upper_bound)]*num_samples
            else:
                bounds = self.bounds.values()
            futures = self.parent.client.map(fit_rates_weighted_average,
                                             self.parent.data_objects.values(), bounds, client='worker_client')
        elif self.fitting_model == 'Half-life (λ)':  # this is practically instantaneous and does not require dask
            futures = self.parent.client.map(fit_rates_half_time_interpolate, self.parent.data_objects.values())

        # Combine multiple futures into one future so a single callback fires
        dask_future = self.parent.client.submit(lambda args: args, futures)
        self._guess_names[dask_future.key] = self.guess_name
        self.parent.future_queue.append((dask_future, self.add_fit_result))
class FitControl(ControlPanel):
    """Control panel for global Gibbs free-energy fitting (PyTorch, run via dask).

    Supports 'Batch' mode (one joint fit over all datasets, with a sample-axis
    regularizer r2) and 'Single' mode (independent fits per dataset). Finished
    results are unpacked into 'global_fit', 'peptides_mse', 'd_calc' and
    'losses' dataframe tables.
    """

    header = 'Fitting'
    initial_guess = param.Selector(doc='Name of dataset to use for initial guesses.')
    fit_mode = param.Selector(default='Batch', objects=['Batch', 'Single'])
    stop_loss = param.Number(STOP_LOSS, bounds=(0, None),
                             doc='Threshold loss difference below which to stop fitting.')
    stop_patience = param.Integer(PATIENCE, bounds=(1, None),
                                  doc='Number of epochs where stop loss should be satisfied before stopping.')
    learning_rate = param.Number(optimizer_defaults['SGD']['lr'], bounds=(0, None),
                                 doc='Learning rate parameter for optimization.')
    momentum = param.Number(optimizer_defaults['SGD']['momentum'], bounds=(0, None),
                            doc='Stochastic Gradient Descent momentum')
    nesterov = param.Boolean(optimizer_defaults['SGD']['nesterov'],
                             doc='Use Nesterov type of momentum for SGD')
    epochs = param.Integer(EPOCHS, bounds=(1, None),
                           doc='Maximum number of epochs (iterations.')
    r1 = param.Number(R1, bounds=(0, None), label='Regularizer 1 (peptide axis)',
                      doc='Value of the regularizer along residue axis.')
    # r2 stays constant (disabled) until a batch fit over >1 dataset is possible
    r2 = param.Number(R2, bounds=(0, None), label='Regularizer 2 (sample axis)',
                      doc='Value of the regularizer along sample axis.', constant=True)
    fit_name = param.String("Gibbs_fit_1", doc="Name for for the fit result")
    do_fit = param.Action(lambda self: self._action_fit(), constant=True, label='Do Fitting',
                          doc='Start global fitting')

    def __init__(self, parent, **params):
        self.pbar1 = ASyncProgressBar()  #tqdm?
        super(FitControl, self).__init__(parent, **params)
        source = self.parent.sources['dataframe']
        source.param.watch(self._source_updated, ['updated'])
        self._current_jobs = 0
        self._max_jobs = 2  #todo config
        # Maps pending dask future keys to the user-chosen fit names
        self._fit_names = {}

    def _source_updated(self, *events):
        """Enable fitting and populate initial-guess options once rates exist."""
        table = self.parent.sources['dataframe'].get('rates')
        objects = list(table.columns.levels[0])
        if objects:
            self.param['do_fit'].constant = False
            self._fit_mode_updated()

        self.param['initial_guess'].objects = objects
        if not self.initial_guess and objects:
            self.initial_guess = objects[0]

    @param.depends('fit_mode', watch=True)
    def _fit_mode_updated(self):
        # r2 (sample-axis regularizer) is only meaningful for batch fits over >1 dataset
        if self.fit_mode == 'Batch' and len(self.parent.data_objects) > 1:
            self.param['r2'].constant = False
        else:
            self.param['r2'].constant = True

    def add_fit_result(self, future):
        """Callback when a fit future completes: unpack results into output tables.

        Handles both a list of single-fit results ('Single' mode) and one batch
        result ('Batch' mode); in both cases it produces the fit output df,
        a per-peptide MSE df, a fitted-uptake (d_calc) df and a losses df.
        """
        #todo perhaps all these dfs should be in the future?
        name = self._fit_names.pop(future.key)
        result = future.result()
        self._current_jobs -= 1
        # NOTE(review): the 'do_fit' widget is disabled in _action_fit when the
        # job limit is reached but never re-enabled here — confirm intended.
        self.parent.logger.info(f'Finished PyTorch fit: {name}')

        # List of single fit results
        if isinstance(result, list):
            self.parent.fit_results[name] = list(result)
            output_dfs = {fit_result.hdxm_set.name: fit_result.output for fit_result in result}
            df = pd.concat(output_dfs.values(), keys=output_dfs.keys(), axis=1)

            # create mse losses dataframe
            dfs = {}
            for single_result in result:
                # Determine mean squared errors per peptide, summed over timepoints
                mse = single_result.get_mse()
                mse_sum = np.sum(mse, axis=1)
                peptide_data = single_result.hdxm_set[0].data
                data_dict = {'start': peptide_data['start'], 'end': peptide_data['end'], 'total_mse': mse_sum}
                dfs[single_result.hdxm_set.name] = pd.DataFrame(data_dict)
            mse_df = pd.concat(dfs.values(), keys=dfs.keys(), axis=1)

            #todo d calc for single fits
            #todo losses for single fits

            # Create d_calc dataframe
            # -----------------------
            # todo needs cleaning up
            state_dfs = {}
            for single_result in result:
                # Log-spaced time axis spanning one decade below the smallest
                # nonzero timepoint up to the largest timepoint
                tp_flat = single_result.hdxm_set.timepoints
                elem = tp_flat[np.nonzero(tp_flat)]
                time_vec = np.logspace(np.log10(elem.min()) - 1, np.log10(elem.max()), num=100, endpoint=True)
                d_calc_state = single_result(time_vec)  #shape Np x Nt
                hdxm = single_result.hdxm_set
                peptide_dfs = []
                pm_data = hdxm[0].data
                for d_peptide, pm_row in zip(d_calc_state, pm_data):
                    peptide_id = f"{pm_row['start']}_{pm_row['end']}"
                    data_dict = {'timepoints': time_vec, 'd_calc': d_peptide, 'start_end': [peptide_id] * len(time_vec)}
                    peptide_dfs.append(pd.DataFrame(data_dict))
                state_dfs[hdxm.name] = pd.concat(peptide_dfs, axis=0, ignore_index=True)
            d_calc_df = pd.concat(state_dfs.values(), keys=state_dfs.keys(), axis=1)

            # Create losses/epoch dataframe
            # -----------------------------
            losses_dfs = {fit_result.hdxm_set.name: fit_result.losses for fit_result in result}
            losses_df = pd.concat(losses_dfs.values(), keys=losses_dfs.keys(), axis=1)
        else:  # one batchfit result
            self.parent.fit_results[name] = result  # todo this name can be changed by the time this is executed
            df = result.output
            # df.index.name = 'peptide index'

            # Create MSE losses df (per peptide, summed over timepoints)
            # -----------------------
            mse = result.get_mse()
            dfs = {}
            for mse_sample, hdxm in zip(mse, result.hdxm_set):
                peptide_data = hdxm[0].data
                mse_sum = np.sum(mse_sample, axis=1)
                # Indexing of mse_sum with Np to account for zero-padding
                data_dict = {'start': peptide_data['start'], 'end': peptide_data['end'], 'total_mse': mse_sum[:hdxm.Np]}
                dfs[hdxm.name] = pd.DataFrame(data_dict)
            mse_df = pd.concat(dfs.values(), keys=dfs.keys(), axis=1)
            self.parent.logger.info('Finished PyTorch fit')

            # Create d_calc dataframe
            # -----------------------
            tp_flat = result.hdxm_set.timepoints.flatten()
            elem = tp_flat[np.nonzero(tp_flat)]
            time_vec = np.logspace(np.log10(elem.min()) - 1, np.log10(elem.max()), num=100, endpoint=True)
            # Same time axis replicated for each of the Ns samples in the batch
            stacked = np.stack([time_vec for i in range(result.hdxm_set.Ns)])
            d_calc = result(stacked)

            state_dfs = {}
            for hdxm, d_calc_state in zip(result.hdxm_set, d_calc):
                peptide_dfs = []
                pm_data = hdxm[0].data
                for d_peptide, idx in zip(d_calc_state, pm_data.index):
                    peptide_id = f"{pm_data.loc[idx, 'start']}_{pm_data.loc[idx, 'end']}"
                    data_dict = {'timepoints': time_vec, 'd_calc': d_peptide, 'start_end': [peptide_id] * len(time_vec)}
                    peptide_dfs.append(pd.DataFrame(data_dict))
                state_dfs[hdxm.name] = pd.concat(peptide_dfs, axis=0, ignore_index=True)
            d_calc_df = pd.concat(state_dfs.values(), keys=state_dfs.keys(), axis=1)

            # Create losses/epoch dataframe
            # -----------------------------
            losses_df = result.losses.copy()
            # Match the two-level (state_name, quantity) column layout of single fits
            losses_df.columns = pd.MultiIndex.from_product(
                [['All states'], losses_df.columns],
                names=['state_name', 'quantity']
            )

            self.parent.logger.info(
                f"Finished fitting in {len(result.losses)} epochs, final mean squared residuals is {result.mse_loss:.2f}")
            self.parent.logger.info(f"Total loss: {result.total_loss:.2f}, regularization loss: {result.reg_loss:.2f} "
                                    f"({result.regularization_percentage:.1f}%)")

        self.parent.sources['dataframe'].add_df(df, 'global_fit', names=[name])
        self.parent.sources['dataframe'].add_df(mse_df, 'peptides_mse', names=[name])
        self.parent.sources['dataframe'].add_df(d_calc_df, 'd_calc', names=[name])
        self.parent.sources['dataframe'].add_df(losses_df, 'losses', names=[name])
        self.parent.param.trigger('fit_results')

    def _action_fit(self):
        """Submit the global fit to the dask client (batch or per-dataset)."""
        if self.fit_name in itertools.chain(self.parent.fit_results.keys(), self._fit_names.values()):
            self.parent.logger.info(f"Fit result with name {self.fit_name} already in use")
            return
        self.parent.logger.info('Started PyTorch fit')
        self._current_jobs += 1
        if self._current_jobs >= self._max_jobs:
            self.widgets['do_fit'].constant = True

        self.parent.logger.info(f'Current number of active jobs: {self._current_jobs}')
        if self.fit_mode == 'Batch':
            hdx_set = self.parent.hdx_set
            rates_df = self.sources['dataframe'].get('rates', fit_ID=self.initial_guess)
            rates_guess = [rates_df[state]['rate'] for state in hdx_set.names]
            gibbs_guess = hdx_set.guess_deltaG(rates_guess)
            dask_future = self.parent.client.submit(fit_gibbs_global_batch, hdx_set, gibbs_guess, **self.fit_kwargs)
        else:
            data_objs = self.parent.data_objects.values()
            rates_df = self.sources['dataframe'].get('rates', fit_ID=self.initial_guess)
            gibbs_guesses = [data_obj.guess_deltaG(rates_df[data_obj.name]['rate']) for data_obj in data_objs]
            futures = self.parent.client.map(fit_gibbs_global, data_objs, gibbs_guesses, **self.fit_kwargs)

            # Combine list of futures into one future object
            # See https://github.com/dask/distributed/pull/560
            dask_future = self.parent.client.submit(lambda args: args, futures)

        self._fit_names[dask_future.key] = self.fit_name
        self.parent.future_queue.append((dask_future, self.add_fit_result))

    @property
    def fit_kwargs(self):
        """Keyword arguments passed to the fit functions; r2 only in batch mode."""
        fit_kwargs = dict(r1=self.r1, lr=self.learning_rate, momentum=self.momentum, nesterov=self.nesterov,
                          epochs=self.epochs, patience=self.stop_patience, stop_loss=self.stop_loss)
        if self.fit_mode == 'Batch':
            fit_kwargs['r2'] = self.r2
        return fit_kwargs
class ClassificationControl(ControlPanel):
    """Control panel mapping fit quantities to colors via thresholds or colormaps.

    Three modes: 'Discrete' (N colors separated by N-1 threshold values),
    'Continuous' (interpolated between color/value anchors) and 'Color map'
    (a matplotlib/colorcet colormap normalized between two values). The
    resulting per-residue colors can be published as a 'colors' table.
    """

    header = 'Classification'
    # format ['tag1', ('tag2a', 'tag2b') ] = tag1 OR (tag2a AND tag2b)
    # todo unify name for target field (target_data set)
    # When coupling param with the same name together there should be an option to exclude this behaviour
    table = param.Selector(label='Target table')
    # fit_ID = param.Selector() # generalize selecting widgets based on selected table
    # quantity = param.Selector(label='Quantity') # this is the lowest-level quantity of the multiindex df (filter??)
    mode = param.Selector(default='Discrete', objects=['Discrete', 'Continuous', 'Color map'],
                          doc='Choose color mode (interpolation between selected colors).')
    num_colors = param.Integer(3, bounds=(1, 10), label='Number of colours',
                               doc='Number of classification colors.')
    library = param.Selector(default='matplotlib', objects=['matplotlib', 'colorcet'])
    color_map = param.Selector()
    otsu_thd = param.Action(lambda self: self._action_otsu(), label='Otsu',
                            doc="Automatically perform thresholding based on Otsu's method.")
    linear_thd = param.Action(lambda self: self._action_linear(), label='Linear',
                              doc='Automatically perform thresholding by creating equally spaced sections.')
    log_space = param.Boolean(False,
                              doc='Boolean to set whether to apply colors in log space or not.')
    no_coverage = param.Color(default='#8c8c8c', doc='Color to use for regions of no coverage')
    color_set_name = param.String('', doc='Name for the color dataset to add')
    add_colorset = param.Action(lambda self: self._action_add_colorset())

    # Backing state for the dynamically created value/color widgets
    values = param.List(default=[], precedence=-1)
    colors = param.List(default=[], precedence=-1)

    def __init__(self, parent, **param):
        super(ClassificationControl, self).__init__(parent, **param)

        # Split available colormaps by library; strip the 'cet_' mirrors from
        # the matplotlib list so each colormap appears only once.
        cc_cmaps = sorted(colorcet.cm.keys())
        mpl_cmaps = sorted(set(plt.colormaps()) - set('cet_' + cmap for cmap in cc_cmaps))
        self.cmaps = {'matplotlib': mpl_cmaps, 'colorcet': cc_cmaps}
        self.param['color_map'].objects = mpl_cmaps

        self._update_num_colors()
        self._update_num_values()
        self.excluded = ['library', 'color_map']

        # Candidate target tables: those rendered through an ApplyCmapTransform
        views = [view for view in self.views.values() if any(isinstance(trs, ApplyCmapTransform) for trs in view.transforms)]
        options = [view.table for view in views]
        for view in views:
            view.source.param.watch(self._sources_updated, 'updated')
        self.param['table'].objects = options
        if not self.table and options:
            self.table = options[0]
        self._table_updated()

    @property
    def own_widget_names(self):
        """Widget names to display: base params, select widgets, then value/color widgets."""
        initial_widgets = []
        for name in self.param:
            precedence = self.param[name].precedence
            if (precedence is None or precedence > 0) and name not in self.excluded + ['name']:
                initial_widgets.append(name)
        # Insert the dynamic column-select widgets right after the 'table' selector
        select_widgets = [name for name in self.widgets.keys() if name.startswith('select')]
        initial_widgets[1:1] = select_widgets

        widget_names = initial_widgets + [f'value_{i}' for i in range(len(self.values))]
        if self.mode != 'Color map':
            widget_names += [f'color_{i}' for i in range(len(self.colors))]
        return widget_names

    def make_dict(self):
        # NOTE(review): this method was corrupted in the source (only the fragment
        # 'colors=pn.widgets.IntInput)' survived); reconstructed as an IntInput
        # override for 'num_colors' — confirm against version control history.
        return self.generate_widgets(num_colors=pn.widgets.IntInput)

    @property
    def _layout(self):
        return [
            ('self', self.own_widget_names),
        ]

    def _sources_updated(self, *events):
        self._table_updated()

    @param.depends('table', watch=True)
    def _table_updated(self):
        """Rebuild the per-column-level select widgets for the chosen target table."""
        df = self.get_data()
        if df.empty:
            return
        names = df.columns.names

        # Drop the select widgets belonging to the previously selected table
        old_widget_names = [key for key in self.widgets.keys() if key.startswith('select')]
        [self.widgets.pop(key) for key in old_widget_names]

        widget_dict = {}
        for i, (name, options) in enumerate(zip(names, df.columns.levels)):
            # All levels except the last accept a '*' wildcard
            _opts = ['*'] + list(options) if i != len(names) - 1 else list(options)
            if i == 0:
                default = _opts[-1]
            else:
                default = 'deltaG' if 'deltaG' in _opts else _opts[0]
            widget = pn.widgets.Select(name=name, options=_opts, value=default)
            widget_dict[f'select_{i}'] = widget
        self.widgets.update(widget_dict)

        self.update_box()

    def get_data(self):
        """Return the full dataframe of the current target table."""
        source = self.sources['dataframe']
        df = source.get(self.table)
        return df

    def get_selected_data(self):
        """Return the columns of the target table matching the select widgets ('*' = any)."""
        df = self.get_data()
        selected_fields = [widget.value for name, widget in self.widgets.items() if name.startswith('select')]
        bools_list = [df.columns.get_level_values(i) == value for i, value in enumerate(selected_fields) if
                      value != '*']
        if len(bools_list) == 0:
            bools = np.ones(len(df.columns)).astype(bool)
        elif len(bools_list) == 1:
            bools = np.array(bools_list).flatten()
        else:
            bools_array = np.array(bools_list)
            # np.prod: 'np.product' alias was removed in NumPy 2.0
            bools = np.prod(bools_array, axis=0).astype(bool)

        selected_df = df.iloc[:, bools]
        return selected_df

    def get_values(self):
        """Return all finite values of the current selection as a flat array."""
        array = self.get_selected_data().to_numpy().flatten()
        values = array[~np.isnan(array)]
        return values

    def _action_otsu(self):
        """Set threshold widgets from multi-Otsu thresholding of the selected values."""
        if self.num_colors <= 1:
            return
        values = self.get_values()
        if not values.size:
            return
        func = np.log if self.log_space else lambda x: x
        thds = threshold_multiotsu(func(values), classes=self.num_colors)
        widgets = [widget for name, widget in self.widgets.items() if name.startswith('value')]
        # Thresholds are stored high-to-low; clear bounds first to avoid clamping
        for thd, widget in zip(thds[::-1], widgets):
            widget.start = None
            widget.end = None
            widget.value = np.exp(thd) if self.log_space else thd
        self._update_bounds()

    def _action_linear(self):
        """Set threshold widgets to equally (or log-equally) spaced values."""
        i = 1 if self.mode == 'Discrete' else 0
        values = self.get_values()
        if not values.size:
            return
        if self.log_space:
            thds = np.logspace(np.log(np.min(values)), np.log(np.max(values)),
                               num=self.num_colors + i, endpoint=True, base=np.e)
        else:
            thds = np.linspace(np.min(values), np.max(values), num=self.num_colors + i, endpoint=True)

        widgets = [widget for name, widget in self.widgets.items() if name.startswith('value')]
        for thd, widget in zip(thds[i:self.num_colors][::-1], widgets):
            widget.start = None
            widget.end = None
            widget.value = thd
        self._update_bounds()

    def _action_add_colorset(self):
        """Apply the current colormap to the selection and publish a 'colors' table."""
        if not self.color_set_name:
            self.parent.logger.info('No name given to the colorset')
            return
        source = self.sources['dataframe']
        if self.color_set_name in source.tables.keys():
            self.parent.logger.info(f'Colorset with name {self.color_set_name} already present')
            return

        selected_df = self.get_selected_data()
        cmap, norm = self.get_cmap_and_norm()
        # RGBA bytes -> hex strings, preserving the selection's shape
        array = cmap(norm(selected_df), bytes=True)
        colors_hex = rgb_to_hex(array.reshape(-1, 4))
        output = colors_hex.reshape(array.shape[:-1])
        output_df = pd.DataFrame(output, index=selected_df.index, columns=selected_df.columns)

        if output_df.index.name == 'r_number':
            # Expand residue index to span all datasets; fill gaps with no-coverage color
            c_term = max([data_obj.coverage.protein.c_term for data_obj in self.parent.data_objects.values()])
            n_term = min([data_obj.coverage.protein.n_term for data_obj in self.parent.data_objects.values()])
            new_index = pd.RangeIndex(start=n_term, stop=c_term, name='r_number')
            output_df = output_df.reindex(index=new_index, fill_value=self.no_coverage.upper())

        output_df.rename_axis(columns={'fit_ID': 'color_ID'}, inplace=True)
        output_df.columns = output_df.columns.set_levels([self.color_set_name], level=0)

        source.add_df(output_df, 'colors')

    @param.depends('color_map', 'values', 'colors', watch=True)
    def _action_apply(self):
        """Push the current colormap/norm into the view's cmap transform."""
        cmap, norm = self.get_cmap_and_norm()
        if cmap and norm:
            transform = self.transforms['cmap_transform']
            transform.cmap = cmap
            transform.norm = norm

    def get_cmap_and_norm(self):
        """Build (cmap, norm) for the current mode, or (None, None) if incomplete."""
        norm_klass = mpl.colors.Normalize if not self.log_space else mpl.colors.LogNorm

        if len(self.values) < 2:
            return None, None

        if self.mode == 'Discrete':
            # Discrete mode needs exactly one fewer threshold than colors
            if len(self.values) != len(self.colors) - 1:
                return None, None
            cmap = mpl.colors.ListedColormap(self.colors)
            norm = mpl.colors.BoundaryNorm(self.values[::-1], self.num_colors, extend='both')
        elif self.mode == 'Continuous':
            norm = norm_klass(vmin=np.min(self.values), vmax=np.max(self.values), clip=True)
            positions = norm(self.values[::-1])
            cmap = mpl.colors.LinearSegmentedColormap.from_list('custom_cmap', list(zip(positions, self.colors)))
        elif self.mode == 'Color map':
            norm = norm_klass(vmin=np.min(self.values), vmax=np.max(self.values), clip=True)
            if self.library == 'matplotlib':
                # NOTE(review): mpl.cm.get_cmap is deprecated since matplotlib 3.7
                # (use mpl.colormaps[...]); left as-is for older-version compatibility.
                cmap = mpl.cm.get_cmap(self.color_map)
            elif self.library == 'colorcet':
                cmap = getattr(colorcet, 'm_' + self.color_map)

        cmap.set_bad(self.no_coverage)
        return cmap, norm

    @param.depends('library', watch=True)
    def _update_library(self):
        options = self.cmaps[self.library]
        self.param['color_map'].objects = options

    @param.depends('mode', watch=True)
    def _mode_updated(self):
        """Adjust visible widgets per mode; 'Color map' pins num_colors to 2."""
        if self.mode == 'Discrete':
            self.excluded = ['library', 'color_map']
        elif self.mode == 'Continuous':
            self.excluded = ['library', 'color_map', 'otsu_thd']
        elif self.mode == 'Color map':
            self.excluded = ['otsu_thd', 'num_colors']
            self.num_colors = 2

        self.param.trigger('num_colors')
        self.update_box()

    @param.depends('num_colors', watch=True)
    def _update_num_colors(self):
        """Grow/shrink the color widget list to match num_colors."""
        while len(self.colors) != self.num_colors:
            if len(self.colors) > self.num_colors:
                self._remove_color()
            elif len(self.colors) < self.num_colors:
                self._add_color()
        self.param.trigger('colors')

    @param.depends('num_colors', watch=True)
    def _update_num_values(self):
        """Grow/shrink the threshold widget list (one fewer than colors in Discrete mode)."""
        diff = 1 if self.mode == 'Discrete' else 0
        while len(self.values) != self.num_colors - diff:
            if len(self.values) > self.num_colors - diff:
                self._remove_value()
            elif len(self.values) < self.num_colors - diff:
                self._add_value()

        self._update_bounds()
        self.param.trigger('values')
        self.update_box()

    def _add_value(self):
        # Each new threshold defaults to one below the previous (values run high-to-low)
        try:
            first_value = self.values[-1]
        except IndexError:
            first_value = 0

        default = float(first_value - 1)
        self.values.append(default)

        name = f'Threshold {len(self.values)}'
        key = f'value_{len(self.values) - 1}'
        widget = pn.widgets.FloatInput(name=name, value=default)
        self.widgets[key] = widget
        widget.param.watch(self._value_event, ['value'])

    def _remove_value(self):
        key = f'value_{len(self.values) - 1}'
        widget = self.widgets.pop(key)
        self.values.pop()

        # Detach all watchers so the removed widget cannot mutate state
        [widget.param.unwatch(watcher) for watcher in widget.param._watchers]
        del widget

    def _add_color(self):
        try:
            default = DEFAULT_CLASS_COLORS[len(self.colors)]
        except IndexError:
            # Past the predefined palette: pick a random hex color
            default = "#"+''.join(np.random.choice(list('0123456789abcdef'), 6))

        self.colors.append(default)

        key = f'color_{len(self.colors) - 1}'
        widget = pn.widgets.ColorPicker(value=default)
        self.widgets[key] = widget

        widget.param.watch(self._color_event, ['value'])

    def _remove_color(self):
        key = f'color_{len(self.colors) - 1}'
        widget = self.widgets.pop(key)
        self.colors.pop()
        [widget.param.unwatch(watcher) for watcher in widget.param._watchers]
        del widget

    def _color_event(self, *events):
        """Mirror a color-picker change into the backing colors list."""
        for event in events:
            idx = list(self.widgets.values()).index(event.obj)
            key = list(self.widgets.keys())[idx]
            widget_index = int(key.split('_')[1])
            self.colors[widget_index] = event.new

        self.param.trigger('colors')

    def _value_event(self, *events):
        """Mirror a threshold-widget change into the backing values list."""
        for event in events:
            idx = list(self.widgets.values()).index(event.obj)
            key = list(self.widgets.keys())[idx]
            widget_index = int(key.split('_')[1])
            self.values[widget_index] = event.new

        self._update_bounds()
        self.param.trigger('values')

    def _update_bounds(self):
        """Constrain each threshold widget strictly between its neighbours (descending order)."""
        for i in range(len(self.values)):
            widget = self.widgets[f'value_{i}']
            if i > 0:
                key = f'value_{i-1}'
                prev_value = float(self.widgets[key].value)
                widget.end = np.nextafter(prev_value, prev_value - 1)
            else:
                widget.end = None

            if i < len(self.values) - 1:
                key = f'value_{i+1}'
                next_value = float(self.widgets[key].value)
                widget.start = np.nextafter(next_value, next_value + 1)
            else:
                widget.start = None
class ProteinControl(ControlPanel):
    """Control panel for loading a protein structure (local PDB file or RCSB download)."""

    header = 'Protein Control'
    input_mode = param.Selector(doc='Method of protein structure input', objects=['PDB File', 'RCSB Download'])
    file_binary = param.Parameter()
    rcsb_id = param.String(doc='RCSB ID of protein to download')
    load_structure = param.Action(lambda self: self._action_load_structure())

    def __init__(self, parent, **params):
        super(ProteinControl, self).__init__(parent, **params)
        excluded = ['rcsb_id']  # default input mode is 'PDB File'
        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()

    @property
    def _layout(self):
        return [('self', self.own_widget_names),
                ('filters.ngl_color_id', None),
                ('filters.ngl_state_name', None),
                ]

    def make_dict(self):
        """Use a FileInput restricted to a single .pdb file for 'file_binary'."""
        return self.generate_widgets(file_binary=pn.widgets.FileInput(multiple=False, accept='.pdb'))

    @param.depends('input_mode', watch=True)
    def _update_input_mode(self):
        """Show only the widgets relevant to the chosen input mode."""
        if self.input_mode == 'PDB File':
            excluded = ['rcsb_id']
        elif self.input_mode == 'RCSB Download':
            excluded = ['file_binary']

        self.own_widget_names = [name for name in self.widgets.keys() if name not in excluded]
        self.update_box()

    def _action_load_structure(self):
        """Load a PDB structure into the NGL view from upload or RCSB download."""
        view = self.views['protein']
        if self.input_mode == 'PDB File':
            pdb_string = self.file_binary.decode()
            view.ngl_view.pdb_string = pdb_string
        elif self.input_mode == 'RCSB Download':
            if len(self.rcsb_id) != 4:
                self.parent.logger.info(f"Invalid RCSB pdb id: {self.rcsb_id}")
                return

            # Use HTTPS: RCSB serves downloads over TLS; avoids a cleartext
            # request that would otherwise rely on a server-side redirect.
            url = f'https://files.rcsb.org/download/{self.rcsb_id}.pdb'
            with urllib.request.urlopen(url) as response:
                pdb_string = response.read().decode()
            view.ngl_view.pdb_string = pdb_string
class GraphControl(ControlPanel):
    """Control panel with cross-view graph controls: state, fit-ID and peptide selection.

    Selections made here are fanned out to the corresponding filters so all
    views (coverage, peptide uptake, deltaG, losses, NGL) stay in sync.
    """

    header = 'Graph Control'

    spin = param.Boolean(default=False, doc='Spin the protein object')
    state_name = param.Selector(doc="Name of the currently selected state")
    fit_id = param.Selector(doc="Name of the currently selected fit ID")
    peptide_index = param.Selector(doc="Index of the currently selected peptide")

    def __init__(self, parent, **params):
        super(GraphControl, self).__init__(parent, **params)
        source = self.sources['dataframe']
        source.param.watch(self._source_updated, 'updated')

    def make_dict(self):
        """Widget dict: markdown section headers plus auto-generated param widgets."""
        widgets = {
            'general': pn.pane.Markdown('### General'),
            'coverage': pn.pane.Markdown('### Coverage'),
            'peptide': pn.pane.Markdown('### Peptide'),
            'losses': pn.pane.Markdown('### Losses'),
            'debugging': pn.pane.Markdown('### Debugging'),
        }

        return {**widgets, **self.generate_widgets()}

    def _source_updated(self, *events):
        """Refresh fit-ID and state-name options from the dataframe source tables."""
        source = self.sources['dataframe']
        table = source.get('global_fit')
        fit_id_options = list(table.columns.get_level_values(0).unique())
        self.param['fit_id'].objects = fit_id_options
        if not self.fit_id and fit_id_options:
            self.fit_id = fit_id_options[0]

        table = source.get('peptides')
        state_name_options = list(table.columns.get_level_values(0).unique())
        self.param['state_name'].objects = state_name_options
        if not self.state_name and state_name_options:
            self.state_name = state_name_options[0]

    @param.depends('state_name', watch=True)
    def _update_state_name(self):
        """Propagate the selected state to all state-dependent filters and peptide options."""
        # NOTE(review): the first line of this list was truncated in the source;
        # reconstructed from the '<view>_state_name' filter naming pattern used
        # below — confirm the exact entries against version control history.
        dwarfs = ['coverage_state_name', 'coverage_mse_state_name', 'peptide_d_exp_state_name',
                  'peptide_d_calc_state_name', 'deltaG_state_name', 'rates_state_name', 'ngl_state_name']
        for dwarf in dwarfs:
            filt = self.filters[dwarf]
            filt.value = self.state_name

        # The losses filter may not have this state yet (no fit done for it)
        losses_filt = self.filters['losses_state_name']
        if self.state_name in losses_filt.param['value'].objects:
            losses_filt.value = self.state_name

        source = self.sources['dataframe']
        table = source.get('peptides')
        unique_vals = table[self.state_name]['start_end'].unique()
        peptide_options = list(range(len(unique_vals)))
        self.param['peptide_index'].objects = peptide_options
        # NOTE(review): the selection is reset only when a peptide was already
        # chosen; an initial None is never auto-populated — confirm intended.
        if self.peptide_index is not None and peptide_options:
            self.peptide_index = peptide_options[0]

    @param.depends('fit_id', watch=True)
    def _update_fit_id(self):
        """Propagate the selected fit ID to all fit-dependent filters."""
        elves = ['coverage_mse_fit_id', 'peptide_d_calc_fit_id', 'deltaG_fit_id', 'losses_fit_id']
        for elf in elves:
            filt = self.filters[elf]
            filt.value = self.fit_id

    @param.depends('peptide_index', watch=True)
    def _update_peptide_index(self):
        """Propagate the selected peptide index to the peptide-level filters."""
        hobbits = ['peptide_d_exp_select', 'peptide_d_calc_select']
        for hobbit in hobbits:
            filt = self.filters[hobbit]
            filt.value = self.peptide_index

    @property
    def _layout(self):
        return [
            ('self', ['general']),
            ('self', ['fit_id', 'state_name']),
            ('self', ['coverage']),
            ('filters.coverage_exposure', None),
            ('self', ['peptide', 'peptide_index']),
            ('self', ['losses']),
            ('filters.losses_state_name', None),
        ]

    @param.depends('spin', watch=True)
    def _spin_updated(self):
        view = self.views['protein']
        view.ngl_view.spin = self.spin
class FileExportControl(ControlPanel):
    """Control panel for downloading tables (csv/pprint) and PyMOL color scripts."""

    header = "File Export"
    table = param.Selector(label='Target dataset', doc='Name of the dataset to export')
    export_format = param.Selector(default='csv', objects=['csv', 'pprint'],
                                   doc="Format of the exported tables."
                                       "'csv' is machine-readable, 'pprint' is human-readable format")

    def __init__(self, parent, **param):
        super(FileExportControl, self).__init__(parent, **param)
        objects = list(self.sources['dataframe'].tables.keys())
        self.param['table'].objects = objects
        # Guard against an empty source: the original unconditionally indexed
        # objects[0] and raised IndexError when no tables were loaded yet.
        if objects:
            self.table = objects[0]
        self.sources['dataframe'].param.watch(self._source_updated, 'updated')

    def make_dict(self):
        """Param widgets plus the two FileDownload buttons (table + pml zip)."""
        widgets = self.generate_widgets()
        widgets['export_tables'] = pn.widgets.FileDownload(
            label='Download table',
            callback=self.table_export_callback
        )
        widgets['export_pml'] = pn.widgets.FileDownload(label='Download pml scripts',
                                                        callback=self.pml_export_callback,
                                                        )
        return widgets

    @property
    def _layout(self):
        return [
            ('self', None)
        ]

    def _source_updated(self, *events):
        self.param['table'].objects = list(self.sources['dataframe'].tables.keys())
        self._table_updated()

    @param.depends('table', 'export_format', watch=True)
    def _table_updated(self):
        """Cache the selected dataframe and update download filenames / pml availability."""
        self.df = self.sources['dataframe'].get(self.table)

        ext = '.csv' if self.export_format == 'csv' else '.txt'
        self.widgets['export_tables'].filename = self.table + ext
        # pml scripts can only be generated from a colors table
        if self.table == 'colors':
            self.widgets['export_pml'].disabled = False
            self.widgets['export_pml'].filename = self.table + '_pml_scripts.zip'
        else:
            self.widgets['export_pml'].disabled = True

    @pn.depends('table')
    def pml_export_callback(self):
        """Return a zip (BytesIO) of one PyMOL coloring script per color column."""
        if self.table:
            bio = BytesIO()
            with zipfile.ZipFile(bio, 'w') as pml_zip:
                for col_name in self.df.columns:
                    # Flatten MultiIndex column labels into a filename
                    name = col_name if isinstance(col_name, str) else '_'.join(col_name)
                    colors = self.df[col_name]
                    pml_script = series_to_pymol(colors)
                    pml_zip.writestr(name + '.pml', pml_script)
            bio.seek(0)
            return bio

    @pn.depends('table')
    def table_export_callback(self):
        """Return the cached dataframe serialized as a StringIO, or None if no table."""
        if self.table:
            io = dataframe_to_stringio(self.df, fmt=self.export_format)
            return io
        else:
            return None
class SingleMappingFileInputControl(MappingFileInputControl):
    """Mapping-file input control that publishes each newly added dataset as a DataSource."""

    def _action_add_dataset(self):
        """Add the dataset, then publish every dataset not yet present as a source."""
        super()._action_add_dataset()
        new_keys = self.parent.datasets.keys() - self.parent.sources.keys()
        for name in new_keys:
            source = DataSource(
                self.parent.datasets[name].to_records(),
                tags=['comparison', 'mapping'],
                x='r_number',
                renderer='circle',
                size=10,
            )
            self.parent.publish_data(name, source)
class MatrixMappingFileInputControl(SingleMappingFileInputControl):
    """Mapping-file input that additionally publishes a demo RGBA image source ('scores_image')."""

    # Numeric columns from the input file to include in the matrix.
    datapoints = param.ListSelector(doc='Select datapoints to include in the matrix')

    def _action_add_dataset(self):
        super()._action_add_dataset()
        # Build an N x N placeholder RGBA image: a uint32 buffer viewed as 4 uint8 channels.
        N = 20
        img = np.empty((N, N), dtype=np.uint32)
        view = img.view(dtype=np.uint8).reshape((N, N, 4))
        for i in range(N):
            for j in range(N):
                view[i, j, 0] = int(i / N * 255)  # red gradient along rows
                view[i, j, 1] = 158
                view[i, j, 2] = int(j / N * 255)  # blue gradient along columns
                view[i, j, 3] = 255  # fully opaque
        # NOTE(review): random placeholder scores — presumably replaced downstream; confirm.
        values = np.random.random(img.shape)
        img_ds_dict = {'img': [img], 'scores': [values]}
        data_source = DataSource(img_ds_dict, tags=['image'], name='scores_image', x=0, y=0)
        self.parent.publish_data('scores_image', data_source)

    def make_list(self):
        # Move the 'datapoints' widget (generated last) up to position 3.
        widget_list = super().make_list()
        datapoints_widget = widget_list.pop()
        widget_list.insert(3, datapoints_widget)
        return widget_list

    def _add_dataset(self):
        """Store only the selected datapoint columns (plus r_number index) as a Protein."""
        full_dict = self.protein.to_dict()
        data_dict = {k: v for k, v in full_dict.items() if k in self.datapoints}
        data_dict['r_number'] = self.protein.index
        protein = Protein(data_dict, index='r_number')
        self.parent.datasets[self.dataset_name] = protein

    @param.depends('input_file', watch=True)
    def _input_file_updated(self):
        super()._input_file_updated()
        if self.input_file:
            # Columns whose names parse as (decimal) numbers become selectable datapoints.
            header_fields = self.protein.df.columns
            float_fields = [f for f in header_fields if f.replace('.', '', 1).isdigit()]
            self.param['datapoints'].objects = float_fields
            self.datapoints = float_fields
class MatrixImageControl(ControlPanel):
    # NOTE(review): the class body is empty in this source (content likely lost);
    # the docstring is added so the class statement parses.
    """Control panel for the matrix/scores image (no behaviour defined in this source)."""
class FDPeptideFileInputControl(PeptideFileInputControl):
    """Peptide file input control for loading a fully-deuterated (FD) control measurement."""

    def make_list(self):
        """Return the widget list: file selectors first, then the FD-control parameters."""
        parameters = ['add_button', 'clear_button', 'drop_first', 'load_button', 'd_percentage',
                      'fd_state', 'fd_exposure', 'parse_button']
        first_widgets = [self.widget_dict[par] for par in parameters]
        return self.file_selectors + first_widgets

    def _action_parse(self):
        """Build an HDXMeasurement from the selected FD state/exposure and store it on the parent."""
        pmt = self.parent.peptides
        data_states = pmt.data[pmt.data['state'] == self.fd_state]
        data_exposure = data_states[data_states['exposure'] == self.fd_exposure]
        # Score: percentage uptake relative to the number of exchangeable residues.
        scores = 100 * data_exposure['uptake'] / data_exposure['ex_residues']
        data_final = append_fields(data_exposure, 'scores', data=scores, usemask=False)

        series = HDXMeasurement(data_final)
        self.parent.series = series

        # Bug fix: this action parses the FD control state (fd_state); the original
        # message logged self.exp_state, which is unrelated to what was loaded.
        self.parent.logger.info(f"Loaded FD control '{self.fd_state}' with {len(series.coverage)} peptides")
        self.parent.logger.info(f'Mean deuteration is {scores.mean()}%, std {scores.std()}%')
class PeptideFoldingFileInputControl(PeptideFileInputControl):
    """Peptide input control for folding experiments with both 100% (FD) and 0% controls."""

    be_mode = param.Selector(doc='Select method of normalization', label='Norm mode', objects=['Exp', 'Theory']
                             , precedence=-1)
    fd_state = param.Selector(doc='State used to normalize uptake', label='100% Control State')
    fd_exposure = param.Selector(doc='Exposure used to normalize uptake', label='100% Control Exposure')
    zero_state = param.Selector(doc='State used to zero uptake', label='0% Control State')
    zero_exposure = param.Selector(doc='Exposure used to zero uptake', label='0% Control Exposure')

    def make_dict(self):
        return self.generate_widgets()

    def make_list(self):
        """Widget order: file selectors, then load/control/experiment parameters."""
        parameters = ['add_button', 'clear_button', 'drop_first', 'ignore_prolines', 'load_button',
                      'fd_state', 'fd_exposure', 'zero_state', 'zero_exposure', 'exp_state',
                      'exp_exposures', 'parse_button']
        first_widgets = [self.widget_dict[par] for par in parameters]
        return self.file_selectors + first_widgets

    def _action_load(self):
        super()._action_load()
        states = list(np.unique(self.parent.peptides.data['state']))
        self.param['zero_state'].objects = states
        self.zero_state = states[0]

    @param.depends('fd_state', 'fd_exposure', watch=True)
    def _update_experiment(self):
        # Restrict selectable experiment states to those compatible with the FD control.
        pm_dict = self.parent.peptides.return_by_name(self.fd_state, self.fd_exposure)
        states = list(np.unique([v.state for v in pm_dict.values()]))
        self.param['exp_state'].objects = states
        self.exp_state = states[0] if not self.exp_state else self.exp_state

    @param.depends('zero_state', watch=True)
    def _update_zero_exposure(self):
        """Update the available 0%-control exposures for the selected zero state."""
        b = self.parent.peptides.data['state'] == self.zero_state
        data = self.parent.peptides.data[b]
        exposures = list(np.unique(data['exposure']))
        self.param['zero_exposure'].objects = exposures
        if exposures:
            # Bug fix: assign to the declared 'zero_exposure' parameter; the original
            # set 'control_exposure', which is not a parameter of this class, so the
            # selector default was never applied.
            self.zero_exposure = exposures[0]

    def _action_parse(self):
        """Apply both controls, build the HDXMeasurement for the experiment state, publish it."""
        control_0 = self.zero_state, self.zero_exposure
        self.parent.peptides.set_control((self.fd_state, self.fd_exposure), control_0=control_0)
        data_states = self.parent.peptides.data[self.parent.peptides.data['state'] == self.exp_state]
        data = data_states[np.isin(data_states['exposure'], self.exp_exposures)]

        series = HDXMeasurement(data)
        self.parent.series = series
        self._publish_scores()

        self.parent.logger.info(f'Loaded experiment state {self.exp_state} '
                                f'({len(series)} timepoints, {len(series.coverage)} peptides each)')
class DifferenceControl(ControlPanel):
    """Compare two datasets (subtract or divide a shared quantity) and publish the result."""

    header = 'Differences'

    dataset_1 = param.Selector(doc='First dataset to compare')
    dataset_2 = param.Selector(doc='Second dataset to compare')

    comparison_name = param.String()
    operation = param.Selector(default='Subtract', objects=['Subtract', 'Divide'],
                               doc='Select the operation to perform between the two datasets')

    comparison_quantity = param.Selector(doc="Select a quantity to compare (column from input txt file)")
    add_comparison = param.Action(lambda self: self._action_add_comparison(),
                                  doc='Click to add this comparison to available comparisons')
    comparison_list = param.ListSelector(doc='Lists available comparisons')
    remove_comparison = param.Action(lambda self: self._action_remove_comparison(),
                                     doc='Remove selected comparisons from the list')

    def __init__(self, parent, **params):
        super(DifferenceControl, self).__init__(parent, **params)
        self.parent.param.watch(self._datasets_updated, ['datasets'])

    def _datasets_updated(self, events):
        """Refresh both dataset selectors when the parent's datasets change."""
        objects = list(self.parent.datasets.keys())

        self.param['dataset_1'].objects = objects
        if not self.dataset_1:
            self.dataset_1 = objects[0]
        self.param['dataset_2'].objects = objects
        if not self.dataset_2:
            # Reconstructed from a corrupted source line; mirrors the dataset_1 branch.
            self.dataset_2 = objects[0]

    @param.depends('dataset_1', 'dataset_2', watch=True)
    def _selection_updated(self):
        """Populate the comparable quantities: numeric columns shared by both datasets."""
        if self.datasets:
            unique_names = set.intersection(*[{name for name in protein.df.dtypes.index} for protein in self.datasets])
            objects = [name for name in unique_names if np.issubdtype(self.protein_1[name].dtype, np.number)]
            objects.sort()
            self.param['comparison_quantity'].objects = objects
            if self.comparison_quantity is None:
                self.comparison_quantity = objects[0]

    @property
    def protein_1(self):
        """First selected Protein, or None when not (yet) available."""
        try:
            return self.parent.datasets[self.dataset_1]
        except KeyError:
            return None

    @property
    def protein_2(self):
        """Second selected Protein, or None when not (yet) available."""
        try:
            return self.parent.datasets[self.dataset_2]
        except KeyError:
            return None

    @property
    def datasets(self):
        """Tuple (protein_1, protein_2), or None when either is missing."""
        datasets = (self.protein_1, self.protein_2)
        if None in datasets:
            return None
        else:
            return datasets

    def _action_add_comparison(self):
        """Compute the comparison and publish it as a DataSource under comparison_name."""
        if not self.comparison_name:
            self.parent.logger.info('The added comparison needs to have a name')
            return
        if self.datasets is None:
            return

        op = {'Subtract': operator.sub, 'Divide': operator.truediv}[self.operation]
        comparison = op(*[p[self.comparison_quantity] for p in self.datasets]).rename('comparison')
        value1 = self.protein_1[self.comparison_quantity].rename('value1')
        value2 = self.protein_2[self.comparison_quantity].rename('value2')

        df = pd.concat([comparison, value1, value2], axis=1)
        output = df.to_records()
        data_source = DataSource(output, tags=['comparison', 'mapping'], x='r_number', y='comparison',
                                 renderer='circle', size=10)
        self.parent.publish_data(self.comparison_name, data_source)

        self.comparison_name = ''  # clear the name field for the next comparison

    def _action_remove_comparison(self):
        for comparison in self.comparison_list:
            self.parent.sources.pop(comparison)
        self.parent.param.trigger('sources')

    @param.depends('parent.sources', watch=True)
    def _update_comparison_list(self):
        # Only sources tagged 'comparison' are listed as removable comparisons.
        objects = [name for name, d in self.parent.sources.items() if 'comparison' in d.tags]
        self.param['comparison_list'].objects = objects
class SingleControl(ControlPanel):
    """Publish a single quantity of one dataset as a plottable DataSource."""

    header = 'Datasets'

    dataset = param.Selector(doc='Dataset')
    dataset_name = param.String(doc='Name of the dataset to add')

    quantity = param.Selector(doc="Select a quantity to plot (column from input txt file)")

    add_dataset = param.Action(lambda self: self._action_add_dataset(),
                               doc='Click to add this comparison to available comparisons')
    dataset_list = param.ListSelector(doc='Lists available comparisons')
    remove_dataset = param.Action(lambda self: self._action_remove_comparison(),
                                  doc='Remove selected datasets from available datasets')

    def __init__(self, parent, **params):
        super(SingleControl, self).__init__(parent, **params)
        self.parent.param.watch(self._datasets_updated, ['datasets'])

    def _datasets_updated(self, events):
        """Refresh the dataset selector when the parent's datasets change."""
        objects = list(self.parent.datasets.keys())
        self.param['dataset'].objects = objects
        if not self.dataset:
            self.dataset = objects[0]

    @param.depends('dataset', watch=True)
    def _selection_updated(self):
        """Update selectable quantities: all columns except the residue number."""
        if self.dataset:
            dataset = self.parent.datasets[self.dataset]
            names = dataset.dtype.names
            objects = [name for name in names if name != 'r_number']
            self.param['quantity'].objects = objects
            if self.quantity is None:
                self.quantity = objects[0]

    def _action_add_dataset(self):
        """Publish the selected dataset/quantity as a DataSource under dataset_name."""
        if not self.dataset_name:
            self.parent.logger.info('The added comparison needs to have a name')
            return
        if not self.dataset:
            return

        array = self.parent.datasets[self.dataset]
        data_source = DataSource(array, tags=['comparison', 'mapping'], x='r_number', y=self.quantity,
                                 renderer='circle', size=10)
        self.parent.publish_data(self.dataset_name, data_source)

        # Bug fix: clear the declared 'dataset_name' field; the original assigned to
        # 'comparison_name', a copy-paste leftover from DifferenceControl, so the
        # name box was never cleared.
        self.dataset_name = ''

    def _action_remove_comparison(self):
        for ds in self.dataset_list:
            self.parent.sources.pop(ds)
        self.parent.param.trigger('sources')

    @param.depends('parent.sources', watch=True)
    def _update_dataset_list(self):
        objects = [name for name, d in self.parent.sources.items()]
        self.param['dataset_list'].objects = objects
class FDCoverageControl(CoverageControl):
    """Coverage control for FD data; drops the last widget of the grandparent's list."""

    def make_list(self):
        # Deliberate skip-level super(): lookup starts *above* CoverageControl in the
        # MRO, bypassing CoverageControl.make_list entirely; the final widget is dropped.
        lst = super(CoverageControl, self).make_list()
        return lst[:-1]
class FoldingFitting(InitialGuessControl):
    """Initial-guess control for folding fits (dissociation model only)."""

    fitting_model = param.Selector(default='Dissociation', objects=['Dissociation'],
                                   doc='Choose method for determining initial guesses.')

    def make_list(self):
        """Return the fit widgets, registering the progress-bar views first."""
        self.widget_dict.update(pbar1=self.pbar1.view, pbar2=self.pbar2.view)
        names = ['fitting_model', 'lower_bound', 'upper_bound', 'do_fit1', 'pbar1']
        return [self.widget_dict[name] for name in names]
class FitResultControl(ControlPanel):
    """Display fitted D-uptake curves for a single peptide."""

    header = 'Fit Results'

    peptide_index = param.Integer(0, bounds=(0, None),
                                  doc='Index of the peptide to display.')
    x_axis_type = param.Selector(default='Log', objects=['Linear', 'Log'],
                                 doc='Choose whether to plot the x axis as Logarithmic axis or Linear.')

    def __init__(self, parent, **param):
        super(FitResultControl, self).__init__(parent, **param)

        # Maps fit-result name -> D-uptake array (peptides x timepoints).
        self.d_uptake = {}

        # Reconstructed from a corrupted source line; pairs with _series_updated below.
        self.parent.param.watch(self._series_updated, ['datasets'])
        self.parent.param.watch(self._fit_results_updated, ['fit_results'])

    def _series_updated(self, *events):
        # No action needed on series changes (placeholder watcher target).
        pass

    @property
    def fit_timepoints(self):
        """Log-spaced evaluation timepoints (with t=0 prepended) for fit curves."""
        time = np.logspace(-2, np.log10(self.parent.series.timepoints.max()), num=250)
        time = np.insert(time, 0, 0.)
        return time

    def _fit_results_updated(self, *events):
        """Evaluate accepted fit results on the fit timepoints and refresh sources."""
        accepted_fitresults = ['fr_pfact']
        for name, fit_result in self.parent.fit_results.items():
            if name in accepted_fitresults:
                D_upt = fit_result(self.fit_timepoints)
                self.d_uptake[name] = D_upt
            else:
                continue
        self._update_sources()

    @param.depends('peptide_index', watch=True)
    def _update_sources(self):
        """Publish uptake curves for the selected peptide (black circles for measured
        data, red lines for fitted curves)."""
        for name, array in self.d_uptake.items():
            if name == 'uptake_corrected':
                # Reconstructed from a corrupted source line: measured data are drawn
                # at the experimental timepoints.
                timepoints = self.parent.series.timepoints
                renderer = 'circle'
                color = '#000000'
            else:
                timepoints = self.fit_timepoints
                renderer = 'line'
                color = '#bd0d1f'

            dic = {'time': timepoints, 'uptake': array[self.peptide_index, :]}
            data_source = DataSource(dic, x='time', y='uptake', tags=['uptake_curve'], renderer=renderer, color=color)
            self.parent.publish_data(name, data_source)
class ColoringControl(ClassificationControl):
    """Classification control that renders colors as an interpolated score image."""

    def make_dict(self):
        # The 'quantity' widget is not applicable for image coloring.
        widgets_dict = super().make_dict()
        widgets_dict.pop('quantity')
        return widgets_dict

    @param.depends('values', 'colors', 'target', 'quantity', watch=True)
    def _get_colors(self):
        """Recompute the RGBA score image for the current thresholds/colors/target."""
        # Guard clauses: skip when thresholds are unset, thresholds are not
        # non-increasing, no target is selected, or the image source is absent.
        if np.all(self.values == 0):
            return
        elif np.any(np.diff(self.values) > 0):
            return
        elif not self.target:
            return
        elif 'scores_image' not in self.parent.sources.keys():
            return

        tgt_source = self.parent.sources[self.target]
        r_number = tgt_source.source.data['r_number']
        assert np.all(np.diff(r_number) == 1)  # requires contiguous residue numbers

        # Numeric column names are treated as exposure timepoints.
        headers = [f for f in tgt_source.source.data.keys() if f.replace('.', '', 1).isdigit()]
        headers.sort(key=float)
        timepoints = np.array([float(f) for f in headers])

        N_interpolate = 500
        interp_timepoints = np.linspace(0, timepoints.max(), num=N_interpolate, endpoint=True)
        # data_array: (timepoints, residues); interpolate each residue trace onto the grid.
        data_array = np.stack([tgt_source.source.data[k] for k in headers])
        array = np.stack([np.interp(interp_timepoints, timepoints, data) for data in data_array.T]).T

        colors_hex = self._calc_colors(array.flatten())
        if colors_hex is None:
            return
        colors_hex[colors_hex == 'nan'] = '#8c8c8c'  # grey fallback for NaN scores
        colors_rgba = np.array([hex_to_rgba(h) for h in colors_hex])

        # Pack RGBA bytes into a uint32 image buffer (4 uint8 channels per pixel).
        shape = (N_interpolate, len(r_number))
        img = np.empty(shape, dtype=np.uint32)
        view = img.view(dtype=np.uint8).reshape(*shape, 4)
        view[:] = colors_rgba.reshape(*shape, 4)

        img_source = self.parent.sources['scores_image']
        # Set the image's data width/height to the residue/time extents.
        img_source.render_kwargs['dw'] = r_number.max()
        img_source.render_kwargs['dh'] = timepoints.max()
        img_source.source.data.update(img=[img], scores=[array])
class DifferenceFileExportControl(FileExportControl):
    """File export control restricted to sources carrying the 'mapping' tag."""

    accepted_tags = ['mapping']

    def _sources_updated(self, *events):
        """Refresh the target selector with mapping-tagged sources; default to the newest."""
        tagged = [
            name for name, src in self.parent.sources.items()
            if src.resolve_tags(self.accepted_tags)
        ]
        self.param['target'].objects = list(tagged)

        if not self.target and tagged:
            self.target = tagged[-1]

    @pn.depends('target', watch=True)
    def _update_filename(self):
        """Keep download filenames in sync with the selected target."""
        self.export_linear_download.filename = self.target + '_linear.txt'
        if 'r_number' in self.export_dict:
            self.pml_script_download.filename = self.target + '_pymol.pml'
class OptionsControl(ControlPanel):
    """Global application options: x-range linking between figures and log level."""

    header = 'Options'

    # Whether the coverage figure's x range drives the other linear-mapping figures.
    link_xrange = param.Boolean(True, doc='Link the X range of the coverage figure and other linear mapping figures.', constant=False)
    log_level = param.Selector(default='DEBUG', objects=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL', 'OFF', 'TRACE'],
                               doc='Set the logging level.')

    def __init__(self, parent, **param):
        super(OptionsControl, self).__init__(parent, **param)

    @property
    def enabled(self):
        """True when both master and client figures exist, i.e. linking can be toggled."""
        return self.master_figure is not None and self.client_figures is not None

    @param.depends('link_xrange', watch=True)
    def _update_link(self):
        if self.enabled:
            if self.link_xrange:
                self._link()
            else:
                self._unlink()

    @property
    def client_figures(self):
        # Figures whose x range follows the master (coverage) figure.
        client_names = ['RateFigure', 'PFactFigure']
        return [self.parent.figure_panels[name].figure for name in client_names]

    @property
    def master_figure(self):
        return self.parent.figure_panels['CoverageFigure'].figure

    @property
    def figures(self):
        return [self.master_figure] + self.client_figures

    def _unlink(self):
        # NOTE(review): pops ALL 'change:start'/'change:end' JS callbacks, not only
        # the links added by _link — confirm no other callbacks rely on these keys.
        for fig in self.figures:
            fig.x_range.js_property_callbacks.pop('change:start')
            fig.x_range.js_property_callbacks.pop('change:end')

    def _link(self):
        # Bidirectional linking between the master figure and each client figure.
        for client in self.client_figures:
            self.master_figure.x_range.js_link('start', client.x_range, 'start')
            self.master_figure.x_range.js_link('end', client.x_range, 'end')
            client.x_range.js_link('start', self.master_figure.x_range, 'start')
            client.x_range.js_link('end', self.master_figure.x_range, 'end')
class DeveloperControl(ControlPanel):
    """Ad-hoc developer/debugging actions (logging tests, breakpoints, data dumps)."""

    header = 'Developer Options'

    test_logging = param.Action(lambda self: self._action_test_logging())
    breakpoint_btn = param.Action(lambda self: self._action_break())
    test_btn = param.Action(lambda self: self._action_test())
    trigger_btn = param.Action(lambda self: self._action_trigger())
    print_btn = param.Action(lambda self: self._action_print())
    runtime_warning = param.Action(lambda self: self._action_runtime())

    def __init__(self, parent, **params):
        super(DeveloperControl, self).__init__(parent, **params)

    def _action_test_logging(self):
        """Emit test log records at DEBUG and INFO level."""
        print(self.parent.logger)
        self.parent.logger.debug('TEST DEBUG MESSAGE')
        for i in range(20):
            self.parent.logger.info('dit is een test123')

    def _action_print(self):
        """Print current rate guesses; also exercises deltaG guess computation."""
        hdx_set = self.parent.hdx_set
        print(hdx_set.names)

        guess = self.parent.control_panels['FitControl']
        rates_df = self.sources['dataframe'].get('rates', fit_ID=guess.initial_guess)
        print(guess.initial_guess)
        print(rates_df)
        rates_guess = [rates_df[state]['rate'] for state in hdx_set.names]
        # NOTE(review): result is unused — presumably only exercises guess_deltaG;
        # confirm before removing.
        gibbs_guess = hdx_set.guess_deltaG(rates_guess)

    def _action_break(self):
        """Dump view data to stdout (intended as a debugger breakpoint location)."""
        main_ctrl = self.parent
        control_panels = main_ctrl.control_panels
        views = main_ctrl.views
        sources = main_ctrl.sources

        mse_view = views['coverage_mse']
        data = mse_view.get_data()
        print('mse')
        print(data)

        coverage_view = views['coverage']
        data = coverage_view.get_data()
        print('coverage')
        print(data)

        print('Time for a break')

    def _action_test(self):
        """Load a hard-coded local fit-result file and publish it as 'global_fit'."""
        # NOTE(review): developer-machine absolute Windows path; works locally only.
        src_file = r'C:\Users\jhsmi\pp\PyHDX\tests\test_data\ecSecB_torch_fit.txt'
        array = txt_to_np(src_file)
        data_dict = {name: array[name] for name in array.dtype.names}
        data_dict['color'] = np.full_like(array, fill_value=DEFAULT_COLORS['pfact'], dtype='<U7')

        data_source = DataSource(data_dict, x='r_number', tags=['mapping', 'pfact', 'deltaG'],
                                 renderer='circle', size=10, name='global_fit')
        self.parent.publish_data('global_fit', data_source)

    def _action_trigger(self):
        # Force a redraw of the deltaG figure.
        deltaG_figure = self.parent.figure_panels['DeltaGFigure']
        deltaG_figure.bk_pane.param.trigger('object')

    def _action_runtime(self):
        # Mean of an empty array: numpy emits a RuntimeWarning and yields nan.
        result = np.mean([])
1c360e04d5d93c7aa7945e000a7c9be1dceb31d0 | 4,681 | py | Python | nrfupytesteos/nrfu_lag_status.py | wasabi222/nanog77-nrfu-tutorial | 43d5cfe488c3a231f03f979b47c3e61b021f2936 | [
"Apache-2.0"
] | 33 | 2019-10-29T18:32:48.000Z | 2022-03-20T19:57:59.000Z | nrfupytesteos/nrfu_lag_status.py | wasabi222/nanog77-nrfu-tutorial | 43d5cfe488c3a231f03f979b47c3e61b021f2936 | [
"Apache-2.0"
] | null | null | null | nrfupytesteos/nrfu_lag_status.py | wasabi222/nanog77-nrfu-tutorial | 43d5cfe488c3a231f03f979b47c3e61b021f2936 | [
"Apache-2.0"
] | 12 | 2019-10-25T19:36:44.000Z | 2021-12-07T10:21:07.000Z | # Copyright 2019 Jeremy Schulman, nwkautomaniac@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains the NRFU test function for LAG interfaces. It ensures that only the interfaces
that are defined in the test case are actually present on the device.
Examples - testcase
--------
{
"test-case": "test-lag-status",
"dut": "ob050301.nyc1",
"params": {
"name": "Port-Channel2000"
},
"expected": {
"interfaces": [
"Ethernet51",
"Ethernet52"
]
}
}
Examples - "show lacp neighbor"
--------
{
"portChannels": {
"Port-Channel2000": {
"interfaces": {
"Ethernet51": {
"partnerPortPriority": 32768,
"partnerPortState": {
"collecting": true,
"distributing": true,
"synchronization": true,
"defaulted": false,
"timeout": false,
"activity": true,
"expired": false,
"aggregation": true
},
"partnerSystemId": "8000,76-83-ef-ed-66-5d",
"partnerOperKey": "0x000a",
"actorPortStatus": "bundled",
"partnerPortId": 15
},
"""
from nrfupytesteos import nrfu_exc as exc
TEST_CASE_NAME = "test-lag-status"


def make_testcase(dut, lag_name, interfaces):
    """Assemble a LAG-status test-case dict for the given DUT and LAG."""
    case = {"test-case": TEST_CASE_NAME, "dut": dut}
    case["params"] = {"name": lag_name}
    case["expected"] = {"interfaces": interfaces}
    return case
def snapshot_testdata(device):
    """Return the parsed 'show lacp neighbor' output from the device."""
    return device.execute('show lacp neighbor')
def snapshot_testcases(device):
    """Build one LAG-status test case per port-channel currently on the device."""
    lags = snapshot_testdata(device).get('portChannels')
    cases = []
    for lag_name, lag_data in lags.items():
        cases.append(make_testcase(dut=device.hostname,
                                   lag_name=lag_name,
                                   interfaces=list(lag_data['interfaces'])))
    return cases
def name_test(item):
    """used for pytest verbose output"""
    params = item['params']
    return str(params['name'])
def test_lag_status(device, actual, testcase):
"""
Verifies the operational status of the LAG.
Parameters
----------
device: Device instance (unused)
actual: dict
The "show lacp neighbor" dataset
testcase: dict
The testcase dataset
Returns
-------
True when the test passes
Raises
------
MissingError:
When an expected interface is missing
UnexpectedError:
When an interface is present that does not belong
MismatchError:
When an interface is not in the "good" status
"""
lag_name = testcase['params']['name']
actual_lag = actual['portChannels'].get(lag_name)
if not actual_lag:
raise exc.MissingError(missing=lag_name)
actual_if_names = set(actual_lag['interfaces'])
exp_if_names = set(testcase['expected']['interfaces'])
# first see if there are any missing interfaces,
# if so raise a mismatch error.
missing_if_names = exp_if_names - actual_if_names
if missing_if_names:
raise exc.MismatchError(
expected=exp_if_names,
actual=actual_if_names)
# next check to see if there are any interfaces that should not be here
unexp_if_names = actual_if_names - exp_if_names
if unexp_if_names:
raise exc.UnexpectedError(unexpected=unexp_if_names)
# now for each interface, ensure that it is in the "good" state, which is
# "bundled"
if not actual_lag['interfaces']:
raise exc.MismatchError(
'No interfaces found in LAG',
expected=exp_if_names,
actual=""
)
for if_name, if_data in actual_lag['interfaces'].items():
port_status = if_data["actorPortStatus"]
if port_status != "bundled":
raise exc.MismatchError(
expected='bundled',
actual=port_status
)
return True
| 27.215116 | 98 | 0.584277 |
from nrfupytesteos import nrfu_exc as exc
TEST_CASE_NAME = "test-lag-status"


def make_testcase(dut, lag_name, interfaces):
    """Build a LAG-status test-case dict for device `dut` and LAG `lag_name`."""
    return {
        "test-case": TEST_CASE_NAME,
        "dut": dut,
        "params": {
            "name": lag_name
        },
        "expected": {
            "interfaces": interfaces
        }
    }
def snapshot_testdata(device):
    """Return the parsed 'show lacp neighbor' output from the device."""
    return device.execute('show lacp neighbor')
def snapshot_testcases(device):
    """Create one LAG-status test case per port-channel found on the device."""
    data = snapshot_testdata(device)
    lags = data.get('portChannels')
    return [
        make_testcase(dut=device.hostname,
                      lag_name=lag_name,
                      interfaces=list(lag_data['interfaces']))
        for lag_name, lag_data in lags.items()
    ]
def name_test(item):
    """Return the test case's LAG name (used for pytest verbose output)."""
    return f"{item['params']['name']}"
def test_lag_status(device, actual, testcase):
    """Verify LAG membership and LACP 'bundled' status; return True on pass,
    raise Missing/Mismatch/Unexpected errors on failure."""
    lag_name = testcase['params']['name']
    actual_lag = actual['portChannels'].get(lag_name)

    # The LAG itself must exist on the device.
    if not actual_lag:
        raise exc.MissingError(missing=lag_name)

    actual_if_names = set(actual_lag['interfaces'])
    exp_if_names = set(testcase['expected']['interfaces'])

    # Expected member interfaces that are absent -> mismatch.
    missing_if_names = exp_if_names - actual_if_names
    if missing_if_names:
        raise exc.MismatchError(
            expected=exp_if_names,
            actual=actual_if_names)

    # Member interfaces that should not be present -> unexpected.
    unexp_if_names = actual_if_names - exp_if_names
    if unexp_if_names:
        raise exc.UnexpectedError(unexpected=unexp_if_names)

    # The LAG must actually contain interfaces ...
    if not actual_lag['interfaces']:
        raise exc.MismatchError(
            'No interfaces found in LAG',
            expected=exp_if_names,
            actual=""
        )

    # ... and every member must be in the "good" ("bundled") LACP state.
    for if_name, if_data in actual_lag['interfaces'].items():
        port_status = if_data["actorPortStatus"]
        if port_status != "bundled":
            raise exc.MismatchError(
                expected='bundled',
                actual=port_status
            )

    return True
| true | true |
1c360f1ec159c294a3c41e64d217af5c39f22a28 | 1,118 | py | Python | spirit/urls/topic_moderate.py | rterehov/Spirit | 515894001da9d499852b7ebde25892d290e26c38 | [
"MIT"
] | null | null | null | spirit/urls/topic_moderate.py | rterehov/Spirit | 515894001da9d499852b7ebde25892d290e26c38 | [
"MIT"
] | null | null | null | spirit/urls/topic_moderate.py | rterehov/Spirit | 515894001da9d499852b7ebde25892d290e26c38 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from spirit.views.topic_moderate import TopicModerateDelete, TopicModerateUnDelete, \
TopicModerateLock, TopicModerateUnLock, TopicModeratePin, TopicModerateUnPin, \
TopicModerateGlobalPin, TopicModerateGlobalUnPin
# URL routes for topic moderation actions; every view takes the topic primary key.
urlpatterns = patterns(
    "spirit.views.topic_moderate",  # view-prefix argument of the legacy Django `patterns()` helper
    url(r'^delete/(?P<pk>\d+)/$', TopicModerateDelete.as_view(), name='topic-delete'),
    url(r'^undelete/(?P<pk>\d+)/$', TopicModerateUnDelete.as_view(), name='topic-undelete'),
    url(r'^lock/(?P<pk>\d+)/$', TopicModerateLock.as_view(), name='topic-lock'),
    url(r'^unlock/(?P<pk>\d+)/$', TopicModerateUnLock.as_view(), name='topic-unlock'),
    url(r'^pin/(?P<pk>\d+)/$', TopicModeratePin.as_view(), name='topic-pin'),
    url(r'^unpin/(?P<pk>\d+)/$', TopicModerateUnPin.as_view(), name='topic-unpin'),
    url(r'^globallypin/(?P<pk>\d+)/$', TopicModerateGlobalPin.as_view(), name='topic-global-pin'),
    url(r'^ungloballypin/(?P<pk>\d+)/$', TopicModerateGlobalUnPin.as_view(), name='topic-global-unpin'),
)
| 41.407407 | 104 | 0.690519 |
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from spirit.views.topic_moderate import TopicModerateDelete, TopicModerateUnDelete, \
TopicModerateLock, TopicModerateUnLock, TopicModeratePin, TopicModerateUnPin, \
TopicModerateGlobalPin, TopicModerateGlobalUnPin
# Topic moderation routes (delete/lock/pin and their inverses), all keyed by topic pk.
urlpatterns = patterns(
    "spirit.views.topic_moderate",  # legacy `patterns()` view-prefix argument
    url(r'^delete/(?P<pk>\d+)/$', TopicModerateDelete.as_view(), name='topic-delete'),
    url(r'^undelete/(?P<pk>\d+)/$', TopicModerateUnDelete.as_view(), name='topic-undelete'),
    url(r'^lock/(?P<pk>\d+)/$', TopicModerateLock.as_view(), name='topic-lock'),
    url(r'^unlock/(?P<pk>\d+)/$', TopicModerateUnLock.as_view(), name='topic-unlock'),
    url(r'^pin/(?P<pk>\d+)/$', TopicModeratePin.as_view(), name='topic-pin'),
    url(r'^unpin/(?P<pk>\d+)/$', TopicModerateUnPin.as_view(), name='topic-unpin'),
    url(r'^globallypin/(?P<pk>\d+)/$', TopicModerateGlobalPin.as_view(), name='topic-global-pin'),
    url(r'^ungloballypin/(?P<pk>\d+)/$', TopicModerateGlobalUnPin.as_view(), name='topic-global-unpin'),
)
| true | true |
1c360fcb188120bdf4875fd0a5d6d490fd8a5027 | 3,984 | py | Python | fastreid/evaluation/reid_evaluation.py | xiaomingzhid/SSKD | 806d6db5c5dea4e018e49ee30d7bfc7b95977ffe | [
"Apache-2.0"
] | 19 | 2021-09-10T02:16:29.000Z | 2022-03-27T12:47:46.000Z | fastreid/evaluation/reid_evaluation.py | liuwuhomepage/sskd | 806d6db5c5dea4e018e49ee30d7bfc7b95977ffe | [
"Apache-2.0"
] | 5 | 2021-09-27T03:52:12.000Z | 2021-12-29T09:13:40.000Z | fastreid/evaluation/reid_evaluation.py | liuwuhomepage/sskd | 806d6db5c5dea4e018e49ee30d7bfc7b95977ffe | [
"Apache-2.0"
] | 3 | 2021-12-23T16:44:44.000Z | 2022-03-27T12:47:47.000Z | # encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
import copy
import logging
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
import pdb
from fastreid.utils import comm
from fastreid.utils.compute_dist import build_dist
from .evaluator import DatasetEvaluator
from .query_expansion import aqe
from .rank import evaluate_rank
from .roc import evaluate_roc
logger = logging.getLogger(__name__)
class ReidEvaluator(DatasetEvaluator):
    """Person re-identification evaluator: accumulates features over query+gallery
    batches, then computes CMC / mAP / mINP (and optionally ROC) metrics."""

    def __init__(self, cfg, num_query, output_dir=None):
        # The first `num_query` processed samples are queries; the rest are gallery.
        self.cfg = cfg
        self._num_query = num_query
        self._output_dir = output_dir

        self.features = []
        self.pids = []
        self.camids = []

    def reset(self):
        """Clear accumulated features and ids before a new evaluation run."""
        self.features = []
        self.pids = []
        self.camids = []

    def process(self, inputs, outputs):
        """Store person ids, camera ids and the (CPU) feature batch."""
        self.pids.extend(inputs["targets"])
        self.camids.extend(inputs["camids"])
        self.features.append(outputs.cpu())

    def evaluate(self):
        """Gather results across workers and compute retrieval metrics.

        Returns an OrderedDict (deep copy) with Rank-1/5/10, mAP, mINP, a combined
        'metric' ((mAP + Rank-1)/2), and optionally TPR@FPR points. Non-main
        workers return an empty dict.
        """
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            pids = self.pids
            camids = self.camids

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            # Average query expansion refines both feature sets before ranking.
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)

        dist = build_dist(query_features, gallery_features, self.cfg.TEST.METRIC)

        if self.cfg.TEST.RERANK.ENABLED:
            # Re-ranking: blend Jaccard distance with the original distance matrix.
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA

            if self.cfg.TEST.METRIC == "cosine":
                query_features = F.normalize(query_features, dim=1)
                gallery_features = F.normalize(gallery_features, dim=1)

            rerank_dist = build_dist(query_features, gallery_features, metric="jaccard", k1=k1, k2=k2)
            dist = rerank_dist * (1 - lambda_value) + dist * lambda_value

        cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)

        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1] * 100
        self._results['mAP'] = mAP * 100
        self._results['mINP'] = mINP * 100
        # Combined headline metric: average of mAP and Rank-1, as a percentage.
        self._results["metric"] = (mAP + cmc[0]) / 2 * 100

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_pids, gallery_pids, query_camids, gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                # TPR at the threshold whose FPR is closest to the requested value.
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
| 33.478992 | 106 | 0.615462 |
import copy
import logging
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
import pdb
from fastreid.utils import comm
from fastreid.utils.compute_dist import build_dist
from .evaluator import DatasetEvaluator
from .query_expansion import aqe
from .rank import evaluate_rank
from .roc import evaluate_roc
logger = logging.getLogger(__name__)
class ReidEvaluator(DatasetEvaluator):
    def __init__(self, cfg, num_query, output_dir=None):
        # The first `num_query` processed samples are queries; the rest are gallery.
        self.cfg = cfg
        self._num_query = num_query
        self._output_dir = output_dir

        self.features = []
        self.pids = []
        self.camids = []
    def reset(self):
        """Clear accumulated features and ids before a new evaluation run."""
        self.features = []
        self.pids = []
        self.camids = []
    def process(self, inputs, outputs):
        """Store person ids, camera ids and the (CPU) feature batch."""
        self.pids.extend(inputs["targets"])
        self.camids.extend(inputs["camids"])
        self.features.append(outputs.cpu())
def evaluate(self):
if comm.get_world_size() > 1:
comm.synchronize()
features = comm.gather(self.features)
features = sum(features, [])
pids = comm.gather(self.pids)
pids = sum(pids, [])
camids = comm.gather(self.camids)
camids = sum(camids, [])
if not comm.is_main_process(): return {}
else:
features = self.features
pids = self.pids
camids = self.camids
features = torch.cat(features, dim=0)
query_features = features[:self._num_query]
query_pids = np.asarray(pids[:self._num_query])
query_camids = np.asarray(camids[:self._num_query])
gallery_features = features[self._num_query:]
gallery_pids = np.asarray(pids[self._num_query:])
gallery_camids = np.asarray(camids[self._num_query:])
self._results = OrderedDict()
if self.cfg.TEST.AQE.ENABLED:
logger.info("Test with AQE setting")
qe_time = self.cfg.TEST.AQE.QE_TIME
qe_k = self.cfg.TEST.AQE.QE_K
alpha = self.cfg.TEST.AQE.ALPHA
query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)
dist = build_dist(query_features, gallery_features, self.cfg.TEST.METRIC)
if self.cfg.TEST.RERANK.ENABLED:
logger.info("Test with rerank setting")
k1 = self.cfg.TEST.RERANK.K1
k2 = self.cfg.TEST.RERANK.K2
lambda_value = self.cfg.TEST.RERANK.LAMBDA
if self.cfg.TEST.METRIC == "cosine":
query_features = F.normalize(query_features, dim=1)
gallery_features = F.normalize(gallery_features, dim=1)
rerank_dist = build_dist(query_features, gallery_features, metric="jaccard", k1=k1, k2=k2)
dist = rerank_dist * (1 - lambda_value) + dist * lambda_value
cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)
mAP = np.mean(all_AP)
mINP = np.mean(all_INP)
for r in [1, 5, 10]:
self._results['Rank-{}'.format(r)] = cmc[r - 1] * 100
self._results['mAP'] = mAP * 100
self._results['mINP'] = mINP * 100
self._results["metric"] = (mAP + cmc[0]) / 2 * 100
if self.cfg.TEST.ROC_ENABLED:
scores, labels = evaluate_roc(dist, query_pids, gallery_pids, query_camids, gallery_camids)
fprs, tprs, thres = metrics.roc_curve(labels, scores)
for fpr in [1e-4, 1e-3, 1e-2]:
ind = np.argmin(np.abs(fprs - fpr))
self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]
return copy.deepcopy(self._results)
| true | true |
1c3610639e476b377d7c9d09ba251b29f5ac72d2 | 17,624 | py | Python | anomalib/data/btech.py | dreaquil/anomalib | 0199f05e09a67967c8512a923059ae0105f849a2 | [
"Apache-2.0"
] | null | null | null | anomalib/data/btech.py | dreaquil/anomalib | 0199f05e09a67967c8512a923059ae0105f849a2 | [
"Apache-2.0"
] | null | null | null | anomalib/data/btech.py | dreaquil/anomalib | 0199f05e09a67967c8512a923059ae0105f849a2 | [
"Apache-2.0"
] | null | null | null | """BTech Dataset.
This script contains PyTorch Lightning DataModule for the BTech dataset.
If the dataset is not on the file system, the script downloads and
extracts the dataset and create PyTorch data objects.
"""
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import logging
import shutil
import zipfile
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
from urllib.request import urlretrieve
import albumentations as A
import cv2
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch import Tensor
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision.datasets.folder import VisionDataset
from tqdm import tqdm
from anomalib.data.inference import InferenceDataset
from anomalib.data.utils import DownloadProgressBar, read_image
from anomalib.data.utils.split import (
create_validation_set_from_test_set,
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
logger = logging.getLogger(name="Dataset: BTech")
logger.setLevel(logging.DEBUG)
def make_btech_dataset(
    path: Path,
    split: Optional[str] = None,
    split_ratio: float = 0.1,
    seed: int = 0,
    create_validation_set: bool = False,
) -> DataFrame:
    """Create BTech samples by parsing the BTech data file structure.

    The files are expected to follow the structure:
        path/to/dataset/split/category/image_filename.png
        path/to/dataset/ground_truth/category/mask_filename.png

    Args:
        path (Path): Path to dataset
        split (str, optional): Dataset split (ie., either train or test). Defaults to None.
        split_ratio (float, optional): Ratio to split normal training images and add to the
            test set in case test set doesn't contain any normal images.
            Defaults to 0.1.
        seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
        create_validation_set (bool, optional): Boolean to create a validation set from the test set.
            BTech dataset does not contain a validation set. Those wanting to create a validation set
            could set this flag to ``True``.

    Raises:
        RuntimeError: If no ``.bmp``/``.png`` files are found under ``path``.

    Returns:
        DataFrame: an output dataframe containing samples for the requested split (ie., train or test)
    """
    samples_list = [
        (str(path),) + filename.parts[-3:] for filename in path.glob("**/*") if filename.suffix in (".bmp", ".png")
    ]
    if len(samples_list) == 0:
        raise RuntimeError(f"Found 0 images in {path}")

    samples = pd.DataFrame(samples_list, columns=["path", "split", "label", "image_path"])
    # Ground-truth masks are handled through the mask_path column, not as samples.
    samples = samples[samples.split != "ground_truth"]

    # Create mask_path column. Masks are always stored as png, so replace the
    # image extension with ".png".
    # NOTE: the previous ``.str.rstrip("png")`` stripped a *character set*, so
    # e.g. "105.bmp" became "105.bm" and the mask path ended up as
    # "105.bm.png". Splitting on the last dot removes the extension correctly
    # for both .bmp and .png filenames.
    samples["mask_path"] = (
        samples.path
        + "/ground_truth/"
        + samples.label
        + "/"
        + samples.image_path.str.rsplit(".", n=1).str[0]
        + ".png"
    )

    # Modify image_path column by converting to absolute path
    samples["image_path"] = samples.path + "/" + samples.split + "/" + samples.label + "/" + samples.image_path

    # Split the normal images in training set if test set doesn't
    # contain any normal images. This is needed because AUC score
    # cannot be computed based on 1-class
    if sum((samples.split == "test") & (samples.label == "ok")) == 0:
        samples = split_normal_images_in_train_set(samples, split_ratio, seed)

    # Good images don't have mask
    samples.loc[(samples.split == "test") & (samples.label == "ok"), "mask_path"] = ""

    # Create label index for normal (0) and anomalous (1) images.
    samples.loc[(samples.label == "ok"), "label_index"] = 0
    samples.loc[(samples.label != "ok"), "label_index"] = 1
    samples.label_index = samples.label_index.astype(int)

    if create_validation_set:
        samples = create_validation_set_from_test_set(samples, seed=seed)

    # Get the data frame for the split.
    if split is not None and split in ["train", "val", "test"]:
        samples = samples[samples.split == split]
        samples = samples.reset_index(drop=True)

    return samples
class BTech(VisionDataset):
    """BTech PyTorch Dataset.

    Wraps the samples dataframe produced by ``make_btech_dataset`` and returns
    pre-processed image (and, for the segmentation task, mask) tensors per index.
    """
    def __init__(
        self,
        root: Union[Path, str],
        category: str,
        pre_process: PreProcessor,
        split: str,
        task: str = "segmentation",
        seed: int = 0,
        create_validation_set: bool = False,
    ) -> None:
        """Btech Dataset class.

        Args:
            root: Path to the BTech dataset
            category: Name of the BTech category.
            pre_process: Pre-processing object containing the albumentation compose.
            split: 'train', 'val' or 'test'
            task: ``classification`` or ``segmentation``
            seed: seed used for the random subset splitting
            create_validation_set: Create a validation subset in addition to the train and test subsets
        """
        super().__init__(root)
        self.root = Path(root) if isinstance(root, str) else root
        self.category: str = category
        self.split = split
        self.task = task
        self.pre_process = pre_process
        # Dataframe of image/mask paths and labels for the requested split.
        self.samples = make_btech_dataset(
            path=self.root / category,
            split=self.split,
            seed=seed,
            create_validation_set=create_validation_set,
        )
    def __len__(self) -> int:
        """Get length of the dataset."""
        return len(self.samples)
    def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]:
        """Get dataset item for the index ``index``.

        Args:
            index (int): Index to get the item.

        Returns:
            Dict[str, Union[str, Tensor]]: Dict with the image tensor; for the
                val/test splits the image path and label are added, and for the
                segmentation task the mask path and mask tensor as well.
        """
        item: Dict[str, Union[str, Tensor]] = {}
        image_path = self.samples.image_path[index]
        image = read_image(image_path)
        pre_processed = self.pre_process(image=image)
        item = {"image": pre_processed["image"]}
        if self.split in ["val", "test"]:
            label_index = self.samples.label_index[index]
            item["image_path"] = image_path
            item["label"] = label_index
            if self.task == "segmentation":
                mask_path = self.samples.mask_path[index]
                # Only Anomalous (1) images has masks in BTech dataset.
                # Therefore, create empty mask for Normal (0) images.
                if label_index == 0:
                    mask = np.zeros(shape=image.shape[:2])
                else:
                    # Masks are read as grayscale (flags=0) and scaled to [0, 1].
                    mask = cv2.imread(mask_path, flags=0) / 255.0
                # Pre-process image and mask together; overwrites the image
                # tensor computed above.
                pre_processed = self.pre_process(image=image, mask=mask)
                item["mask_path"] = mask_path
                item["image"] = pre_processed["image"]
                item["mask"] = pre_processed["mask"]
        return item
class BTechDataModule(LightningDataModule):
    """BTechDataModule Lightning Data Module.

    Downloads and prepares the BTech dataset (``prepare_data``), builds the
    train/val/test/predict datasets (``setup``) and exposes the matching
    dataloaders.
    """
    def __init__(
        self,
        root: str,
        category: str,
        # TODO: Remove default values. IAAALD-211
        image_size: Optional[Union[int, Tuple[int, int]]] = None,
        train_batch_size: int = 32,
        test_batch_size: int = 32,
        num_workers: int = 8,
        task: str = "segmentation",
        transform_config_train: Optional[Union[str, A.Compose]] = None,
        transform_config_val: Optional[Union[str, A.Compose]] = None,
        seed: int = 0,
        create_validation_set: bool = False,
    ) -> None:
        """Instantiate BTech Lightning Data Module.

        Args:
            root: Path to the BTech dataset
            category: Name of the BTech category.
            image_size: Variable to which image is resized.
            train_batch_size: Training batch size.
            test_batch_size: Testing batch size.
            num_workers: Number of workers.
            task: ``classification`` or ``segmentation``
            transform_config_train: Config for pre-processing during training.
            transform_config_val: Config for pre-processing during validation.
            seed: seed used for the random subset splitting
            create_validation_set: Create a validation subset in addition to the train and test subsets
        """
        super().__init__()
        self.root = root if isinstance(root, Path) else Path(root)
        self.category = category
        self.dataset_path = self.root / self.category
        self.transform_config_train = transform_config_train
        self.transform_config_val = transform_config_val
        self.image_size = image_size
        # When only a training transform config is given, reuse it for validation.
        if self.transform_config_train is not None and self.transform_config_val is None:
            self.transform_config_val = self.transform_config_train
        self.pre_process_train = PreProcessor(config=self.transform_config_train, image_size=self.image_size)
        self.pre_process_val = PreProcessor(config=self.transform_config_val, image_size=self.image_size)
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.num_workers = num_workers
        self.create_validation_set = create_validation_set
        self.task = task
        self.seed = seed
        # Declarations only; the datasets are created in setup().
        self.train_data: Dataset
        self.test_data: Dataset
        if create_validation_set:
            self.val_data: Dataset
        self.inference_data: Dataset
    def prepare_data(self) -> None:
        """Download the dataset if not available."""
        if (self.root / self.category).is_dir():
            logging.info("Found the dataset.")
        else:
            zip_filename = self.root.parent / "btad.zip"
            logging.info("Downloading the BTech dataset.")
            with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="BTech") as progress_bar:
                urlretrieve(
                    url="https://avires.dimi.uniud.it/papers/btad/btad.zip",
                    filename=zip_filename,
                    reporthook=progress_bar.update_to,
                )  # nosec
            logging.info("Extracting the dataset.")
            with zipfile.ZipFile(zip_filename, "r") as zip_file:
                zip_file.extractall(self.root.parent)
            logging.info("Renaming the dataset directory")
            shutil.move(src=str(self.root.parent / "BTech_Dataset_transformed"), dst=str(self.root))
            # NOTE: Each BTech category has different image extension as follows
            # | Category | Image | Mask |
            # |----------|-------|------|
            # | 01       | bmp   | png  |
            # | 02       | png   | png  |
            # | 03       | bmp   | bmp  |
            # To avoid any conflict, the following script converts all the extensions to png.
            # This solution works fine, but it's also possible to properly read the bmp and
            # png filenames from categories in `make_btech_dataset` function.
            logging.info("Convert the bmp formats to png to have consistent image extensions")
            for filename in tqdm(self.root.glob("**/*.bmp"), desc="Converting bmp to png"):
                image = cv2.imread(str(filename))
                cv2.imwrite(str(filename.with_suffix(".png")), image)
                filename.unlink()
            logging.info("Cleaning the tar file")
            zip_filename.unlink()
    def setup(self, stage: Optional[str] = None) -> None:
        """Setup train, validation and test data.

        BTech dataset uses BTech dataset structure, which is the reason for
        using `anomalib.data.btech.BTech` class to get the dataset items.

        Args:
            stage: Optional[str]: Train/Val/Test stages. (Default value = None)
        """
        if stage in (None, "fit"):
            self.train_data = BTech(
                root=self.root,
                category=self.category,
                pre_process=self.pre_process_train,
                split="train",
                task=self.task,
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )
        if self.create_validation_set:
            self.val_data = BTech(
                root=self.root,
                category=self.category,
                pre_process=self.pre_process_val,
                split="val",
                task=self.task,
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )
        self.test_data = BTech(
            root=self.root,
            category=self.category,
            pre_process=self.pre_process_val,
            split="test",
            task=self.task,
            seed=self.seed,
            create_validation_set=self.create_validation_set,
        )
        if stage == "predict":
            self.inference_data = InferenceDataset(
                path=self.root, image_size=self.image_size, transform_config=self.transform_config_val
            )
    def train_dataloader(self) -> TRAIN_DATALOADERS:
        """Get train dataloader."""
        return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers)
    def val_dataloader(self) -> EVAL_DATALOADERS:
        """Get validation dataloader."""
        # Fall back to the test set when no dedicated validation split was created.
        dataset = self.val_data if self.create_validation_set else self.test_data
        return DataLoader(dataset=dataset, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
    def test_dataloader(self) -> EVAL_DATALOADERS:
        """Get test dataloader."""
        return DataLoader(self.test_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
    def predict_dataloader(self) -> EVAL_DATALOADERS:
        """Get predict dataloader."""
        return DataLoader(
            self.inference_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers
        )
| 38.819383 | 120 | 0.611439 |
import logging
import shutil
import zipfile
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
from urllib.request import urlretrieve
import albumentations as A
import cv2
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch import Tensor
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision.datasets.folder import VisionDataset
from tqdm import tqdm
from anomalib.data.inference import InferenceDataset
from anomalib.data.utils import DownloadProgressBar, read_image
from anomalib.data.utils.split import (
create_validation_set_from_test_set,
split_normal_images_in_train_set,
)
from anomalib.pre_processing import PreProcessor
logger = logging.getLogger(name="Dataset: BTech")
logger.setLevel(logging.DEBUG)
def make_btech_dataset(
    path: Path,
    split: Optional[str] = None,
    split_ratio: float = 0.1,
    seed: int = 0,
    create_validation_set: bool = False,
) -> DataFrame:
    """Create BTech samples by parsing the BTech data file structure.

    Args:
        path (Path): Path to dataset.
        split (str, optional): Dataset split ("train", "val" or "test"); None keeps all.
        split_ratio (float, optional): Ratio of normal training images moved to the
            test set when the test set contains no normal images.
        seed (int, optional): Random seed for reproducible splitting.
        create_validation_set (bool, optional): Carve a validation set out of the test set.

    Raises:
        RuntimeError: If no ``.bmp``/``.png`` files are found under ``path``.

    Returns:
        DataFrame: samples for the requested split.
    """
    samples_list = [
        (str(path),) + filename.parts[-3:] for filename in path.glob("**/*") if filename.suffix in (".bmp", ".png")
    ]
    if len(samples_list) == 0:
        raise RuntimeError(f"Found 0 images in {path}")
    samples = pd.DataFrame(samples_list, columns=["path", "split", "label", "image_path"])
    samples = samples[samples.split != "ground_truth"]
    # Masks are always png: replace the image extension with ".png".
    # NOTE: ``str.rstrip("png")`` stripped a *character set* (so "105.bmp"
    # became "105.bm"); split on the last dot to drop the extension instead.
    samples["mask_path"] = (
        samples.path
        + "/ground_truth/"
        + samples.label
        + "/"
        + samples.image_path.str.rsplit(".", n=1).str[0]
        + ".png"
    )
    samples["image_path"] = samples.path + "/" + samples.split + "/" + samples.label + "/" + samples.image_path
    # contain any normal images. This is needed because AUC score
    # cannot be computed based on 1-class
    if sum((samples.split == "test") & (samples.label == "ok")) == 0:
        samples = split_normal_images_in_train_set(samples, split_ratio, seed)
    # Good images don't have mask
    samples.loc[(samples.split == "test") & (samples.label == "ok"), "mask_path"] = ""
    samples.loc[(samples.label == "ok"), "label_index"] = 0
    samples.loc[(samples.label != "ok"), "label_index"] = 1
    samples.label_index = samples.label_index.astype(int)
    if create_validation_set:
        samples = create_validation_set_from_test_set(samples, seed=seed)
    if split is not None and split in ["train", "val", "test"]:
        samples = samples[samples.split == split]
        samples = samples.reset_index(drop=True)
    return samples
class BTech(VisionDataset):
    """BTech PyTorch Dataset.

    Returns pre-processed image (and, for segmentation, mask) tensors per index,
    backed by the samples dataframe from ``make_btech_dataset``.
    """
    def __init__(
        self,
        root: Union[Path, str],
        category: str,
        pre_process: PreProcessor,
        split: str,
        task: str = "segmentation",
        seed: int = 0,
        create_validation_set: bool = False,
    ) -> None:
        """Initialise the dataset for one BTech ``category`` and ``split``.

        Args:
            root: Path to the BTech dataset.
            category: Name of the BTech category.
            pre_process: Pre-processing object (albumentations compose).
            split: 'train', 'val' or 'test'.
            task: ``classification`` or ``segmentation``.
            seed: Seed used for the random subset splitting.
            create_validation_set: Also carve a validation subset from the test set.
        """
        super().__init__(root)
        self.root = Path(root) if isinstance(root, str) else root
        self.category: str = category
        self.split = split
        self.task = task
        self.pre_process = pre_process
        # Dataframe of image/mask paths and labels for the requested split.
        self.samples = make_btech_dataset(
            path=self.root / category,
            split=self.split,
            seed=seed,
            create_validation_set=create_validation_set,
        )
    def __len__(self) -> int:
        """Return the number of samples."""
        return len(self.samples)
    def __getitem__(self, index: int) -> Dict[str, Union[str, Tensor]]:
        """Return the item at ``index``.

        Training items contain only the image tensor; val/test items also carry
        the image path and label, plus mask path/mask for the segmentation task.
        """
        item: Dict[str, Union[str, Tensor]] = {}
        image_path = self.samples.image_path[index]
        image = read_image(image_path)
        pre_processed = self.pre_process(image=image)
        item = {"image": pre_processed["image"]}
        if self.split in ["val", "test"]:
            label_index = self.samples.label_index[index]
            item["image_path"] = image_path
            item["label"] = label_index
            if self.task == "segmentation":
                mask_path = self.samples.mask_path[index]
                # Only anomalous (1) images have masks; synthesize an all-zero
                # mask for normal (0) images.
                if label_index == 0:
                    mask = np.zeros(shape=image.shape[:2])
                else:
                    # Grayscale read (flags=0), scaled to [0, 1].
                    mask = cv2.imread(mask_path, flags=0) / 255.0
                # Pre-process image and mask together; overwrites the image
                # tensor computed above.
                pre_processed = self.pre_process(image=image, mask=mask)
                item["mask_path"] = mask_path
                item["image"] = pre_processed["image"]
                item["mask"] = pre_processed["mask"]
        return item
class BTechDataModule(LightningDataModule):
    """Lightning data module for the BTech dataset.

    Downloads/prepares the data (``prepare_data``), builds the datasets
    (``setup``) and exposes train/val/test/predict dataloaders.
    """
    def __init__(
        self,
        root: str,
        category: str,
        image_size: Optional[Union[int, Tuple[int, int]]] = None,
        train_batch_size: int = 32,
        test_batch_size: int = 32,
        num_workers: int = 8,
        task: str = "segmentation",
        transform_config_train: Optional[Union[str, A.Compose]] = None,
        transform_config_val: Optional[Union[str, A.Compose]] = None,
        seed: int = 0,
        create_validation_set: bool = False,
    ) -> None:
        """Store configuration and build the train/val pre-processors.

        Args:
            root: Path to the BTech dataset.
            category: Name of the BTech category.
            image_size: Size images are resized to.
            train_batch_size: Training batch size.
            test_batch_size: Testing batch size.
            num_workers: Number of dataloader workers.
            task: ``classification`` or ``segmentation``.
            transform_config_train: Pre-processing config for training.
            transform_config_val: Pre-processing config for validation.
            seed: Seed used for the random subset splitting.
            create_validation_set: Also carve a validation subset from the test set.
        """
        super().__init__()
        self.root = root if isinstance(root, Path) else Path(root)
        self.category = category
        self.dataset_path = self.root / self.category
        self.transform_config_train = transform_config_train
        self.transform_config_val = transform_config_val
        self.image_size = image_size
        # When only a training transform config is given, reuse it for validation.
        if self.transform_config_train is not None and self.transform_config_val is None:
            self.transform_config_val = self.transform_config_train
        self.pre_process_train = PreProcessor(config=self.transform_config_train, image_size=self.image_size)
        self.pre_process_val = PreProcessor(config=self.transform_config_val, image_size=self.image_size)
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.num_workers = num_workers
        self.create_validation_set = create_validation_set
        self.task = task
        self.seed = seed
        # Declarations only; the datasets are created in setup().
        self.train_data: Dataset
        self.test_data: Dataset
        if create_validation_set:
            self.val_data: Dataset
        self.inference_data: Dataset
    def prepare_data(self) -> None:
        """Download, extract and normalize the dataset if not already present."""
        if (self.root / self.category).is_dir():
            logging.info("Found the dataset.")
        else:
            zip_filename = self.root.parent / "btad.zip"
            logging.info("Downloading the BTech dataset.")
            with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc="BTech") as progress_bar:
                urlretrieve(
                    url="https://avires.dimi.uniud.it/papers/btad/btad.zip",
                    filename=zip_filename,
                    reporthook=progress_bar.update_to,
                )  # nosec
            logging.info("Extracting the dataset.")
            with zipfile.ZipFile(zip_filename, "r") as zip_file:
                zip_file.extractall(self.root.parent)
            logging.info("Renaming the dataset directory")
            shutil.move(src=str(self.root.parent / "BTech_Dataset_transformed"), dst=str(self.root))
            # BTech categories mix bmp/png extensions; convert everything to
            # png so `make_btech_dataset` sees consistent filenames.
            logging.info("Convert the bmp formats to png to have consistent image extensions")
            for filename in tqdm(self.root.glob("**/*.bmp"), desc="Converting bmp to png"):
                image = cv2.imread(str(filename))
                cv2.imwrite(str(filename.with_suffix(".png")), image)
                filename.unlink()
            logging.info("Cleaning the tar file")
            zip_filename.unlink()
    def setup(self, stage: Optional[str] = None) -> None:
        """Create the train/val/test/predict datasets for the given ``stage``."""
        if stage in (None, "fit"):
            self.train_data = BTech(
                root=self.root,
                category=self.category,
                pre_process=self.pre_process_train,
                split="train",
                task=self.task,
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )
        if self.create_validation_set:
            self.val_data = BTech(
                root=self.root,
                category=self.category,
                pre_process=self.pre_process_val,
                split="val",
                task=self.task,
                seed=self.seed,
                create_validation_set=self.create_validation_set,
            )
        self.test_data = BTech(
            root=self.root,
            category=self.category,
            pre_process=self.pre_process_val,
            split="test",
            task=self.task,
            seed=self.seed,
            create_validation_set=self.create_validation_set,
        )
        if stage == "predict":
            self.inference_data = InferenceDataset(
                path=self.root, image_size=self.image_size, transform_config=self.transform_config_val
            )
    def train_dataloader(self) -> TRAIN_DATALOADERS:
        """Get train dataloader."""
        return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batch_size, num_workers=self.num_workers)
    def val_dataloader(self) -> EVAL_DATALOADERS:
        """Get validation dataloader (falls back to the test set)."""
        dataset = self.val_data if self.create_validation_set else self.test_data
        return DataLoader(dataset=dataset, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
    def test_dataloader(self) -> EVAL_DATALOADERS:
        """Get test dataloader."""
        return DataLoader(self.test_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers)
    def predict_dataloader(self) -> EVAL_DATALOADERS:
        """Get predict dataloader."""
        return DataLoader(
            self.inference_data, shuffle=False, batch_size=self.test_batch_size, num_workers=self.num_workers
        )
| true | true |
1c3610a6c697df7c0a2b230729dc2c1ecd527699 | 195,851 | py | Python | src/azure-cli/azure/cli/command_modules/acs/decorator.py | susanshi/azure-cli | 11270e8c69d227a56c6d9563ed22837b0f056fb4 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/decorator.py | susanshi/azure-cli | 11270e8c69d227a56c6d9563ed22837b0f056fb4 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/decorator.py | susanshi/azure-cli | 11270e8c69d227a56c6d9563ed22837b0f056fb4 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import sys
import time
from distutils.version import StrictVersion
from typing import Any, Dict, List, Tuple, TypeVar, Union
from azure.cli.command_modules.acs._consts import (
ADDONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_OUTBOUND_TYPE_LOAD_BALANCER,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
CONST_PRIVATE_DNS_ZONE_SYSTEM,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
DecoratorMode,
)
from azure.cli.command_modules.acs.custom import (
_add_role_assignment,
_ensure_aks_acr,
_ensure_aks_service_principal,
_ensure_cluster_identity_permission_on_kubelet_identity,
_ensure_container_insights_for_monitoring,
_ensure_default_log_analytics_workspace_for_monitoring,
_get_rg_location,
_get_user_assigned_identity,
_put_managed_cluster_ensuring_permission,
create_load_balancer_profile,
set_load_balancer_sku,
subnet_role_assignment_exists,
)
from azure.cli.core import AzCommandsLoader
from azure.cli.core._profile import Profile
from azure.cli.core.azclierror import (
ArgumentUsageError,
CLIInternalError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
NoTTYError,
RequiredArgumentMissingError,
UnknownError,
)
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import truncate_text
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt, prompt_pass, prompt_y_n
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id
logger = get_logger(__name__)
# type variables
ContainerServiceClient = TypeVar("ContainerServiceClient")
Identity = TypeVar("Identity")
ManagedCluster = TypeVar("ManagedCluster")
ManagedClusterLoadBalancerProfile = TypeVar("ManagedClusterLoadBalancerProfile")
ResourceReference = TypeVar("ResourceReference")
# TODO
# remove model loading for cluster_autoscaler_profile in _validators
# add validation for all/some of the parameters involved in the getter of outbound_type/enable_addons
def format_parameter_name_to_option_name(parameter_name: str) -> str:
    """Convert a parameter-style name (snake_case) into its CLI option form.

    Parts of a parameter name are joined with underscores ("_"), while an option
    name joins them with hyphens ("-") and is prefixed with double hyphens ("--").

    :return: str
    """
    return "--{}".format(parameter_name.replace("_", "-"))
def safe_list_get(li: List, idx: int, default: Any = None) -> Any:
    """Get an element from a list without raising IndexError.

    Attempt to get the element with index idx from a list-like object li. If the
    index is invalid (such as out of range), or li is not a list at all, return
    default (whose default value is None).

    :return: an element of any type
    """
    # Previously a non-list input returned a hard-coded None, silently
    # ignoring a caller-supplied default; fall back to ``default`` in both
    # failure modes for consistency with the documented contract.
    if not isinstance(li, list):
        return default
    try:
        return li[idx]
    except IndexError:
        return default
def safe_lower(obj: Any) -> Any:
    """Lowercase obj when it is a string; otherwise pass it through unchanged.

    :return: Any
    """
    return obj.lower() if isinstance(obj, str) else obj
def check_is_msi_cluster(mc: ManagedCluster) -> bool:
    """Check `mc` object to determine whether managed identity is enabled.

    Managed identity is considered enabled when the identity type (case-folded)
    is either "systemassigned" or "userassigned".

    :return: bool
    """
    if not (mc and mc.identity and mc.identity.type is not None):
        return False
    return mc.identity.type.casefold() in ("systemassigned", "userassigned")
def validate_counts_in_autoscaler(
    node_count,
    enable_cluster_autoscaler,
    min_count,
    max_count,
    decorator_mode,
) -> None:
    """Check the validity of several count-related parameters in autoscaler.

    When the autoscaler is enabled (enable_cluster_autoscaler, which in update
    mode may reflect update_cluster_autoscaler), both min_count and max_count
    must be supplied (RequiredArgumentMissingError otherwise) and min_count must
    not exceed max_count (InvalidArgumentValueError otherwise); additionally, in
    create mode node_count must lie within [min_count, max_count]. When the
    autoscaler is not enabled, supplying either min_count or max_count raises
    RequiredArgumentMissingError.

    :return: None
    """
    if not enable_cluster_autoscaler:
        # Counts may not be supplied while the autoscaler is disabled.
        if min_count is None and max_count is None:
            return
        option_name = "--enable-cluster-autoscaler"
        if decorator_mode == DecoratorMode.UPDATE:
            option_name += " or --update-cluster-autoscaler"
        raise RequiredArgumentMissingError(
            "min-count and max-count are required for {}, please use the flag".format(
                option_name
            )
        )
    # Autoscaler enabled: both bounds are mandatory.
    if min_count is None or max_count is None:
        raise RequiredArgumentMissingError(
            "Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
        )
    if min_count > max_count:
        raise InvalidArgumentValueError(
            "Value of min-count should be less than or equal to value of max-count"
        )
    # Only the initial node count needs to fit inside the bounds at creation.
    if decorator_mode == DecoratorMode.CREATE and (
        node_count < min_count or node_count > max_count
    ):
        raise InvalidArgumentValueError(
            "node-count is not in the range of min-count and max-count"
        )
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class AKSModels:
    """Store the models used in aks_create.

    The api version of the class corresponding to a model is determined by resource_type.

    Every model class named in _MODEL_NAMES is resolved once from the "managed_clusters"
    operation group and exposed as an attribute of the same name (e.g. self.ManagedCluster).
    Load balancer related models are kept in the lb_models dictionary instead.
    """

    # model classes exposed as same-named instance attributes; keeping them in one
    # tuple replaces ~20 copies of an identical get_models call and makes it
    # impossible for one copy to drift (e.g. a typo'd operation_group)
    _MODEL_NAMES = (
        "ManagedCluster",
        "ManagedClusterWindowsProfile",
        "ManagedClusterSKU",
        "ContainerServiceNetworkProfile",
        "ContainerServiceLinuxProfile",
        "ManagedClusterServicePrincipalProfile",
        "ContainerServiceSshConfiguration",
        "ContainerServiceSshPublicKey",
        "ManagedClusterAADProfile",
        "ManagedClusterAutoUpgradeProfile",
        "ManagedClusterAgentPoolProfile",
        "ManagedClusterIdentity",
        "UserAssignedIdentity",
        "ManagedServiceIdentityUserAssignedIdentitiesValue",
        "ManagedClusterAddonProfile",
        "ManagedClusterAPIServerAccessProfile",
        "ExtendedLocation",
        "ExtendedLocationTypes",
        # not directly used
        "ManagedClusterPropertiesAutoScalerProfile",
    )

    # model classes used by the load balancer profile, stored in lb_models
    _LB_MODEL_NAMES = (
        "ManagedClusterLoadBalancerProfile",
        "ManagedClusterLoadBalancerProfileManagedOutboundIPs",
        "ManagedClusterLoadBalancerProfileOutboundIPs",
        "ManagedClusterLoadBalancerProfileOutboundIPPrefixes",
        "ResourceReference",
    )

    def __init__(
        self,
        cmd: AzCommandsLoader,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        self.__cmd = cmd
        self.resource_type = resource_type
        # resolve each model through a single helper and expose it under an
        # attribute of the same name
        for model_name in self._MODEL_NAMES:
            setattr(self, model_name, self.__get_model(model_name))
        # init load balancer models
        self.init_lb_models()

    def __get_model(self, model_name: str):
        """Resolve one model class by name for the configured api version.

        All models of this container come from the "managed_clusters" operation group.
        """
        return self.__cmd.get_models(
            model_name,
            resource_type=self.resource_type,
            operation_group="managed_clusters",
        )

    def init_lb_models(self) -> None:
        """Initialize models used by load balancer.

        The models are stored in a dictionary, the key is the model name and the value is the model type.

        :return: None
        """
        self.lb_models = {
            model_name: self.__get_model(model_name)
            for model_name in self._LB_MODEL_NAMES
        }
# pylint: disable=too-many-public-methods
class AKSContext:
"""Implement getter functions for all parameters in aks_create.
Note: One of the most basic principles is that when parameters are put into a certain profile (and further
decorated into the ManagedCluster object by AKSCreateDecorator), it shouldn't be modified any more, only
read-only operations (e.g. validation) can be performed.
This class also stores a copy of the original function parameters, some intermediate variables (such as the
subscription ID) and a reference of the ManagedCluster object.
When adding a new parameter for aks_create, please also provide a "getter" function named `get_xxx`, where `xxx` is
the parameter name. In this function, the process of obtaining parameter values, dynamic completion (optional),
and validation (optional) should be followed. The obtaining of parameter values should further follow the order
of obtaining from the ManagedCluster object or from the original value.
Attention: In case of checking the validity of parameters, make sure enable_validation is never set to True and
read_only is set to True when necessary to avoid loop calls, when using the getter function to obtain the value of
other parameters.
"""
def __init__(self, cmd: AzCliCommand, raw_parameters: Dict, decorator_mode):
self.cmd = cmd
if not isinstance(raw_parameters, dict):
raise CLIInternalError(
"Unexpected raw_parameters object with type '{}'.".format(
type(raw_parameters)
)
)
self.raw_param = raw_parameters
self.decorator_mode = decorator_mode
self.intermediates = dict()
self.mc = None
def attach_mc(self, mc: ManagedCluster) -> None:
"""Attach the ManagedCluster object to the context.
The `mc` object is only allowed to be attached once, and attaching again will raise a CLIInternalError.
:return: None
"""
if self.mc is None:
self.mc = mc
else:
msg = "the same" if self.mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the `mc` object again, the two objects are {}.".format(
msg
)
)
def get_intermediate(self, variable_name: str, default_value: Any = None) -> Any:
"""Get the value of an intermediate by its name.
Get the value from the intermediates dictionary with variable_name as the key. If variable_name does not exist,
default_value will be returned.
:return: Any
"""
if variable_name not in self.intermediates:
msg = "The intermediate '{}' does not exist, return default value '{}'.".format(
variable_name, default_value
)
logger.debug(msg)
return self.intermediates.get(variable_name, default_value)
def set_intermediate(
self, variable_name: str, value: Any, overwrite_exists: bool = False
) -> None:
"""Set the value of an intermediate by its name.
In the case that the intermediate value already exists, if overwrite_exists is enabled, the value will be
overwritten and the log will be output at the debug level, otherwise the value will not be overwritten and
the log will be output at the warning level, which by default will be output to stderr and seen by user.
:return: None
"""
if variable_name in self.intermediates:
if overwrite_exists:
msg = "The intermediate '{}' is overwritten. Original value: '{}', new value: '{}'.".format(
variable_name, self.intermediates.get(variable_name), value
)
logger.debug(msg)
self.intermediates[variable_name] = value
elif self.intermediates.get(variable_name) != value:
msg = "The intermediate '{}' already exists, but overwrite is not enabled. " \
"Original value: '{}', candidate value: '{}'.".format(
variable_name,
self.intermediates.get(variable_name),
value,
)
# warning level log will be output to the console, which may cause confusion to users
logger.warning(msg)
else:
self.intermediates[variable_name] = value
def remove_intermediate(self, variable_name: str) -> None:
"""Remove the value of an intermediate by its name.
No exception will be raised if the intermediate does not exist.
:return: None
"""
self.intermediates.pop(variable_name, None)
def get_subscription_id(self):
"""Helper function to obtain the value of subscription_id.
Note: This is not a parameter of aks_create, and it will not be decorated into the `mc` object.
If no corresponding intermediate exists, method "get_subscription_id" of class "Profile" will be called, which
depends on "az login" in advance, the returned subscription_id will be stored as an intermediate.
:return: string
"""
subscription_id = self.get_intermediate("subscription_id", None)
if not subscription_id:
subscription_id = self.cmd.cli_ctx.data.get('subscription_id')
if not subscription_id:
subscription_id = Profile(cli_ctx=self.cmd.cli_ctx).get_subscription_id()
self.cmd.cli_ctx.data['subscription_id'] = subscription_id
self.set_intermediate("subscription_id", subscription_id, overwrite_exists=True)
return subscription_id
def get_resource_group_name(self) -> str:
"""Obtain the value of resource_group_name.
Note: resource_group_name will not be decorated into the `mc` object.
The value of this parameter should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
resource_group_name = self.raw_param.get("resource_group_name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return resource_group_name
def get_name(self) -> str:
"""Obtain the value of name.
Note: name will not be decorated into the `mc` object.
The value of this parameter should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
name = self.raw_param.get("name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return name
# pylint: disable=unused-argument
def _get_location(self, read_only: bool = False, **kwargs) -> Union[str, None]:
"""Internal function to dynamically obtain the value of location according to the context.
When location is not assigned, dynamic completion will be triggerd. Function "_get_rg_location" will be called
to get the location of the provided resource group, which internally used ResourceManagementClient to send
the request.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: string or None
"""
# read the original value passed by the command
location = self.raw_param.get("location")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if self.mc and self.mc.location is not None:
location = self.mc.location
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return location
# dynamic completion
if not read_from_mc and location is None:
location = _get_rg_location(
self.cmd.cli_ctx, self.get_resource_group_name()
)
# this parameter does not need validation
return location
def get_location(self) -> Union[str, None]:
"""Dynamically obtain the value of location according to the context.
When location is not assigned, dynamic completion will be triggerd. Function "_get_rg_location" will be called
to get the location of the provided resource group, which internally used ResourceManagementClient to send
the request.
:return: string or None
"""
return self._get_location()
def get_ssh_key_value_and_no_ssh_key(self) -> Tuple[str, bool]:
"""Obtain the value of ssh_key_value and no_ssh_key.
Note: no_ssh_key will not be decorated into the `mc` object.
If the user does not explicitly specify --ssh-key-value, the validator function "validate_ssh_key" will check
the default file location "~/.ssh/id_rsa.pub", if the file exists, read its content and return. Otherise,
create a key pair at "~/.ssh/id_rsa.pub" and return the public key.
If the user provides a string-like input for --ssh-key-value, the validator function "validate_ssh_key" will
check whether it is a file path, if so, read its content and return; if it is a valid public key, return it.
Otherwise, create a key pair there and return the public key.
This function will verify the parameters by default. It will verify the validity of ssh_key_value. If parameter
no_ssh_key is set to True, verification will be skipped. Otherwise, an InvalidArgumentValueError will be raised
when the value of ssh_key_value is invalid.
:return: a tuple containing two elements: ssh_key_value of string type and no_ssh_key of bool type
"""
# ssh_key_value
# read the original value passed by the command
raw_value = self.raw_param.get("ssh_key_value")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if (
self.mc and
self.mc.linux_profile and
self.mc.linux_profile.ssh and
self.mc.linux_profile.ssh.public_keys
):
public_key_obj = safe_list_get(
self.mc.linux_profile.ssh.public_keys, 0, None
)
if public_key_obj:
value_obtained_from_mc = public_key_obj.key_data
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
ssh_key_value = value_obtained_from_mc
read_from_mc = True
else:
ssh_key_value = raw_value
# no_ssh_key
# read the original value passed by the command
no_ssh_key = self.raw_param.get("no_ssh_key")
# consistent check
if read_from_mc and no_ssh_key:
raise CLIInternalError(
"Inconsistent state detected, ssh_key_value is read from the `mc` object while no_ssh_key is enabled."
)
# these parameters do not need dynamic completion
# validation
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(
ssh_key_value
):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise InvalidArgumentValueError(
"Provided ssh key ({}) is invalid or non-existent".format(
shortened_key
)
)
return ssh_key_value, no_ssh_key
# pylint: disable=unused-argument
def _get_dns_name_prefix(
self, enable_validation: bool = False, read_only: bool = False, **kwargs
) -> Union[str, None]:
"""Internal function to dynamically obtain the value of dns_name_prefix according to the context.
When both dns_name_prefix and fqdn_subdomain are not assigned, dynamic completion will be triggerd. A default
dns_name_prefix composed of name (cluster), resource_group_name, and subscription_id will be created.
This function supports the option of enable_validation. When enabled, it will check if both dns_name_prefix and
fqdn_subdomain are assigend, if so, raise the MutuallyExclusiveArgumentError.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: string or None
"""
# read the original value passed by the command
dns_name_prefix = self.raw_param.get("dns_name_prefix")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if self.mc and self.mc.dns_prefix is not None:
dns_name_prefix = self.mc.dns_prefix
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return dns_name_prefix
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if not dns_name_prefix and not self._get_fqdn_subdomain(enable_validation=False):
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
# In case the user does not specify the parameter and it meets the conditions of automatic completion,
# necessary information is dynamically completed.
if dynamic_completion:
name = self.get_name()
resource_group_name = self.get_resource_group_name()
subscription_id = self.get_subscription_id()
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
dns_name_prefix = '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# validation
if enable_validation:
if dns_name_prefix and self._get_fqdn_subdomain(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return dns_name_prefix
def get_dns_name_prefix(self) -> Union[str, None]:
"""Dynamically obtain the value of dns_name_prefix according to the context.
When both dns_name_prefix and fqdn_subdomain are not assigned, dynamic completion will be triggerd. A default
dns_name_prefix composed of name (cluster), resource_group_name, and subscription_id will be created.
This function will verify the parameter by default. It will check if both dns_name_prefix and fqdn_subdomain
are assigend, if so, raise the MutuallyExclusiveArgumentError.
:return: string or None
"""
return self._get_dns_name_prefix(enable_validation=True)
def get_kubernetes_version(self) -> str:
"""Obtain the value of kubernetes_version.
:return: string
"""
# read the original value passed by the command
kubernetes_version = self.raw_param.get("kubernetes_version")
# try to read the property value corresponding to the parameter from the `mc` object
if self.mc and self.mc.kubernetes_version is not None:
kubernetes_version = self.mc.kubernetes_version
# this parameter does not need dynamic completion
# this parameter does not need validation
return kubernetes_version
# pylint: disable=unused-argument
def _get_vm_set_type(self, read_only: bool = False, **kwargs) -> Union[str, None]:
"""Internal function to dynamically obtain the value of vm_set_type according to the context.
Dynamic completion will be triggerd by default. The value of vm set type will be set according to the value of
kubernetes_version. It will also normalize the value as server validation is case-sensitive.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("vm_set_type")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.type
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
vm_set_type = value_obtained_from_mc
read_from_mc = True
else:
vm_set_type = raw_value
# skip dynamic completion & validation if option read_only is specified
if read_only:
return vm_set_type
# dynamic completion
# the value verified by the validator may have case problems, and we will adjust it by default
if not read_from_mc:
kubernetes_version = self.get_kubernetes_version()
if not vm_set_type:
if kubernetes_version and StrictVersion(kubernetes_version) < StrictVersion("1.12.9"):
print(
"Setting vm_set_type to availabilityset as it is not specified and kubernetes version({}) "
"less than 1.12.9 only supports availabilityset\n".format(
kubernetes_version
)
)
vm_set_type = "AvailabilitySet"
if not vm_set_type:
vm_set_type = "VirtualMachineScaleSets"
# normalize as server validation is case-sensitive
if vm_set_type.lower() == "AvailabilitySet".lower():
vm_set_type = "AvailabilitySet"
if vm_set_type.lower() == "VirtualMachineScaleSets".lower():
vm_set_type = "VirtualMachineScaleSets"
return vm_set_type
# this parameter does not need validation
return vm_set_type
def get_vm_set_type(self) -> Union[str, None]:
"""Dynamically obtain the value of vm_set_type according to the context.
Dynamic completion will be triggerd by default. The value of vm set type will be set according to the value of
kubernetes_version. It will also normalize the value as server validation is case-sensitive.
:return: string or None
"""
# this parameter does not need validation
return self._get_vm_set_type()
def get_nodepool_name(self) -> str:
"""Dynamically obtain the value of nodepool_name according to the context.
Note: SDK performs the following validation {'required': True, 'pattern': r'^[a-z][a-z0-9]{0,11}$'}.
This function will normalize the parameter by default. If no value is assigned, the default value "nodepool1"
is set, and if the string length is greater than 12, it is truncated.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_name")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.name
# set default value
if value_obtained_from_mc is not None:
nodepool_name = value_obtained_from_mc
else:
nodepool_name = raw_value
# normalize
if not nodepool_name:
nodepool_name = "nodepool1"
else:
nodepool_name = nodepool_name[:12]
# this parameter does not need validation
return nodepool_name
def get_nodepool_tags(self) -> Union[Dict[str, str], None]:
"""Obtain the value of nodepool_tags.
:return: dictionary or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_tags")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.tags
# set default value
if value_obtained_from_mc is not None:
nodepool_tags = value_obtained_from_mc
else:
nodepool_tags = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return nodepool_tags
def get_nodepool_labels(self) -> Union[Dict[str, str], None]:
"""Obtain the value of nodepool_labels.
:return: dictionary or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_labels")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.node_labels
# set default value
if value_obtained_from_mc is not None:
nodepool_labels = value_obtained_from_mc
else:
nodepool_labels = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return nodepool_labels
def get_node_vm_size(self) -> str:
"""Obtain the value of node_vm_size.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_vm_size")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vm_size
# set default value
if value_obtained_from_mc is not None:
node_vm_size = value_obtained_from_mc
else:
node_vm_size = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_vm_size
def get_vnet_subnet_id(self) -> Union[str, None]:
"""Obtain the value of vnet_subnet_id.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("vnet_subnet_id")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vnet_subnet_id
# set default value
if value_obtained_from_mc is not None:
vnet_subnet_id = value_obtained_from_mc
else:
vnet_subnet_id = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return vnet_subnet_id
def get_ppg(self) -> Union[str, None]:
"""Obtain the value of ppg (proximity_placement_group_id).
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("ppg")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.proximity_placement_group_id
)
# set default value
if value_obtained_from_mc is not None:
ppg = value_obtained_from_mc
else:
ppg = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return ppg
def get_zones(self) -> Union[List[str], None]:
"""Obtain the value of zones.
:return: list of strings or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("zones")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.availability_zones
# set default value
if value_obtained_from_mc is not None:
zones = value_obtained_from_mc
else:
zones = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return zones
def get_enable_node_public_ip(self) -> bool:
"""Obtain the value of enable_node_public_ip.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_node_public_ip")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_node_public_ip
)
# set default value
if value_obtained_from_mc is not None:
enable_node_public_ip = value_obtained_from_mc
else:
enable_node_public_ip = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_node_public_ip
def get_node_public_ip_prefix_id(self) -> Union[str, None]:
"""Obtain the value of node_public_ip_prefix_id.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_public_ip_prefix_id")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.node_public_ip_prefix_id
)
# set default value
if value_obtained_from_mc is not None:
node_public_ip_prefix_id = value_obtained_from_mc
else:
node_public_ip_prefix_id = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_public_ip_prefix_id
def get_enable_encryption_at_host(self) -> bool:
"""Obtain the value of enable_encryption_at_host.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_encryption_at_host")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_encryption_at_host
)
# set default value
if value_obtained_from_mc is not None:
enable_encryption_at_host = value_obtained_from_mc
else:
enable_encryption_at_host = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_encryption_at_host
def get_enable_ultra_ssd(self) -> bool:
"""Obtain the value of enable_ultra_ssd.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_ultra_ssd")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_ultra_ssd
# set default value
if value_obtained_from_mc is not None:
enable_ultra_ssd = value_obtained_from_mc
else:
enable_ultra_ssd = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_ultra_ssd
def get_max_pods(self) -> Union[int, None]:
"""Obtain the value of max_pods.
This function will normalize the parameter by default. The parameter will be converted to int, but int 0 is
converted to None.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("max_pods")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_pods
# set default value
if value_obtained_from_mc is not None:
max_pods = value_obtained_from_mc
else:
max_pods = raw_value
# Note: int 0 is converted to None
if max_pods:
max_pods = int(max_pods)
else:
max_pods = None
# this parameter does not need validation
return max_pods
def get_node_osdisk_size(self) -> Union[int, None]:
"""Obtain the value of node_osdisk_size.
Note: SDK performs the following validation {'maximum': 2048, 'minimum': 0}.
This function will normalize the parameter by default. The parameter will be converted to int, but int 0 is
converted to None.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_osdisk_size")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_size_gb
# set default value
if value_obtained_from_mc is not None:
node_osdisk_size = value_obtained_from_mc
else:
node_osdisk_size = raw_value
# Note: 0 is converted to None
if node_osdisk_size:
node_osdisk_size = int(node_osdisk_size)
else:
node_osdisk_size = None
# this parameter does not need validation
return node_osdisk_size
def get_node_osdisk_type(self) -> Union[str, None]:
"""Obtain the value of node_osdisk_type.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_osdisk_type")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_type
# set default value
if value_obtained_from_mc is not None:
node_osdisk_type = value_obtained_from_mc
else:
node_osdisk_type = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_type
# pylint: disable=too-many-branches
def get_node_count_and_enable_cluster_autoscaler_and_min_count_and_max_count(
self,
) -> Tuple[int, bool, Union[int, None], Union[int, None]]:
"""Obtain the value of node_count, enable_cluster_autoscaler, min_count and max_count.
This function will verify the parameters through function "validate_counts_in_autoscaler" by default.
:return: a tuple containing four elements: node_count of int type, enable_cluster_autoscaler of bool type,
min_count of int type or None and max_count of int type or None
"""
# get agent pool profile from `mc`
agent_pool_profile = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
# node_count
# read the original value passed by the command
node_count = self.raw_param.get("node_count")
# try to read the property value corresponding to the parameter from the `mc` object
if agent_pool_profile and agent_pool_profile.count is not None:
node_count = agent_pool_profile.count
# enable_cluster_autoscaler
# read the original value passed by the command
enable_cluster_autoscaler = self.raw_param.get("enable_cluster_autoscaler")
# try to read the property value corresponding to the parameter from the `mc` object
if agent_pool_profile and agent_pool_profile.enable_auto_scaling is not None:
enable_cluster_autoscaler = agent_pool_profile.enable_auto_scaling
# min_count
# read the original value passed by the command
min_count = self.raw_param.get("min_count")
# try to read the property value corresponding to the parameter from the `mc` object
if agent_pool_profile and agent_pool_profile.min_count is not None:
min_count = agent_pool_profile.min_count
# max_count
# read the original value passed by the command
max_count = self.raw_param.get("max_count")
# try to read the property value corresponding to the parameter from the `mc` object
if agent_pool_profile and agent_pool_profile.max_count is not None:
max_count = agent_pool_profile.max_count
# these parameters do not need dynamic completion
# validation
validate_counts_in_autoscaler(
node_count,
enable_cluster_autoscaler,
min_count,
max_count,
decorator_mode=DecoratorMode.CREATE,
)
return node_count, enable_cluster_autoscaler, min_count, max_count
    # pylint: disable=too-many-branches
    def get_update_enable_disable_cluster_autoscaler_and_min_max_count(
        self,
    ) -> Tuple[bool, bool, bool, Union[int, None], Union[int, None]]:
        """Obtain the value of update_cluster_autoscaler, enable_cluster_autoscaler, disable_cluster_autoscaler,
        min_count and max_count.
        This function will verify the parameters through function "validate_counts_in_autoscaler" by default. Besides if
        more than one of enable_cluster_autoscaler, update_cluster_autoscaler and disable_cluster_autoscaler are
        specified, a MutuallyExclusiveArgumentError will be raised. If enable_cluster_autoscaler or
        update_cluster_autoscaler is specified and there are multiple agent pool profiles, an ArgumentUsageError will
        be raised. If enable_cluster_autoscaler is specified and autoscaler is already enabled in `mc`, it will output
        warning messages and exit with code 0. If update_cluster_autoscaler is specified and autoscaler is not enabled
        in `mc`, it will raise an InvalidArgumentValueError. If disable_cluster_autoscaler is specified and autoscaler
        is not enabled in `mc`, it will output warning messages and exit with code 0.
        :return: a tuple containing five elements: update_cluster_autoscaler of bool type, enable_cluster_autoscaler
        of bool type, disable_cluster_autoscaler of bool type, min_count of int type or None and max_count of int type
        or None
        """
        # get the first agent pool profile from `mc`, used below to inspect the current autoscaler state
        agent_pool_profile = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
        # update_cluster_autoscaler
        # read the original value passed by the command
        update_cluster_autoscaler = self.raw_param.get("update_cluster_autoscaler")
        # enable_cluster_autoscaler
        # read the original value passed by the command
        enable_cluster_autoscaler = self.raw_param.get("enable_cluster_autoscaler")
        # disable_cluster_autoscaler
        # read the original value passed by the command
        disable_cluster_autoscaler = self.raw_param.get("disable_cluster_autoscaler")
        # min_count
        # read the original value passed by the command
        min_count = self.raw_param.get("min_count")
        # max_count
        # read the original value passed by the command
        max_count = self.raw_param.get("max_count")
        # these parameters do not need dynamic completion
        # validation
        # For multi-agent pool, use the az aks nodepool command
        # NOTE(review): `self.mc` / `self.mc.agent_pool_profiles` are assumed non-None here (update mode operates on
        # an existing cluster) — confirm upstream guarantees this before the len() call.
        if (enable_cluster_autoscaler or update_cluster_autoscaler) and len(self.mc.agent_pool_profiles) > 1:
            raise ArgumentUsageError(
                'There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                "to update per node pool auto scaler settings"
            )
        # the three switches are mutually exclusive (bools sum like 0/1 ints)
        if enable_cluster_autoscaler + update_cluster_autoscaler + disable_cluster_autoscaler > 1:
            raise MutuallyExclusiveArgumentError(
                "Can only specify one of --enable-cluster-autoscaler, --update-cluster-autoscaler and "
                "--disable-cluster-autoscaler"
            )
        validate_counts_in_autoscaler(
            None,
            enable_cluster_autoscaler or update_cluster_autoscaler,
            min_count,
            max_count,
            decorator_mode=DecoratorMode.UPDATE,
        )
        # NOTE(review): agent_pool_profile may be None if `mc` has no agent pool profiles — the attribute accesses
        # below would then raise AttributeError; confirm this cannot happen in update mode.
        if enable_cluster_autoscaler and agent_pool_profile.enable_auto_scaling:
            logger.warning(
                "Cluster autoscaler is already enabled for this node pool.\n"
                'Please run "az aks --update-cluster-autoscaler" '
                "if you want to update min-count or max-count."
            )
            sys.exit(0)
        if update_cluster_autoscaler and not agent_pool_profile.enable_auto_scaling:
            raise InvalidArgumentValueError(
                "Cluster autoscaler is not enabled for this node pool.\n"
                'Run "az aks nodepool update --enable-cluster-autoscaler" '
                "to enable cluster with min-count and max-count."
            )
        if disable_cluster_autoscaler and not agent_pool_profile.enable_auto_scaling:
            logger.warning(
                "Cluster autoscaler is already disabled for this node pool."
            )
            sys.exit(0)
        return update_cluster_autoscaler, enable_cluster_autoscaler, disable_cluster_autoscaler, min_count, max_count
def get_admin_username(self) -> str:
"""Obtain the value of admin_username.
Note: SDK performs the following validation {'required': True, 'pattern': r'^[A-Za-z][-A-Za-z0-9_]*$'}.
:return: str
"""
# read the original value passed by the command
admin_username = self.raw_param.get("admin_username")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.linux_profile and
self.mc.linux_profile.admin_username is not None
):
admin_username = self.mc.linux_profile.admin_username
# this parameter does not need dynamic completion
# this parameter does not need validation
return admin_username
    # pylint: disable=unused-argument
    def _get_windows_admin_username_and_password(
        self, read_only: bool = False, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Internal function to dynamically obtain the value of windows_admin_username and windows_admin_password
        according to the context.
        When one of windows_admin_username and windows_admin_password is not assigned, dynamic completion will be
        triggered. The user will be prompted to enter the missing windows_admin_username or windows_admin_password in
        tty (pseudo terminal). If the program is running in a non-interactive environment, a NoTTYError error will be
        raised.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
        :return: a tuple containing two elements: windows_admin_username of string type or None and
        windows_admin_password of string type or None
        """
        # windows_admin_username
        # read the original value passed by the command
        windows_admin_username = self.raw_param.get("windows_admin_username")
        # try to read the property value corresponding to the parameter from the `mc` object
        username_read_from_mc = False
        if (
            self.mc and
            self.mc.windows_profile and
            self.mc.windows_profile.admin_username is not None
        ):
            windows_admin_username = self.mc.windows_profile.admin_username
            username_read_from_mc = True
        # windows_admin_password
        # read the original value passed by the command
        windows_admin_password = self.raw_param.get("windows_admin_password")
        # try to read the property value corresponding to the parameter from the `mc` object
        password_read_from_mc = False
        if (
            self.mc and
            self.mc.windows_profile and
            self.mc.windows_profile.admin_password is not None
        ):
            windows_admin_password = self.mc.windows_profile.admin_password
            password_read_from_mc = True
        # consistent check: both fields must come from the same source, either
        # both from `mc` or both from the raw parameters
        if username_read_from_mc != password_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of windows admin name and password is read from the `mc` object."
            )
        # skip dynamic completion & validation if option read_only is specified
        if read_only:
            return windows_admin_username, windows_admin_password
        username_dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion
        # to avoid that windows_admin_password is set but windows_admin_username is not
        if windows_admin_username is None and windows_admin_password:
            username_dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        username_dynamic_completion = (
            username_dynamic_completion and not username_read_from_mc
        )
        if username_dynamic_completion:
            try:
                windows_admin_username = prompt("windows_admin_username: ")
                # The validation for admin_username in ManagedClusterWindowsProfile will fail even if
                # users still set windows_admin_username to empty here
            except NoTTYException:
                raise NoTTYError(
                    "Please specify username for Windows in non-interactive mode."
                )
        password_dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion
        # to avoid that windows_admin_username is set but windows_admin_password is not
        if windows_admin_password is None and windows_admin_username:
            password_dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        password_dynamic_completion = (
            password_dynamic_completion and not password_read_from_mc
        )
        if password_dynamic_completion:
            try:
                windows_admin_password = prompt_pass(
                    msg="windows-admin-password: ", confirm=True
                )
            except NoTTYException:
                raise NoTTYError(
                    "Please specify both username and password in non-interactive mode."
                )
        # these parameters do not need validation
        return windows_admin_username, windows_admin_password
def get_windows_admin_username_and_password(
self,
) -> Tuple[Union[str, None], Union[str, None]]:
"""Dynamically obtain the value of windows_admin_username and windows_admin_password according to the context.
When ont of windows_admin_username and windows_admin_password is not assigned, dynamic completion will be
triggerd. The user will be prompted to enter the missing windows_admin_username or windows_admin_password in
tty (pseudo terminal). If the program is running in a non-interactive environment, a NoTTYError error will be
raised.
:return: a tuple containing two elements: windows_admin_username of string type or None and
windows_admin_password of string type or None
"""
return self._get_windows_admin_username_and_password()
def get_enable_ahub(self) -> bool:
"""Obtain the value of enable_ahub.
Note: enable_ahub will not be directly decorated into the `mc` object.
:return: bool
"""
# read the original value passed by the command
enable_ahub = self.raw_param.get("enable_ahub")
# try to read the property value corresponding to the parameter from the `mc` object
if self.mc and self.mc.windows_profile:
enable_ahub = self.mc.windows_profile.license_type == "Windows_Server"
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_ahub
    # pylint: disable=unused-argument,too-many-statements
    def _get_service_principal_and_client_secret(
        self, read_only: bool = False, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Internal function to dynamically obtain the values of service_principal and client_secret according to the
        context.
        This function will store an intermediate aad_session_key.
        When service_principal and client_secret are not assigned and enable_managed_identity is True, dynamic
        completion will not be triggered. For other cases, dynamic completion will be triggered.
        When client_secret is given but service_principal is not, dns_name_prefix or fqdn_subdomain will be used to
        create a service principal. The parameters subscription_id, location and name (cluster) are also required when
        calling function "_ensure_aks_service_principal", which internally used GraphRbacManagementClient to send
        the request.
        When service_principal is given but client_secret is not, function "_ensure_aks_service_principal" would raise
        CLIError.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
        :return: a tuple containing two elements: service_principal of string type or None and client_secret of
        string type or None
        """
        # service_principal
        # read the original value passed by the command
        service_principal = self.raw_param.get("service_principal")
        # try to read the property value corresponding to the parameter from the `mc` object
        sp_read_from_mc = False
        if (
            self.mc and
            self.mc.service_principal_profile and
            self.mc.service_principal_profile.client_id is not None
        ):
            service_principal = self.mc.service_principal_profile.client_id
            sp_read_from_mc = True
        # client_secret
        # read the original value passed by the command
        client_secret = self.raw_param.get("client_secret")
        # try to read the property value corresponding to the parameter from the `mc` object
        secret_read_from_mc = False
        if (
            self.mc and
            self.mc.service_principal_profile and
            self.mc.service_principal_profile.secret is not None
        ):
            client_secret = self.mc.service_principal_profile.secret
            secret_read_from_mc = True
        # consistent check: sp and secret must both come from the same source,
        # either both from `mc` or both from the raw parameters
        if sp_read_from_mc != secret_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of sp and secret is read from the `mc` object."
            )
        # skip dynamic completion & validation if option read_only is specified
        if read_only:
            return service_principal, client_secret
        # dynamic completion for service_principal and client_secret
        dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion:
        # completion is skipped only for a pure managed-identity cluster with
        # neither sp nor secret supplied
        enable_managed_identity = self._get_enable_managed_identity(read_only=True)
        if not (
            enable_managed_identity and
            not service_principal and
            not client_secret
        ):
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = (
            dynamic_completion and
            not sp_read_from_mc and
            not secret_read_from_mc
        )
        if dynamic_completion:
            principal_obj = _ensure_aks_service_principal(
                cli_ctx=self.cmd.cli_ctx,
                service_principal=service_principal,
                client_secret=client_secret,
                subscription_id=self.get_subscription_id(),
                dns_name_prefix=self._get_dns_name_prefix(enable_validation=False),
                fqdn_subdomain=self._get_fqdn_subdomain(enable_validation=False),
                location=self.get_location(),
                name=self.get_name(),
            )
            service_principal = principal_obj.get("service_principal")
            client_secret = principal_obj.get("client_secret")
            # stash the session key for later use by the caller
            self.set_intermediate("aad_session_key", principal_obj.get("aad_session_key"), overwrite_exists=True)
        # these parameters do not need validation
        return service_principal, client_secret
def get_service_principal_and_client_secret(
self
) -> Tuple[Union[str, None], Union[str, None]]:
"""Dynamically obtain the values of service_principal and client_secret according to the context.
When service_principal and client_secret are not assigned and enable_managed_identity is True, dynamic
completion will not be triggered. For other cases, dynamic completion will be triggered.
When client_secret is given but service_principal is not, dns_name_prefix or fqdn_subdomain will be used to
create a service principal. The parameters subscription_id, location and name (cluster) are also required when
calling function "_ensure_aks_service_principal", which internally used GraphRbacManagementClient to send
the request.
When service_principal is given but client_secret is not, function "_ensure_aks_service_principal" would raise
CLIError.
:return: a tuple containing two elements: service_principal of string type or None and client_secret of
string type or None
"""
return self._get_service_principal_and_client_secret()
# pylint: disable=unused-argument
def _get_enable_managed_identity(
self, enable_validation: bool = False, read_only: bool = False, **kwargs
) -> bool:
"""Internal function to dynamically obtain the values of service_principal and client_secret according to the
context.
Note: enable_managed_identity will not be directly decorated into the `mc` object.
When both service_principal and client_secret are assigned and enable_managed_identity is True, dynamic
completion will be triggered. The value of enable_managed_identity will be set to False.
This function supports the option of enable_validation. When enabled, if enable_managed_identity is not
specified and assign_identity is assigned, a RequiredArgumentMissingError will be raised.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: bool
"""
# read the original value passed by the command
enable_managed_identity = self.raw_param.get("enable_managed_identity")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if self.mc and self.mc.identity:
enable_managed_identity = check_is_msi_cluster(self.mc)
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return enable_managed_identity
# dynamic completion
(
service_principal,
client_secret,
) = self._get_service_principal_and_client_secret(read_only=True)
if not read_from_mc and service_principal and client_secret:
enable_managed_identity = False
# validation
if enable_validation:
if not enable_managed_identity and self._get_assign_identity(enable_validation=False):
raise RequiredArgumentMissingError(
"--assign-identity can only be specified when --enable-managed-identity is specified"
)
return enable_managed_identity
def get_enable_managed_identity(self) -> bool:
"""Dynamically obtain the values of service_principal and client_secret according to the context.
Note: enable_managed_identity will not be directly decorated into the `mc` object.
When both service_principal and client_secret are assigned and enable_managed_identity is True, dynamic
completion will be triggered. The value of enable_managed_identity will be set to False.
This function will verify the parameter by default. If enable_managed_identity is not specified and
assign_identity is assigned, a RequiredArgumentMissingError will be raised.
:return: bool
"""
return self._get_enable_managed_identity(enable_validation=True)
def get_skip_subnet_role_assignment(self) -> bool:
"""Obtain the value of skip_subnet_role_assignment.
Note: skip_subnet_role_assignment will not be decorated into the `mc` object.
:return: bool
"""
# read the original value passed by the command
skip_subnet_role_assignment = self.raw_param.get("skip_subnet_role_assignment")
# this parameter does not need dynamic completion
# this parameter does not need validation
return skip_subnet_role_assignment
# pylint: disable=unused-argument
def _get_assign_identity(self, enable_validation: bool = False, **kwargs) -> Union[str, None]:
"""Internal function to obtain the value of assign_identity.
This function supports the option of enable_validation. When enabled, if enable_managed_identity is not
specified and assign_identity is assigned, a RequiredArgumentMissingError will be raised. Besides, if
assign_identity is not assigned but assign_kubelet_identity is, a RequiredArgumentMissingError will be raised.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("assign_identity")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if (
self.mc and
self.mc.identity and
self.mc.identity.user_assigned_identities is not None
):
value_obtained_from_mc = safe_list_get(
list(self.mc.identity.user_assigned_identities.keys()), 0, None
)
# set default value
if value_obtained_from_mc is not None:
assign_identity = value_obtained_from_mc
else:
assign_identity = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
if assign_identity:
if not self._get_enable_managed_identity(enable_validation=False):
raise RequiredArgumentMissingError(
"--assign-identity can only be specified when --enable-managed-identity is specified"
)
else:
if self.get_assign_kubelet_identity():
raise RequiredArgumentMissingError(
"--assign-kubelet-identity can only be specified when --assign-identity is specified"
)
return assign_identity
def get_assign_identity(self) -> Union[str, None]:
"""Obtain the value of assign_identity.
This function will verify the parameter by default. If enable_managed_identity is not specified and
assign_identity is assigned, a RequiredArgumentMissingError will be raised. Besides, if assign_identity is not
assigned but assign_kubelet_identity is, a RequiredArgumentMissingError will be raised.
:return: string or None
"""
return self._get_assign_identity(enable_validation=True)
def get_identity_by_msi_client(self, assigned_identity: str) -> Identity:
"""Helper function to obtain the identity object by msi client.
Note: This is a wrapper of the external function "_get_user_assigned_identity", and the return result of this
function will not be directly decorated into the `mc` object.
This function will use ManagedServiceIdentityClient to send the request, and return an identity object.
ResourceNotFoundError, ClientRequestError or InvalidArgumentValueError exceptions might be raised in the above
process.
:return: string
"""
return _get_user_assigned_identity(self.cmd.cli_ctx, assigned_identity)
def get_user_assigned_identity_client_id(self) -> str:
"""Helper function to obtain the client_id of user assigned identity.
Note: This is not a parameter of aks_create, and it will not be decorated into the `mc` object.
Parse assign_identity and use ManagedServiceIdentityClient to send the request, get the client_id field in the
returned identity object. ResourceNotFoundError, ClientRequestError or InvalidArgumentValueError exceptions
may be raised in the above process.
:return: string
"""
assigned_identity = self.get_assign_identity()
if assigned_identity is None or assigned_identity == "":
raise RequiredArgumentMissingError("No assigned identity provided.")
return self.get_identity_by_msi_client(assigned_identity).client_id
def get_user_assigned_identity_object_id(self) -> str:
"""Helper function to obtain the principal_id of user assigned identity.
Note: This is not a parameter of aks_create, and it will not be decorated into the `mc` object.
Parse assign_identity and use ManagedServiceIdentityClient to send the request, get the principal_id field in
the returned identity object. ResourceNotFoundError, ClientRequestError or InvalidArgumentValueError exceptions
may be raised in the above process.
:return: string
"""
assigned_identity = self.get_assign_identity()
if assigned_identity is None or assigned_identity == "":
raise RequiredArgumentMissingError("No assigned identity provided.")
return self.get_identity_by_msi_client(assigned_identity).principal_id
def get_yes(self) -> bool:
"""Obtain the value of yes.
Note: yes will not be decorated into the `mc` object.
:return: bool
"""
# read the original value passed by the command
yes = self.raw_param.get("yes")
# this parameter does not need dynamic completion
# this parameter does not need validation
return yes
def get_no_wait(self) -> bool:
"""Obtain the value of no_wait.
Note: no_wait will not be decorated into the `mc` object.
:return: bool
"""
# read the original value passed by the command
no_wait = self.raw_param.get("no_wait")
# this parameter does not need dynamic completion
# this parameter does not need validation
return no_wait
def get_attach_acr(self) -> Union[str, None]:
"""Obtain the value of attach_acr.
Note: attach_acr will not be decorated into the `mc` object.
This function will verify the parameter by default in create mode. When attach_acr is assigned, if both
enable_managed_identity and no_wait are assigned, a MutuallyExclusiveArgumentError will be raised; if
service_principal is not assigned, raise a RequiredArgumentMissingError.
:return: string or None
"""
# read the original value passed by the command
attach_acr = self.raw_param.get("attach_acr")
# this parameter does not need dynamic completion
# validation
if self.decorator_mode == DecoratorMode.CREATE and attach_acr:
if self._get_enable_managed_identity(enable_validation=False) and self.get_no_wait():
raise MutuallyExclusiveArgumentError(
"When --attach-acr and --enable-managed-identity are both specified, "
"--no-wait is not allowed, please wait until the whole operation succeeds."
)
# Attach acr operation will be handled after the cluster is created
# newly added check, check whether client_id exists before creating role assignment
service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)
if not service_principal:
raise RequiredArgumentMissingError(
"No service principal provided to create the acrpull role assignment for acr."
)
return attach_acr
def get_detach_acr(self) -> Union[str, None]:
"""Obtain the value of detach_acr.
Note: detach_acr will not be decorated into the `mc` object.
:return: string or None
"""
# read the original value passed by the command
detach_acr = self.raw_param.get("detach_acr")
# this parameter does not need dynamic completion
# this parameter does not need validation
return detach_acr
# pylint: disable=unused-argument
def _get_load_balancer_sku(
self, enable_validation: bool = False, read_only: bool = False, **kwargs
) -> Union[str, None]:
"""Internal function to dynamically obtain the value of load_balancer_sku according to the context.
Note: When returning a string, it will always be lowercase.
When load_balancer_sku is not assigned, dynamic completion will be triggerd. Function "set_load_balancer_sku"
will be called and the corresponding load balancer sku will be returned according to the value of
kubernetes_version.
This function supports the option of enable_validation. When enabled, it will check if load_balancer_sku equals
to "basic", if so, when api_server_authorized_ip_ranges is assigned or enable_private_cluster is specified,
raise an InvalidArgumentValueError.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: string or None
"""
# read the original value passed by the command
load_balancer_sku = safe_lower(self.raw_param.get("load_balancer_sku"))
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_sku is not None
):
load_balancer_sku = safe_lower(
self.mc.network_profile.load_balancer_sku
)
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return load_balancer_sku
# dynamic completion
if not read_from_mc and load_balancer_sku is None:
load_balancer_sku = safe_lower(
set_load_balancer_sku(
sku=load_balancer_sku,
kubernetes_version=self.get_kubernetes_version(),
)
)
# validation
if enable_validation:
if load_balancer_sku == "basic":
if self.get_api_server_authorized_ip_ranges():
raise InvalidArgumentValueError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
if self.get_enable_private_cluster():
raise InvalidArgumentValueError(
"Please use standard load balancer for private cluster"
)
return load_balancer_sku
def get_load_balancer_sku(self) -> Union[str, None]:
"""Dynamically obtain the value of load_balancer_sku according to the context.
Note: When returning a string, it will always be lowercase.
When load_balancer_sku is not assigned, dynamic completion will be triggerd. Function "set_load_balancer_sku"
will be called and the corresponding load balancer sku will be returned according to the value of
kubernetes_version.
This function will verify the parameter by default. It will check if load_balancer_sku equals to "basic", if so,
when api_server_authorized_ip_ranges is assigned or enable_private_cluster is specified,
raise an InvalidArgumentValueError.
:return: string or None
"""
return safe_lower(self._get_load_balancer_sku(enable_validation=True))
def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:
"""Obtain the value of load_balancer_managed_outbound_ip_count.
Note: SDK performs the following validation {'maximum': 100, 'minimum': 1}.
:return: int or None
"""
# read the original value passed by the command
load_balancer_managed_outbound_ip_count = self.raw_param.get(
"load_balancer_managed_outbound_ip_count"
)
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None
):
load_balancer_managed_outbound_ip_count = (
self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_managed_outbound_ip_count
def get_load_balancer_outbound_ips(self) -> Union[str, List[ResourceReference], None]:
"""Obtain the value of load_balancer_outbound_ips.
Note: SDK performs the following validation {'maximum': 16, 'minimum': 1}.
:return: string, list of ResourceReference, or None
"""
# read the original value passed by the command
load_balancer_outbound_ips = self.raw_param.get(
"load_balancer_outbound_ips"
)
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.outbound_i_ps and
self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps is not None
):
load_balancer_outbound_ips = (
self.mc.network_profile.load_balancer_profile.outbound_i_ps.public_i_ps
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_outbound_ips
def get_load_balancer_outbound_ip_prefixes(self) -> Union[str, List[ResourceReference], None]:
"""Obtain the value of load_balancer_outbound_ip_prefixes.
:return: string, list of ResourceReference, or None
"""
# read the original value passed by the command
load_balancer_outbound_ip_prefixes = self.raw_param.get(
"load_balancer_outbound_ip_prefixes"
)
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes and
self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes is not None
):
load_balancer_outbound_ip_prefixes = (
self.mc.network_profile.load_balancer_profile.outbound_ip_prefixes.public_ip_prefixes
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_outbound_ip_prefixes
def get_load_balancer_outbound_ports(self) -> Union[int, None]:
"""Obtain the value of load_balancer_outbound_ports.
Note: SDK performs the following validation {'maximum': 64000, 'minimum': 0}.
:return: int or None
"""
# read the original value passed by the command
load_balancer_outbound_ports = self.raw_param.get(
"load_balancer_outbound_ports"
)
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.allocated_outbound_ports is not None
):
load_balancer_outbound_ports = (
self.mc.network_profile.load_balancer_profile.allocated_outbound_ports
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_outbound_ports
def get_load_balancer_idle_timeout(self) -> Union[int, None]:
"""Obtain the value of load_balancer_idle_timeout.
Note: SDK performs the following validation {'maximum': 120, 'minimum': 4}.
:return: int or None
"""
# read the original value passed by the command
load_balancer_idle_timeout = self.raw_param.get(
"load_balancer_idle_timeout"
)
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.load_balancer_profile and
self.mc.network_profile.load_balancer_profile.idle_timeout_in_minutes is not None
):
load_balancer_idle_timeout = (
self.mc.network_profile.load_balancer_profile.idle_timeout_in_minutes
)
# this parameter does not need dynamic completion
# this parameter does not need validation
return load_balancer_idle_timeout
    # pylint: disable=unused-argument
    def _get_outbound_type(
        self,
        enable_validation: bool = False,
        read_only: bool = False,
        load_balancer_profile: ManagedClusterLoadBalancerProfile = None,
        **kwargs
    ) -> Union[str, None]:
        """Internal function to dynamically obtain the value of outbound_type according to the context.

        Note: All the external parameters involved in the validation are not verified in their own getters.

        When outbound_type is not assigned, dynamic completion will be triggered. By default, the value is set to
        CONST_OUTBOUND_TYPE_LOAD_BALANCER.

        This function supports the option of enable_validation. When enabled, if the value of outbound_type is
        CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, the following checks will be performed. If load_balancer_sku is set
        to basic, an InvalidArgumentValueError will be raised. If vnet_subnet_id is not assigned,
        a RequiredArgumentMissingError will be raised. If any of load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips or load_balancer_outbound_ip_prefixes is assigned, a MutuallyExclusiveArgumentError
        will be raised.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
        This function supports the option of load_balancer_profile, if provided, when verifying loadbalancer-related
        parameters, the value in load_balancer_profile will be used for validation.

        :return: string or None
        """
        # read the original value passed by the command
        outbound_type = self.raw_param.get("outbound_type")
        # try to read the property value corresponding to the parameter from the `mc` object
        read_from_mc = False
        if (
            self.mc and
            self.mc.network_profile and
            self.mc.network_profile.outbound_type is not None
        ):
            outbound_type = self.mc.network_profile.outbound_type
            read_from_mc = True
        # skip dynamic completion & validation if option read_only is specified
        if read_only:
            return outbound_type
        # dynamic completion: anything other than userDefinedRouting falls back to the loadBalancer default,
        # unless the value was already decorated onto `mc`
        if not read_from_mc and outbound_type != CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING:
            outbound_type = CONST_OUTBOUND_TYPE_LOAD_BALANCER
        # validation
        # Note: The parameters involved in the validation are not verified in their own getters.
        if enable_validation:
            if outbound_type == CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING:
            # Should not enable read_only for get_load_balancer_sku, since its default value is None, and it has
            # not been decorated into the mc object at this time, only the value after dynamic completion is
            # meaningful here.
                if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
                    raise InvalidArgumentValueError(
                        "userDefinedRouting doesn't support basic load balancer sku"
                    )
                if self.get_vnet_subnet_id() in ["", None]:
                    raise RequiredArgumentMissingError(
                        "--vnet-subnet-id must be specified for userDefinedRouting and it must "
                        "be pre-configured with a route table with egress rules"
                    )
                # prefer the caller-provided load balancer profile for the mutual-exclusion check
                if load_balancer_profile:
                    if (
                        load_balancer_profile.managed_outbound_i_ps or
                        load_balancer_profile.outbound_i_ps or
                        load_balancer_profile.outbound_ip_prefixes
                    ):
                        raise MutuallyExclusiveArgumentError(
                            "userDefinedRouting doesn't support customizing a standard load balancer with IP addresses"
                        )
                else:
                    # fall back to the raw-parameter getters when no profile was provided
                    if (
                        self.get_load_balancer_managed_outbound_ip_count() or
                        self.get_load_balancer_outbound_ips() or
                        self.get_load_balancer_outbound_ip_prefixes()
                    ):
                        raise MutuallyExclusiveArgumentError(
                            "userDefinedRouting doesn't support customizing a standard load balancer with IP addresses"
                        )
        return outbound_type
def get_outbound_type(
self,
load_balancer_profile: ManagedClusterLoadBalancerProfile = None
) -> Union[str, None]:
"""Dynamically obtain the value of outbound_type according to the context.
Note: All the external parameters involved in the validation are not verified in their own getters.
When outbound_type is not assigned, dynamic completion will be triggerd. By default, the value is set to
CONST_OUTBOUND_TYPE_LOAD_BALANCER.
This function will verify the parameter by default. If the value of outbound_type is
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, the following checks will be performed. If load_balancer_sku is set
to basic, an InvalidArgumentValueError will be raised. If vnet_subnet_id is not assigned,
a RequiredArgumentMissingError will be raised. If any of load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips or load_balancer_outbound_ip_prefixes is assigned, a MutuallyExclusiveArgumentError
will be raised.
This function supports the option of load_balancer_profile, if provided, when verifying loadbalancer-related
parameters, the value in load_balancer_profile will be used for validation.
:return: string or None
"""
return self._get_outbound_type(
enable_validation=True, load_balancer_profile=load_balancer_profile
)
# pylint: disable=unused-argument
def _get_network_plugin(self, enable_validation: bool = False, **kwargs) -> Union[str, None]:
"""Internal function to Obtain the value of network_plugin.
Note: SDK provides default value "kubenet" for network_plugin.
This function supports the option of enable_validation. When enabled, in case network_plugin is assigned, if
pod_cidr is assigned and the value of network_plugin is azure, an InvalidArgumentValueError will be
raised; otherwise, if any of pod_cidr, service_cidr, dns_service_ip, docker_bridge_address or network_policy
is assigned, a RequiredArgumentMissingError will be raised.
:return: string or None
"""
# read the original value passed by the command
network_plugin = self.raw_param.get("network_plugin")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.network_profile and
self.mc.network_profile.network_plugin is not None
):
network_plugin = self.mc.network_profile.network_plugin
# this parameter does not need dynamic completion
# validation
if enable_validation:
(
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy,
) = (
self.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()
)
if network_plugin:
if network_plugin == "azure" and pod_cidr:
raise InvalidArgumentValueError(
"Please use kubenet as the network plugin type when pod_cidr is specified"
)
else:
if (
pod_cidr or
service_cidr or
dns_service_ip or
docker_bridge_address or
network_policy
):
raise RequiredArgumentMissingError(
"Please explicitly specify the network plugin type"
)
return network_plugin
def get_network_plugin(self) -> Union[str, None]:
"""Obtain the value of network_plugin.
Note: SDK provides default value "kubenet" for network_plugin.
This function will verify the parameter by default. In case network_plugin is assigned, if pod_cidr is assigned
and the value of network_plugin is azure, an InvalidArgumentValueError will be raised; otherwise, if any of
pod_cidr, service_cidr, dns_service_ip, docker_bridge_address or network_policy is assigned, a
RequiredArgumentMissingError will be raised.
:return: string or None
"""
return self._get_network_plugin(enable_validation=True)
def get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(
self,
) -> Tuple[
Union[str, None],
Union[str, None],
Union[str, None],
Union[str, None],
Union[str, None],
]:
"""Obtain the value of pod_cidr, service_cidr, dns_service_ip, docker_bridge_address and network_policy.
Note: SDK provides default value "10.244.0.0/16" and performs the following validation
{'pattern': r'^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\/([0-9]|[1-2][0-9]|3[0-2]))?$'} for pod_cidr.
Note: SDK provides default value "10.0.0.0/16" and performs the following validation
{'pattern': r'^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\/([0-9]|[1-2][0-9]|3[0-2]))?$'} for service_cidr.
Note: SDK provides default value "10.0.0.10" and performs the following validation
{'pattern': r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'}
for dns_service_ip.
Note: SDK provides default value "172.17.0.1/16" and performs the following validation
{'pattern': r'^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\/([0-9]|[1-2][0-9]|3[0-2]))?$'} for docker_bridge_address.
This function will verify the parameters by default. If pod_cidr is assigned and the value of network_plugin
is azure, an InvalidArgumentValueError will be raised; otherwise, if any of pod_cidr, service_cidr,
dns_service_ip, docker_bridge_address or network_policy is assigned, a RequiredArgumentMissingError will be
raised.
:return: a tuple of five elements: pod_cidr of string type or None, service_cidr of string type or None,
dns_service_ip of string type or None, docker_bridge_address of string type or None, network_policy of
string type or None.
"""
# get network profile from `mc`
network_profile = None
if self.mc:
network_profile = self.mc.network_profile
# pod_cidr
# read the original value passed by the command
pod_cidr = self.raw_param.get("pod_cidr")
# try to read the property value corresponding to the parameter from the `mc` object
if network_profile and network_profile.pod_cidr is not None:
pod_cidr = network_profile.pod_cidr
# service_cidr
# read the original value passed by the command
service_cidr = self.raw_param.get("service_cidr")
# try to read the property value corresponding to the parameter from the `mc` object
if network_profile and network_profile.service_cidr is not None:
service_cidr = network_profile.service_cidr
# dns_service_ip
# read the original value passed by the command
dns_service_ip = self.raw_param.get("dns_service_ip")
# try to read the property value corresponding to the parameter from the `mc` object
if network_profile and network_profile.dns_service_ip is not None:
dns_service_ip = network_profile.dns_service_ip
# docker_bridge_address
# read the original value passed by the command
docker_bridge_address = self.raw_param.get("docker_bridge_address")
# try to read the property value corresponding to the parameter from the `mc` object
if network_profile and network_profile.docker_bridge_cidr is not None:
docker_bridge_address = network_profile.docker_bridge_cidr
# network_policy
# read the original value passed by the command
network_policy = self.raw_param.get("network_policy")
# try to read the property value corresponding to the parameter from the `mc` object
if network_profile and network_profile.network_policy is not None:
network_policy = network_profile.network_policy
# these parameters do not need dynamic completion
# validation
network_plugin = self._get_network_plugin(enable_validation=False)
if network_plugin:
if network_plugin == "azure" and pod_cidr:
raise InvalidArgumentValueError(
"Please use kubenet as the network plugin type when pod_cidr is specified"
)
else:
if (
pod_cidr or
service_cidr or
dns_service_ip or
docker_bridge_address or
network_policy
):
raise RequiredArgumentMissingError(
"Please explicitly specify the network plugin type"
)
return pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy
# pylint: disable=unused-argument
def _get_enable_addons(self, enable_validation: bool = False, **kwargs) -> List[str]:
"""Internal function to obtain the value of enable_addons.
Note: enable_addons will not be directly decorated into the `mc` object and we do not support to fetch it from
`mc`.
Note: Some of the external parameters involved in the validation are not verified in their own getters.
This function supports the option of enable_validation. When enabled, it will check whether the provided addons
have duplicate or invalid values, and raise an InvalidArgumentValueError if found. Besides, if monitoring is
specified in enable_addons but workspace_resource_id is not assigned, or virtual-node is specified but
aci_subnet_name or vnet_subnet_id is not, a RequiredArgumentMissingError will be raised.
This function will normalize the parameter by default. It will split the string into a list with "," as the
delimiter.
:return: empty list or list of strings
"""
# read the original value passed by the command
enable_addons = self.raw_param.get("enable_addons")
# normalize
enable_addons = enable_addons.split(',') if enable_addons else []
# validation
if enable_validation:
# check duplicate addons
duplicate_addons_set = {
x for x in enable_addons if enable_addons.count(x) >= 2
}
if len(duplicate_addons_set) != 0:
raise InvalidArgumentValueError(
"Duplicate addon{} '{}' found in option --enable-addons.".format(
"s" if len(duplicate_addons_set) > 1 else "",
",".join(duplicate_addons_set),
)
)
# check unrecognized addons
enable_addons_set = set(enable_addons)
invalid_addons_set = enable_addons_set.difference(ADDONS.keys())
if len(invalid_addons_set) != 0:
raise InvalidArgumentValueError(
"'{}' {} not recognized by the --enable-addons argument.".format(
",".join(invalid_addons_set),
"are" if len(invalid_addons_set) > 1 else "is",
)
)
# check monitoring/workspace_resource_id
workspace_resource_id = self._get_workspace_resource_id(read_only=True)
if "monitoring" not in enable_addons and workspace_resource_id:
raise RequiredArgumentMissingError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
# check virtual node/aci_subnet_name/vnet_subnet_id
# Note: The external parameters involved in the validation are not verified in their own getters.
aci_subnet_name = self.get_aci_subnet_name()
vnet_subnet_id = self.get_vnet_subnet_id()
if "virtual-node" in enable_addons and not (aci_subnet_name and vnet_subnet_id):
raise RequiredArgumentMissingError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
return enable_addons
def get_enable_addons(self) -> List[str]:
"""Obtain the value of enable_addons.
Note: enable_addons will not be directly decorated into the `mc` object and we do not support to fetch it from
`mc`.
Note: Some of the external parameters involved in the validation are not verified in their own getters.
This function will verify the parameters by default. It will check whether the provided addons have duplicate or
invalid values, and raise an InvalidArgumentValueError if found. Besides, if monitoring is specified in
enable_addons but workspace_resource_id is not assigned, or virtual-node is specified but aci_subnet_name or
vnet_subnet_id is not, a RequiredArgumentMissingError will be raised.
This function will normalize the parameter by default. It will split the string into a list with "," as the
delimiter.
:return: empty list or list of strings
"""
return self._get_enable_addons(enable_validation=True)
    # pylint: disable=unused-argument
    def _get_workspace_resource_id(
        self, enable_validation: bool = False, read_only: bool = False, **kwargs
    ) -> Union[str, None]:
        """Internal function to dynamically obtain the value of workspace_resource_id according to the context.

        When workspace_resource_id is not assigned, dynamic completion will be triggered. Function
        "_ensure_default_log_analytics_workspace_for_monitoring" will be called to create a workspace with
        subscription_id and resource_group_name, which internally used ResourceManagementClient to send the request.

        This function supports the option of enable_validation. When enabled, it will check if workspace_resource_id is
        assigned but 'monitoring' is not specified in enable_addons, if so, raise a RequiredArgumentMissingError.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: string or None
        """
        # read the original value passed by the command
        workspace_resource_id = self.raw_param.get("workspace_resource_id")
        # try to read the property value corresponding to the parameter from the `mc` object
        read_from_mc = False
        if (
            self.mc and
            self.mc.addon_profiles and
            CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and
            self.mc.addon_profiles.get(
                CONST_MONITORING_ADDON_NAME
            ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID) is not None
        ):
            workspace_resource_id = self.mc.addon_profiles.get(
                CONST_MONITORING_ADDON_NAME
            ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID)
            read_from_mc = True
        # skip dynamic completion & validation if option read_only is specified
        if read_only:
            return workspace_resource_id
        # dynamic completion: may create an Azure resource as a side effect (see docstring)
        if not read_from_mc:
            if workspace_resource_id is None:
                # use default workspace if exists else create default workspace
                workspace_resource_id = (
                    _ensure_default_log_analytics_workspace_for_monitoring(
                        self.cmd,
                        self.get_subscription_id(),
                        self.get_resource_group_name(),
                    )
                )
            # normalize: strip surrounding spaces/slashes and ensure a single leading slash
            workspace_resource_id = "/" + workspace_resource_id.strip(" /")
        # validation
        if enable_validation:
            enable_addons = self._get_enable_addons(enable_validation=False)
            if workspace_resource_id and "monitoring" not in enable_addons:
                raise RequiredArgumentMissingError(
                    '"--workspace-resource-id" requires "--enable-addons monitoring".')
        return workspace_resource_id
def get_workspace_resource_id(self) -> Union[str, None]:
"""Dynamically obtain the value of workspace_resource_id according to the context.
When workspace_resource_id is not assigned, dynamic completion will be triggerd. Function
"_ensure_default_log_analytics_workspace_for_monitoring" will be called to create a workspace with
subscription_id and resource_group_name, which internally used ResourceManagementClient to send the request.
:return: string or None
"""
return self._get_workspace_resource_id(enable_validation=True)
# pylint: disable=no-self-use
def get_virtual_node_addon_os_type(self) -> str:
"""Helper function to obtain the os_type of virtual node addon.
Note: This is not a parameter of aks_create.
:return: string
"""
return "Linux"
def get_aci_subnet_name(self) -> Union[str, None]:
"""Obtain the value of aci_subnet_name.
:return: string or None
"""
# read the original value passed by the command
aci_subnet_name = self.raw_param.get("aci_subnet_name")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME) is not None
):
aci_subnet_name = self.mc.addon_profiles.get(
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME)
# this parameter does not need dynamic completion
# this parameter does not need validation
return aci_subnet_name
def get_appgw_name(self) -> Union[str, None]:
"""Obtain the value of appgw_name.
:return: string or None
"""
# read the original value passed by the command
appgw_name = self.raw_param.get("appgw_name")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME) is not None
):
appgw_name = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_name
def get_appgw_subnet_cidr(self) -> Union[str, None]:
"""Obtain the value of appgw_subnet_cidr.
:return: string or None
"""
# read the original value passed by the command
appgw_subnet_cidr = self.raw_param.get("appgw_subnet_cidr")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None
):
appgw_subnet_cidr = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_subnet_cidr
def get_appgw_id(self) -> Union[str, None]:
"""Obtain the value of appgw_id.
:return: string or None
"""
# read the original value passed by the command
appgw_id = self.raw_param.get("appgw_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID) is not None
):
appgw_id = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_id
def get_appgw_subnet_id(self) -> Union[str, None]:
"""Obtain the value of appgw_subnet_id.
:return: string or None
"""
# read the original value passed by the command
appgw_subnet_id = self.raw_param.get("appgw_subnet_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_ID) is not None
):
appgw_subnet_id = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_ID)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_subnet_id
def get_appgw_watch_namespace(self) -> Union[str, None]:
"""Obtain the value of appgw_watch_namespace.
:return: string or None
"""
# read the original value passed by the command
appgw_watch_namespace = self.raw_param.get("appgw_watch_namespace")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE) is not None
):
appgw_watch_namespace = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_watch_namespace
def get_enable_sgxquotehelper(self) -> bool:
"""Obtain the value of enable_sgxquotehelper.
:return: bool
"""
# read the original value passed by the command
enable_sgxquotehelper = self.raw_param.get("enable_sgxquotehelper")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_CONFCOM_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_CONFCOM_ADDON_NAME
).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) is not None
):
enable_sgxquotehelper = self.mc.addon_profiles.get(
CONST_CONFCOM_ADDON_NAME
).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) == "true"
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_sgxquotehelper
# pylint: disable=unused-argument
def _get_enable_aad(self, enable_validation: bool = False, **kwargs) -> bool:
"""Internal function to obtain the value of enable_aad.
This function supports the option of enable_validation. When enabled, if the value of enable_aad is True and
any of aad_client_app_id, aad_server_app_id or aad_server_app_secret is asssigned, a
MutuallyExclusiveArgumentError will be raised. If the value of enable_aad is False and the value of
enable_azure_rbac is True, a RequiredArgumentMissingError will be raised.
:return: bool
"""
# read the original value passed by the command
enable_aad = self.raw_param.get("enable_aad")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.managed is not None
):
enable_aad = self.mc.aad_profile.managed
# this parameter does not need dynamic completion
# validation
if enable_validation:
(
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
) = (
self.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()
)
if enable_aad:
if any(
[
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
]
):
raise MutuallyExclusiveArgumentError(
"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or "
"--aad-server-app-secret"
)
if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):
raise RequiredArgumentMissingError(
"--enable-azure-rbac can only be used together with --enable-aad"
)
return enable_aad
def get_enable_aad(self) -> bool:
"""Obtain the value of enable_aad.
This function will verify the parameter by default. If the value of enable_aad is True and any of
aad_client_app_id, aad_server_app_id or aad_server_app_secret is asssigned, a MutuallyExclusiveArgumentError
will be raised. If the value of enable_aad is False and the value of enable_azure_rbac is True,
a RequiredArgumentMissingError will be raised.
:return: bool
"""
return self._get_enable_aad(enable_validation=True)
def get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(
self,
) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:
"""Obtain the value of aad_client_app_id, aad_server_app_id and aad_server_app_secret.
This function will verify the parameters by default. If the value of enable_aad is True and any of
aad_client_app_id, aad_server_app_id or aad_server_app_secret is asssigned, a MutuallyExclusiveArgumentError
will be raised.
:return: a tuple of three elements: aad_client_app_id of string type or None, aad_server_app_id of string type
or None and aad_server_app_secret of string type or None.
"""
# get aad profile from `mc`
aad_profile = None
if self.mc:
aad_profile = self.mc.aad_profile
# read the original value passed by the command
aad_client_app_id = self.raw_param.get("aad_client_app_id")
# try to read the property value corresponding to the parameter from the `mc` object
if aad_profile and aad_profile.client_app_id is not None:
aad_client_app_id = aad_profile.client_app_id
# read the original value passed by the command
aad_server_app_id = self.raw_param.get("aad_server_app_id")
# try to read the property value corresponding to the parameter from the `mc` object
if aad_profile and aad_profile.server_app_id is not None:
aad_server_app_id = aad_profile.server_app_id
# read the original value passed by the command
aad_server_app_secret = self.raw_param.get("aad_server_app_secret")
# try to read the property value corresponding to the parameter from the `mc` object
if aad_profile and aad_profile.server_app_secret is not None:
aad_server_app_secret = aad_profile.server_app_secret
# these parameters do not need dynamic completion
# validation
enable_aad = self._get_enable_aad(enable_validation=False)
if enable_aad:
if any(
[
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
]
):
raise MutuallyExclusiveArgumentError(
"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or "
"--aad-server-app-secret"
)
return aad_client_app_id, aad_server_app_id, aad_server_app_secret
    # pylint: disable=unused-argument
    def _get_aad_tenant_id(self, read_only: bool = False, **kwargs) -> Union[str, None]:
        """Internal function to dynamically obtain the value of aad_tenant_id according to the context.

        When both aad_tenant_id and enable_aad are not assigned, and any of aad_client_app_id, aad_server_app_id or
        aad_server_app_secret is assigned, dynamic completion will be triggered. Class
        "azure.cli.core._profile.Profile" will be instantiated, and then call its "get_login_credentials" method to
        get the tenant of the deployment subscription.

        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: string or None
        """
        # read the original value passed by the command
        aad_tenant_id = self.raw_param.get("aad_tenant_id")
        # try to read the property value corresponding to the parameter from the `mc` object
        read_from_mc = False
        if (
            self.mc and
            self.mc.aad_profile and
            self.mc.aad_profile.tenant_id is not None
        ):
            aad_tenant_id = self.mc.aad_profile.tenant_id
            read_from_mc = True
        # skip dynamic completion & validation if option read_only is specified
        if read_only:
            return aad_tenant_id
        # dynamic completion: only applies to the legacy (non-managed) AAD flow
        if not read_from_mc and not self._get_enable_aad(
            enable_validation=False
        ):
            if aad_tenant_id is None and any(
                self.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()
            ):
                # fall back to the tenant of the logged-in deployment subscription
                profile = Profile(cli_ctx=self.cmd.cli_ctx)
                _, _, aad_tenant_id = profile.get_login_credentials()
        # this parameter does not need validation
        return aad_tenant_id
def get_aad_tenant_id(self) -> Union[str, None]:
"""Dynamically obtain the value of aad_server_app_secret according to the context.
When both aad_tenant_id and enable_aad are not assigned, and any of aad_client_app_id, aad_server_app_id or
aad_server_app_secret is asssigned, dynamic completion will be triggerd. Class
"azure.cli.core._profile.Profile" will be instantiated, and then call its "get_login_credentials" method to
get the tenant of the deployment subscription.
:return: string or None
"""
return self._get_aad_tenant_id()
def get_aad_admin_group_object_ids(self) -> Union[List[str], None]:
"""Obtain the value of aad_admin_group_object_ids.
This function will normalize the parameter by default. It will split the string into a list with "," as the
delimiter.
:return: empty list or list of strings, or None
"""
# read the original value passed by the command
aad_admin_group_object_ids = self.raw_param.get("aad_admin_group_object_ids")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.admin_group_object_i_ds is not None
):
aad_admin_group_object_ids = self.mc.aad_profile.admin_group_object_i_ds
read_from_mc = True
# keep None as None, but empty string ("") to empty list ([])
if not read_from_mc and aad_admin_group_object_ids is not None:
aad_admin_group_object_ids = aad_admin_group_object_ids.split(',') if aad_admin_group_object_ids else []
# this parameter does not need validation
return aad_admin_group_object_ids
# pylint: disable=unused-argument
def _get_disable_rbac(self, enable_validation: bool = False, **kwargs) -> Union[bool, None]:
"""Internal function to obtain the value of disable_rbac.
This function supports the option of enable_validation. When enabled, if the values of disable_rbac and
enable_azure_rbac are both True, a MutuallyExclusiveArgumentError will be raised. Besides, if the values of
enable_rbac and disable_rbac are both True, a MutuallyExclusiveArgumentError will be raised.
:return: bool or None
"""
# read the original value passed by the command
disable_rbac = self.raw_param.get("disable_rbac")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.enable_rbac is not None
):
disable_rbac = not self.mc.enable_rbac
# this parameter does not need dynamic completion
# validation
if enable_validation:
if disable_rbac and self._get_enable_azure_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--enable-azure-rbac cannot be used together with --disable-rbac"
)
if disable_rbac and self.get_enable_rbac():
raise MutuallyExclusiveArgumentError("specify either '--disable-rbac' or '--enable-rbac', not both.")
return disable_rbac
def get_disable_rbac(self) -> Union[bool, None]:
"""Obtain the value of disable_rbac.
This function will verify the parameter by default. If the values of disable_rbac and enable_azure_rbac are
both True, a MutuallyExclusiveArgumentError will be raised. Besides, if the values of enable_rbac and
disable_rbac are both True, a MutuallyExclusiveArgumentError will be raised.
:return: bool or None
"""
return self._get_disable_rbac(enable_validation=True)
def get_enable_rbac(self) -> Union[bool, None]:
"""Obtain the value of enable_rbac.
This function will verify the parameter by default. If the values of enable_rbac and disable_rbac are both True,
a MutuallyExclusiveArgumentError will be raised.
:return: bool or None
"""
# read the original value passed by the command
enable_rbac = self.raw_param.get("enable_rbac")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.enable_rbac is not None
):
enable_rbac = self.mc.enable_rbac
# this parameter does not need dynamic completion
# validation
if enable_rbac and self._get_disable_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError("specify either '--disable-rbac' or '--enable-rbac', not both.")
return enable_rbac
# pylint: disable=unused-argument
def _get_enable_azure_rbac(self, enable_validation: bool = False, **kwargs) -> bool:
"""Internal function to obtain the value of enable_azure_rbac.
This function supports the option of enable_validation. When enabled, if the values of disable_rbac and
enable_azure_rbac are both True, a MutuallyExclusiveArgumentError will be raised. If the value of enable_aad
is False and the value of enable_azure_rbac is True, a RequiredArgumentMissingError will be raised.
:return: bool
"""
# read the original value passed by the command
enable_azure_rbac = self.raw_param.get("enable_azure_rbac")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.enable_azure_rbac is not None
):
enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac
# this parameter does not need dynamic completion
# validation
if enable_validation:
if enable_azure_rbac and self._get_disable_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--enable-azure-rbac cannot be used together with --disable-rbac"
)
if enable_azure_rbac and not self._get_enable_aad(enable_validation=False):
raise RequiredArgumentMissingError(
"--enable-azure-rbac can only be used together with --enable-aad"
)
return enable_azure_rbac
def get_enable_azure_rbac(self) -> bool:
"""Obtain the value of enable_azure_rbac.
This function will verify the parameter by default. If the values of disable_rbac and enable_azure_rbac are
both True, a MutuallyExclusiveArgumentError will be raised. If the value of enable_aad is False and the value
of enable_azure_rbac is True, a RequiredArgumentMissingError will be raised.
:return: bool
"""
return self._get_enable_azure_rbac(enable_validation=True)
    def get_api_server_authorized_ip_ranges(self) -> Union[List[str], None]:
        """Obtain the value of api_server_authorized_ip_ranges.

        This function will verify the parameter by default. When api_server_authorized_ip_ranges is assigned, if
        load_balancer_sku equals to "basic", raise an InvalidArgumentValueError; if enable_private_cluster is
        specified, raise a MutuallyExclusiveArgumentError.

        This function will normalize the parameter by default. It will split the string into a list with "," as the
        delimiter.

        :return: empty list or list of strings, or None (update mode only, when the parameter was not provided)
        """
        # read the original value passed by the command
        api_server_authorized_ip_ranges = self.raw_param.get(
            "api_server_authorized_ip_ranges"
        )
        if self.decorator_mode == DecoratorMode.CREATE:
            # try to read the property value corresponding to the parameter from the `mc` object
            read_from_mc = False
            if (
                self.mc and
                self.mc.api_server_access_profile and
                self.mc.api_server_access_profile.authorized_ip_ranges is not None
            ):
                api_server_authorized_ip_ranges = (
                    self.mc.api_server_access_profile.authorized_ip_ranges
                )
                read_from_mc = True

            # normalize: split the comma-separated string; empty/None input becomes an empty list
            if not read_from_mc:
                api_server_authorized_ip_ranges = [
                    x.strip()
                    for x in (
                        api_server_authorized_ip_ranges.split(",")
                        if api_server_authorized_ip_ranges
                        else []
                    )
                ]
        elif self.decorator_mode == DecoratorMode.UPDATE:
            # normalize: None is preserved (parameter not provided), "" becomes an empty list
            if api_server_authorized_ip_ranges is not None:
                api_server_authorized_ip_ranges = [
                    x.strip()
                    for x in (
                        api_server_authorized_ip_ranges.split(",")
                        if api_server_authorized_ip_ranges
                        else []
                    )
                ]

        # validation (only when a non-empty list of ranges was provided)
        if api_server_authorized_ip_ranges:
            if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
                raise InvalidArgumentValueError(
                    "--api-server-authorized-ip-ranges can only be used with standard load balancer"
                )
            if self._get_enable_private_cluster(enable_validation=False):
                raise MutuallyExclusiveArgumentError(
                    "--api-server-authorized-ip-ranges is not supported for private cluster"
                )
        return api_server_authorized_ip_ranges
    # pylint: disable=unused-argument
    def _get_fqdn_subdomain(self, enable_validation: bool = False, **kwargs) -> Union[str, None]:
        """Internal function to obtain the value of fqdn_subdomain.

        This function supports the option of enable_validation. When enabled, it will check if both dns_name_prefix
        and fqdn_subdomain are assigned, if so, raise the MutuallyExclusiveArgumentError. It will also check when both
        private_dns_zone and fqdn_subdomain are assigned, if the value of private_dns_zone is
        CONST_PRIVATE_DNS_ZONE_SYSTEM, raise an InvalidArgumentValueError; Otherwise if the value of private_dns_zone
        is not a valid resource ID, raise an InvalidArgumentValueError.

        :return: string or None
        """
        # read the original value passed by the command
        fqdn_subdomain = self.raw_param.get("fqdn_subdomain")
        # try to read the property value corresponding to the parameter from the `mc` object
        # Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is
        # no such attribute.
        if (
            self.mc and
            hasattr(self.mc, "fqdn_subdomain") and
            self.mc.fqdn_subdomain is not None
        ):
            fqdn_subdomain = self.mc.fqdn_subdomain

        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            if fqdn_subdomain:
                # read_only avoids re-triggering dns_name_prefix's own dynamic completion/validation
                if self._get_dns_name_prefix(read_only=True):
                    raise MutuallyExclusiveArgumentError(
                        "--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
                    )
                private_dns_zone = self.get_private_dns_zone()
                if private_dns_zone:
                    if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
                        # a custom private dns zone must be a valid Azure resource ID
                        if not is_valid_resource_id(private_dns_zone):
                            raise InvalidArgumentValueError(
                                private_dns_zone + " is not a valid Azure resource ID."
                            )
                    else:
                        raise InvalidArgumentValueError(
                            "--fqdn-subdomain should only be used for private cluster with custom private dns zone"
                        )
        return fqdn_subdomain
def get_fqdn_subdomain(self) -> Union[str, None]:
"""Obtain the value of fqdn_subdomain.
This function will verify the parameter by default. It will check if both dns_name_prefix and fqdn_subdomain
are assigend, if so, raise the MutuallyExclusiveArgumentError. It will also check when both private_dns_zone
and fqdn_subdomain are assigned, if the value of private_dns_zone is CONST_PRIVATE_DNS_ZONE_SYSTEM, raise an
InvalidArgumentValueError; Otherwise if the value of private_dns_zone is not a valid resource ID, raise an
InvalidArgumentValueError.
:return: string or None
"""
return self._get_fqdn_subdomain(enable_validation=True)
    # pylint: disable=unused-argument
    def _get_enable_private_cluster(self, enable_validation: bool = False, **kwargs) -> bool:
        """Internal function to obtain the value of enable_private_cluster.

        This function supports the option of enable_validation. When enabled and enable_private_cluster is specified,
        if load_balancer_sku equals to basic, raise an InvalidArgumentValueError; if api_server_authorized_ip_ranges
        is assigned, raise an MutuallyExclusiveArgumentError; Otherwise when enable_private_cluster is not specified
        and disable_public_fqdn or private_dns_zone is assigned, raise an InvalidArgumentValueError.

        :return: bool
        """
        # read the original value passed by the command
        enable_private_cluster = self.raw_param.get("enable_private_cluster")
        # try to read the property value corresponding to the parameter from the `mc` object
        if (
            self.mc and
            self.mc.api_server_access_profile and
            self.mc.api_server_access_profile.enable_private_cluster is not None
        ):
            enable_private_cluster = self.mc.api_server_access_profile.enable_private_cluster

        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            if enable_private_cluster:
                # a private cluster requires the standard load balancer sku
                if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
                    raise InvalidArgumentValueError(
                        "Please use standard load balancer for private cluster"
                    )
                if self.get_api_server_authorized_ip_ranges():
                    raise MutuallyExclusiveArgumentError(
                        "--api-server-authorized-ip-ranges is not supported for private cluster"
                    )
            else:
                # options that only make sense for a private cluster must not be set on a public one
                if self.get_disable_public_fqdn():
                    raise InvalidArgumentValueError(
                        "--disable-public-fqdn should only be used with --enable-private-cluster"
                    )
                if self.get_private_dns_zone():
                    raise InvalidArgumentValueError(
                        "Invalid private dns zone for public cluster. It should always be empty for public cluster"
                    )
        return enable_private_cluster
def get_enable_private_cluster(self) -> bool:
"""Obtain the value of enable_private_cluster.
This function will verify the parameter by default. When enable_private_cluster is specified, if
load_balancer_sku equals to basic, raise an InvalidArgumentValueError; if api_server_authorized_ip_ranges
is assigned, raise an MutuallyExclusiveArgumentError; Otherwise when enable_private_cluster is not specified
and disable_public_fqdn or private_dns_zone is assigned, raise an InvalidArgumentValueError.
:return: bool
"""
return self._get_enable_private_cluster(enable_validation=True)
def get_disable_public_fqdn(self) -> bool:
"""Obtain the value of disable_public_fqdn.
This function will verify the parameter by default. If enable_private_cluster is not specified and
disable_public_fqdn is assigned, raise an InvalidArgumentValueError.
:return: bool
"""
# read the original value passed by the command
disable_public_fqdn = self.raw_param.get("disable_public_fqdn")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.api_server_access_profile and
self.mc.api_server_access_profile.enable_private_cluster_public_fqdn is not None
):
disable_public_fqdn = not self.mc.api_server_access_profile.enable_private_cluster_public_fqdn
# this parameter does not need dynamic completion
# validation
enable_private_cluster = self._get_enable_private_cluster(enable_validation=False)
if disable_public_fqdn and not enable_private_cluster:
raise InvalidArgumentValueError("--disable-public-fqdn should only be used with --enable-private-cluster")
return disable_public_fqdn
    def get_private_dns_zone(self) -> Union[str, None]:
        """Obtain the value of private_dns_zone.

        This function will verify the parameter by default. When private_dns_zone is assigned, if enable_private_cluster
        is not specified raise an InvalidArgumentValueError. It will also check when both private_dns_zone and
        fqdn_subdomain are assigned, if the value of private_dns_zone is CONST_PRIVATE_DNS_ZONE_SYSTEM, raise an
        InvalidArgumentValueError; Otherwise if the value of private_dns_zone is not a valid resource ID, raise an
        InvalidArgumentValueError.

        :return: string or None
        """
        # read the original value passed by the command
        private_dns_zone = self.raw_param.get("private_dns_zone")
        # try to read the property value corresponding to the parameter from the `mc` object
        if (
            self.mc and
            self.mc.api_server_access_profile and
            self.mc.api_server_access_profile.private_dns_zone is not None
        ):
            private_dns_zone = self.mc.api_server_access_profile.private_dns_zone

        # this parameter does not need dynamic completion
        # validation
        if private_dns_zone:
            # a private dns zone only makes sense for a private cluster
            if not self._get_enable_private_cluster(enable_validation=False):
                raise InvalidArgumentValueError(
                    "Invalid private dns zone for public cluster. It should always be empty for public cluster"
                )
            if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
                # a custom private dns zone must be a valid Azure resource ID
                if not is_valid_resource_id(private_dns_zone):
                    raise InvalidArgumentValueError(
                        private_dns_zone + " is not a valid Azure resource ID."
                    )
            else:
                # the system-managed zone cannot be combined with fqdn_subdomain
                if self._get_fqdn_subdomain(enable_validation=False):
                    raise InvalidArgumentValueError(
                        "--fqdn-subdomain should only be used for private cluster with custom private dns zone"
                    )
        return private_dns_zone
def get_assign_kubelet_identity(self) -> Union[str, None]:
"""Obtain the value of assign_kubelet_identity.
This function will verify the parameter by default. If assign_identity is not assigned but
assign_kubelet_identity is, a RequiredArgumentMissingError will be raised.
:return: string or None
"""
# read the original value passed by the command
assign_kubelet_identity = self.raw_param.get("assign_kubelet_identity")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.identity_profile and
self.mc.identity_profile.get("kubeletidentity", None) and
getattr(self.mc.identity_profile.get("kubeletidentity"), "resource_id") is not None
):
assign_kubelet_identity = getattr(self.mc.identity_profile.get("kubeletidentity"), "resource_id")
# this parameter does not need dynamic completion
# validation
if assign_kubelet_identity and not self._get_assign_identity(enable_validation=False):
raise RequiredArgumentMissingError(
"--assign-kubelet-identity can only be specified when --assign-identity is specified"
)
return assign_kubelet_identity
def get_auto_upgrade_channel(self) -> Union[str, None]:
"""Obtain the value of auto_upgrade_channel.
:return: string or None
"""
# read the original value passed by the command
auto_upgrade_channel = self.raw_param.get("auto_upgrade_channel")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.auto_upgrade_profile and
self.mc.auto_upgrade_profile.upgrade_channel is not None
):
auto_upgrade_channel = self.mc.auto_upgrade_profile.upgrade_channel
# this parameter does not need dynamic completion
# this parameter does not need validation
return auto_upgrade_channel
def get_node_osdisk_diskencryptionset_id(self) -> Union[str, None]:
"""Obtain the value of node_osdisk_diskencryptionset_id.
:return: string or None
"""
# read the original value passed by the command
node_osdisk_diskencryptionset_id = self.raw_param.get("node_osdisk_diskencryptionset_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.disk_encryption_set_id is not None
):
node_osdisk_diskencryptionset_id = self.mc.disk_encryption_set_id
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_diskencryptionset_id
    def get_cluster_autoscaler_profile(self) -> Union[Dict[str, str], None]:
        """Dynamically obtain the value of cluster_autoscaler_profile according to the context.

        In update mode, when cluster_autoscaler_profile is assigned and auto_scaler_profile in the `mc` object has also
        been set, dynamic completion will be triggered. We will first make a copy of the original configuration
        (extract the dictionary from the ManagedClusterPropertiesAutoScalerProfile object), and then update the copied
        dictionary with the dictionary of new options.

        :return: dictionary or None
        """
        # read the original value passed by the command
        cluster_autoscaler_profile = self.raw_param.get("cluster_autoscaler_profile")
        # try to read the property value corresponding to the parameter from the `mc` object
        if self.decorator_mode == DecoratorMode.CREATE:
            if self.mc and self.mc.auto_scaler_profile is not None:
                cluster_autoscaler_profile = self.mc.auto_scaler_profile

        # dynamic completion
        if self.decorator_mode == DecoratorMode.UPDATE:
            if cluster_autoscaler_profile and self.mc and self.mc.auto_scaler_profile:
                # shallow copy should be enough for string-to-string dictionary
                copy_of_raw_dict = self.mc.auto_scaler_profile.__dict__.copy()
                # CLI option keys use dashes while the SDK model attributes use underscores
                new_options_dict = dict(
                    (key.replace("-", "_"), value)
                    for (key, value) in cluster_autoscaler_profile.items()
                )
                copy_of_raw_dict.update(new_options_dict)
                # NOTE(review): __dict__ may also carry SDK bookkeeping attributes (e.g. additional_properties);
                # presumably they round-trip harmlessly through the model -- confirm.
                cluster_autoscaler_profile = copy_of_raw_dict
        # this parameter does not need validation
        return cluster_autoscaler_profile
def get_uptime_sla(self) -> bool:
"""Obtain the value of uptime_sla.
:return: bool
"""
# read the original value passed by the command
uptime_sla = self.raw_param.get("uptime_sla")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.sku and
self.mc.sku.tier is not None
):
uptime_sla = self.mc.sku.tier == "Paid"
# this parameter does not need dynamic completion
# this parameter does not need validation
return uptime_sla
def get_tags(self) -> Union[Dict[str, str], None]:
"""Obtain the value of tags.
:return: dictionary or None
"""
# read the original value passed by the command
tags = self.raw_param.get("tags")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.tags is not None
):
tags = self.mc.tags
# this parameter does not need dynamic completion
# this parameter does not need validation
return tags
def get_edge_zone(self) -> Union[str, None]:
"""Obtain the value of edge_zone.
:return: string or None
"""
# read the original value passed by the command
edge_zone = self.raw_param.get("edge_zone")
# try to read the property value corresponding to the parameter from the `mc` object
# Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is
# no such attribute.
if (
self.mc and
hasattr(self.mc, "extended_location") and
self.mc.extended_location and
self.mc.extended_location.name is not None
):
edge_zone = self.mc.extended_location.name
# this parameter does not need dynamic completion
# this parameter does not need validation
return edge_zone
def get_disable_local_accounts(self) -> bool:
"""Obtain the value of disable_local_accounts.
:return: bool
"""
# read the original value passed by the command
disable_local_accounts = self.raw_param.get("disable_local_accounts")
# this parameter does not need dynamic completion
# this parameter does not need validation
return disable_local_accounts
def get_client_id_from_identity_or_sp_profile(self) -> str:
"""Helper function to obtain the value of client_id from identity_profile or service_principal_profile.
Note: This is not a parameter of aks_update, and it will not be decorated into the `mc` object.
If client_id cannot be obtained, raise an UnknownError.
:return: string
"""
client_id = None
if check_is_msi_cluster(self.mc):
if self.mc.identity_profile is None or self.mc.identity_profile["kubeletidentity"] is None:
raise UnknownError(
"Unexpected error getting kubelet's identity for the cluster. "
"Please do not set --attach-acr or --detach-acr. "
"You can manually grant or revoke permission to the identity named "
"<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR."
)
client_id = self.mc.identity_profile["kubeletidentity"].client_id
elif self.mc and self.mc.service_principal_profile is not None:
client_id = self.mc.service_principal_profile.client_id
if not client_id:
raise UnknownError('Cannot get the AKS cluster\'s service principal.')
return client_id
class AKSCreateDecorator:
    def __init__(
        self,
        cmd: AzCliCommand,
        client: ContainerServiceClient,
        models: AKSModels,
        raw_parameters: Dict,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Internal controller of aks_create.

        Break down the all-in-one aks_create function into several relatively independent functions (some of them have
        a certain order dependency) that only focus on a specific profile or process a specific piece of logic.
        In addition, an overall control function is provided. By calling the aforementioned independent functions one
        by one, a complete ManagedCluster object is gradually decorated and finally requests are sent to create a
        cluster.

        :param cmd: the AzCliCommand object associated with the aks create command.
        :param client: the ContainerServiceClient used to send the create request.
        :param models: the AKSModels wrapper exposing the containerservice SDK models of a specific api version.
        :param raw_parameters: the dictionary of raw parameter values passed in from the command.
        :param resource_type: the ResourceType (api version) used to locate models not covered by `models`.
        """
        self.cmd = cmd
        self.client = client
        self.models = models
        # store the context in the process of assemble the ManagedCluster object
        self.context = AKSContext(cmd, raw_parameters, decorator_mode=DecoratorMode.CREATE)
        # `resource_type` is used to dynamically find the model (of a specific api version) provided by the
        # containerservice SDK, most models have been passed through the `models` parameter (instantiatied
        # from `AKSModels` (or `PreviewAKSModels` in aks-preview), where resource_type (i.e.,
        # api version) has been specified).
        self.resource_type = resource_type
    def init_mc(self) -> ManagedCluster:
        """Initialize a ManagedCluster object with several parameters and attach it to internal context.

        When location is not assigned, function "_get_rg_location" will be called to get the location of the provided
        resource group, which internally used ResourceManagementClient to send the request.

        :return: the ManagedCluster object
        """
        # Initialize a ManagedCluster object with mandatory parameter location and optional parameters tags, dns_prefix,
        # kubernetes_version, disable_rbac, node_osdisk_diskencryptionset_id, disable_local_accounts.
        mc = self.models.ManagedCluster(
            location=self.context.get_location(),
            tags=self.context.get_tags(),
            dns_prefix=self.context.get_dns_name_prefix(),
            kubernetes_version=self.context.get_kubernetes_version(),
            # when disable_rbac is not specified (None), "not None" is True, i.e. rbac is enabled by default
            enable_rbac=not self.context.get_disable_rbac(),
            disk_encryption_set_id=self.context.get_node_osdisk_diskencryptionset_id(),
            disable_local_accounts=self.context.get_disable_local_accounts(),
        )
        # attach mc to AKSContext so subsequent set_up_* steps can read values back from it
        self.context.attach_mc(mc)
        return mc
    def set_up_agent_pool_profiles(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up agent pool profiles for the ManagedCluster object.

        Builds a single Linux, system-mode agent pool profile from the context parameters and assigns it as the only
        element of mc.agent_pool_profiles.

        :param mc: the ManagedCluster object to decorate, must be an instance of self.models.ManagedCluster.
        :return: the ManagedCluster object
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )

        # these interrelated parameters are obtained together through a combined getter
        (
            node_count,
            enable_auto_scaling,
            min_count,
            max_count,
        ) = (
            self.context.get_node_count_and_enable_cluster_autoscaler_and_min_count_and_max_count()
        )
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            # Must be 12 chars or less before ACS RP adds to it
            name=self.context.get_nodepool_name(),
            tags=self.context.get_nodepool_tags(),
            node_labels=self.context.get_nodepool_labels(),
            count=node_count,
            vm_size=self.context.get_node_vm_size(),
            os_type="Linux",
            vnet_subnet_id=self.context.get_vnet_subnet_id(),
            proximity_placement_group_id=self.context.get_ppg(),
            availability_zones=self.context.get_zones(),
            enable_node_public_ip=self.context.get_enable_node_public_ip(),
            node_public_ip_prefix_id=self.context.get_node_public_ip_prefix_id(),
            enable_encryption_at_host=self.context.get_enable_encryption_at_host(),
            enable_ultra_ssd=self.context.get_enable_ultra_ssd(),
            max_pods=self.context.get_max_pods(),
            type=self.context.get_vm_set_type(),
            mode="System",
            os_disk_size_gb=self.context.get_node_osdisk_size(),
            os_disk_type=self.context.get_node_osdisk_type(),
            min_count=min_count,
            max_count=max_count,
            enable_auto_scaling=enable_auto_scaling,
        )
        mc.agent_pool_profiles = [agent_pool_profile]
        return mc
def set_up_linux_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up linux profile for the ManagedCluster object.
Linux profile is just used for SSH access to VMs, so it will be omitted if --no-ssh-key option was specified.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
ssh_key_value, no_ssh_key = self.context.get_ssh_key_value_and_no_ssh_key()
if not no_ssh_key:
ssh_config = self.models.ContainerServiceSshConfiguration(
public_keys=[
self.models.ContainerServiceSshPublicKey(
key_data=ssh_key_value
)
]
)
linux_profile = self.models.ContainerServiceLinuxProfile(
admin_username=self.context.get_admin_username(), ssh=ssh_config
)
mc.linux_profile = linux_profile
return mc
def set_up_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up windows profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
(
windows_admin_username,
windows_admin_password,
) = self.context.get_windows_admin_username_and_password()
if windows_admin_username or windows_admin_password:
windows_license_type = None
if self.context.get_enable_ahub():
windows_license_type = "Windows_Server"
# this would throw an error if windows_admin_username is empty (the user enters an empty
# string after being prompted), since admin_username is a required parameter
windows_profile = self.models.ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type,
)
mc.windows_profile = windows_profile
return mc
def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up service principal profile for the ManagedCluster object.
The function "_ensure_aks_service_principal" will be called if the user provides an incomplete sp and secret
pair, which internally used GraphRbacManagementClient to send the request to create sp.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
# If customer explicitly provide a service principal, disable managed identity.
(
service_principal,
client_secret,
) = self.context.get_service_principal_and_client_secret()
enable_managed_identity = self.context.get_enable_managed_identity()
# Skip create service principal profile for the cluster if the cluster enables managed identity
# and customer doesn't explicitly provide a service principal.
if not (
enable_managed_identity and
not service_principal and
not client_secret
):
service_principal_profile = (
self.models.ManagedClusterServicePrincipalProfile(
client_id=service_principal, secret=client_secret
)
)
mc.service_principal_profile = service_principal_profile
return mc
def process_add_role_assignment_for_vnet_subnet(self, mc: ManagedCluster) -> None:
"""Add role assignment for vent subnet.
This function will store an intermediate need_post_creation_vnet_permission_granting.
The function "subnet_role_assignment_exists" will be called to verify if the role assignment already exists for
the subnet, which internally used AuthorizationManagementClient to send the request.
The wrapper function "get_identity_by_msi_client" will be called by "get_user_assigned_identity_client_id" to
get the identity object, which internally use ManagedServiceIdentityClient to send the request.
The function "_add_role_assignment" will be called to add role assignment for the subnet, which internally used
AuthorizationManagementClient to send the request.
:return: None
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
need_post_creation_vnet_permission_granting = False
vnet_subnet_id = self.context.get_vnet_subnet_id()
skip_subnet_role_assignment = (
self.context.get_skip_subnet_role_assignment()
)
if (
vnet_subnet_id and
not skip_subnet_role_assignment and
not subnet_role_assignment_exists(self.cmd, vnet_subnet_id)
):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
service_principal_profile = mc.service_principal_profile
assign_identity = self.context.get_assign_identity()
if service_principal_profile is None and not assign_identity:
msg = (
"It is highly recommended to use USER assigned identity "
"(option --assign-identity) when you want to bring your own"
"subnet, which will have no latency for the role assignment to "
"take effect. When using SYSTEM assigned identity, "
"azure-cli will grant Network Contributor role to the "
"system assigned identity after the cluster is created, and "
"the role assignment will take some time to take effect, see "
"https://docs.microsoft.com/azure/aks/use-managed-identity, "
"proceed to create cluster with system assigned identity?"
)
if not self.context.get_yes() and not prompt_y_n(
msg, default="n"
):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = (
self.context.get_user_assigned_identity_client_id()
)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(
self.cmd,
"Network Contributor",
identity_client_id,
scope=scope,
):
logger.warning(
"Could not create a role assignment for subnet. Are you an Owner on this subscription?"
)
# store need_post_creation_vnet_permission_granting as an intermediate
self.context.set_intermediate(
"need_post_creation_vnet_permission_granting",
need_post_creation_vnet_permission_granting,
overwrite_exists=True,
)
def process_attach_acr(self, mc: ManagedCluster) -> None:
"""Attach acr for the cluster.
The function "_ensure_aks_acr" will be called to create an AcrPull role assignment for the acr, which
internally used AuthorizationManagementClient to send the request.
:return: None
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
attach_acr = self.context.get_attach_acr()
if attach_acr:
# If enable_managed_identity, attach acr operation will be handled after the cluster is created
if not self.context.get_enable_managed_identity():
service_principal_profile = mc.service_principal_profile
_ensure_aks_acr(
self.cmd,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
# not actually used
subscription_id=self.context.get_subscription_id(),
)
    def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up network profile for the ManagedCluster object.

        Build load balancer profile, verify outbound type and load balancer sku first, then set up network profile.

        :param mc: the ManagedCluster object to decorate, must be an instance of self.models.ManagedCluster.
        :return: the ManagedCluster object
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )

        # build load balancer profile, which is part of the network profile
        load_balancer_profile = create_load_balancer_profile(
            self.context.get_load_balancer_managed_outbound_ip_count(),
            self.context.get_load_balancer_outbound_ips(),
            self.context.get_load_balancer_outbound_ip_prefixes(),
            self.context.get_load_balancer_outbound_ports(),
            self.context.get_load_balancer_idle_timeout(),
            models=self.models.lb_models,
        )

        # verify outbound type
        # Note: Validation internally depends on load_balancer_sku, which is a temporary value that is
        # dynamically completed.
        outbound_type = self.context.get_outbound_type(
            load_balancer_profile=load_balancer_profile
        )

        # verify load balancer sku
        load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())

        # verify network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy
        network_plugin = self.context.get_network_plugin()
        (
            pod_cidr,
            service_cidr,
            dns_service_ip,
            docker_bridge_address,
            network_policy,
        ) = (
            self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()
        )
        network_profile = None
        # any() checks whether the user explicitly configured any aspect of networking
        if any(
            [
                network_plugin,
                pod_cidr,
                service_cidr,
                dns_service_ip,
                docker_bridge_address,
                network_policy,
            ]
        ):
            # Attention: RP would return UnexpectedLoadBalancerSkuForCurrentOutboundConfiguration internal server error
            # if load_balancer_sku is set to basic and load_balancer_profile is assigned.
            # Attention: SDK provides default values for pod_cidr, service_cidr, dns_service_ip, docker_bridge_cidr
            # and outbound_type, and they might be overwritten to None.
            network_profile = self.models.ContainerServiceNetworkProfile(
                network_plugin=network_plugin,
                pod_cidr=pod_cidr,
                service_cidr=service_cidr,
                dns_service_ip=dns_service_ip,
                docker_bridge_cidr=docker_bridge_address,
                network_policy=network_policy,
                load_balancer_sku=load_balancer_sku,
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        else:
            if load_balancer_sku == "standard" or load_balancer_profile:
                network_profile = self.models.ContainerServiceNetworkProfile(
                    network_plugin="kubenet",
                    load_balancer_sku=load_balancer_sku,
                    load_balancer_profile=load_balancer_profile,
                    outbound_type=outbound_type,
                )
            if load_balancer_sku == "basic":
                # load balancer sku must be standard when load balancer profile is provided
                network_profile = self.models.ContainerServiceNetworkProfile(
                    load_balancer_sku=load_balancer_sku,
                )
        mc.network_profile = network_profile
        return mc
# pylint: disable=too-many-statements
    def set_up_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up addon profiles for the ManagedCluster object.
        This function will store following intermediates: monitoring, enable_virtual_node and
        ingress_appgw_addon_enabled.
        The function "_ensure_container_insights_for_monitoring" will be called to create a deployment which publishes
        the Container Insights solution to the Log Analytics workspace.
        When workspace_resource_id is not assigned, function "_ensure_default_log_analytics_workspace_for_monitoring"
        will be called to create a workspace, which internally used ResourceManagementClient to send the request.
        :return: the ManagedCluster object
        :raises CLIInternalError: when mc is not a ManagedCluster instance
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        ManagedClusterAddonProfile = self.models.ManagedClusterAddonProfile
        addon_profiles = {}
        # error out if any unrecognized or duplicate addon provided
        # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
        # error out if '--enable-addons=virtual-node' is set but aci_subnet_name and vnet_subnet_id are not
        # NOTE(review): the validation described above presumably happens inside get_enable_addons — confirm
        addons = self.context.get_enable_addons()
        # each recognized addon is removed from the list as it is processed
        if 'http_application_routing' in addons:
            addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
                enabled=True)
            addons.remove('http_application_routing')
        if 'kube-dashboard' in addons:
            addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
                enabled=True)
            addons.remove('kube-dashboard')
        # TODO: can we help the user find a workspace resource ID?
        if 'monitoring' in addons:
            workspace_resource_id = self.context.get_workspace_resource_id()
            addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
                enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
            # post-process, create a deployment
            _ensure_container_insights_for_monitoring(self.cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
            # set intermediate
            self.context.set_intermediate("monitoring", True, overwrite_exists=True)
            addons.remove('monitoring')
        if 'azure-policy' in addons:
            addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
                enabled=True)
            addons.remove('azure-policy')
        if 'virtual-node' in addons:
            aci_subnet_name = self.context.get_aci_subnet_name()
            # TODO: how about aciConnectorwindows, what is its addon name?
            # the os type is appended to the base addon name (e.g. "aciConnector" + "Linux")
            os_type = self.context.get_virtual_node_addon_os_type()
            addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
                enabled=True,
                config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
            )
            # set intermediate
            self.context.set_intermediate("enable_virtual_node", True, overwrite_exists=True)
            addons.remove('virtual-node')
        if 'ingress-appgw' in addons:
            addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
            appgw_name = self.context.get_appgw_name()
            appgw_subnet_cidr = self.context.get_appgw_subnet_cidr()
            appgw_id = self.context.get_appgw_id()
            appgw_subnet_id = self.context.get_appgw_subnet_id()
            appgw_watch_namespace = self.context.get_appgw_watch_namespace()
            # only the options that were actually provided end up in the addon config
            if appgw_name is not None:
                addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
            if appgw_subnet_cidr is not None:
                addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
            if appgw_id is not None:
                addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
            if appgw_subnet_id is not None:
                addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
            if appgw_watch_namespace is not None:
                addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
            # set intermediate
            self.context.set_intermediate("ingress_appgw_addon_enabled", True, overwrite_exists=True)
            addons.remove('ingress-appgw')
        if 'confcom' in addons:
            # the sgx quote helper is off by default and only enabled on explicit request
            addon_profile = ManagedClusterAddonProfile(
                enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
            if self.context.get_enable_sgxquotehelper():
                addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
            addons.remove('confcom')
        mc.addon_profiles = addon_profiles
        return mc
def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up aad profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
aad_profile = None
enable_aad = self.context.get_enable_aad()
if enable_aad:
aad_profile = self.models.ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=self.context.get_enable_azure_rbac(),
# ids -> i_ds due to track 2 naming issue
admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),
tenant_id=self.context.get_aad_tenant_id()
)
else:
(
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
) = (
self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()
)
aad_tenant_id = self.context.get_aad_tenant_id()
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
aad_profile = self.models.ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
mc.aad_profile = aad_profile
return mc
def set_up_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up api server access profile and fqdn subdomain for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
api_server_access_profile = None
api_server_authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()
enable_private_cluster = self.context.get_enable_private_cluster()
disable_public_fqdn = self.context.get_disable_public_fqdn()
private_dns_zone = self.context.get_private_dns_zone()
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile(
authorized_ip_ranges=api_server_authorized_ip_ranges,
enable_private_cluster=True if enable_private_cluster else None,
enable_private_cluster_public_fqdn=False if disable_public_fqdn else None,
private_dns_zone=private_dns_zone
)
mc.api_server_access_profile = api_server_access_profile
fqdn_subdomain = self.context.get_fqdn_subdomain()
mc.fqdn_subdomain = fqdn_subdomain
return mc
def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up identity for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
identity = None
enable_managed_identity = self.context.get_enable_managed_identity()
assign_identity = self.context.get_assign_identity()
if enable_managed_identity and not assign_identity:
identity = self.models.ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
}
identity = self.models.ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
mc.identity = identity
return mc
    def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up identity profile for the ManagedCluster object.
        The wrapper function "get_identity_by_msi_client" will be called (by "get_user_assigned_identity_object_id") to
        get the identity object, which internally use ManagedServiceIdentityClient to send the request.
        The function "_ensure_cluster_identity_permission_on_kubelet_identity" will be called to create a role
        assignment if necessary, which internally used AuthorizationManagementClient to send the request.
        :return: the ManagedCluster object
        :raises CLIInternalError: when mc is not a ManagedCluster instance
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        identity_profile = None
        assign_kubelet_identity = self.context.get_assign_kubelet_identity()
        if assign_kubelet_identity:
            # resolve the kubelet identity to obtain its client id and principal id (MSI client request)
            kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)
            identity_profile = {
                'kubeletidentity': self.models.UserAssignedIdentity(
                    resource_id=assign_kubelet_identity,
                    client_id=kubelet_identity.client_id,
                    object_id=kubelet_identity.principal_id
                )
            }
            cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()
            # ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
            _ensure_cluster_identity_permission_on_kubelet_identity(
                self.cmd,
                cluster_identity_object_id,
                assign_kubelet_identity)
        mc.identity_profile = identity_profile
        return mc
def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up auto upgrade profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
auto_upgrade_profile = None
auto_upgrade_channel = self.context.get_auto_upgrade_channel()
if auto_upgrade_channel:
auto_upgrade_profile = self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=auto_upgrade_channel)
mc.auto_upgrade_profile = auto_upgrade_profile
return mc
def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up autoscaler profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()
mc.auto_scaler_profile = cluster_autoscaler_profile
return mc
def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up sku (uptime sla) for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
if self.context.get_uptime_sla():
mc.sku = self.models.ManagedClusterSKU(
name="Basic",
tier="Paid"
)
return mc
def set_up_extended_location(self, mc: ManagedCluster) -> ManagedCluster:
"""Set up extended location (edge zone) for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
edge_zone = self.context.get_edge_zone()
if edge_zone:
mc.extended_location = self.models.ExtendedLocation(
name=edge_zone,
type=self.models.ExtendedLocationTypes.EDGE_ZONE
)
return mc
def build_custom_headers(self, mc: ManagedCluster) -> None:
"""Build a dictionary contains custom headers.
This function will store an intermediate custom_headers.
:return: None
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
# Add AAD session key to header.
# If principal_obj is None, we will not add this header, this can happen when the cluster enables managed
# identity. In this case, the header is useless and that's OK to not add this header.
custom_headers = None
if mc.service_principal_profile:
custom_headers = {'Ocp-Aad-Session-Key': self.context.get_intermediate("aad_session_key")}
self.context.set_intermediate("custom_headers", custom_headers, overwrite_exists=True)
    def construct_default_mc_profile(self) -> ManagedCluster:
        """The overall controller used to construct the default ManagedCluster profile.
        The completely constructed ManagedCluster object will later be passed as a parameter to the underlying SDK
        (mgmt-containerservice) to send the actual request.
        Note: the steps below have order dependencies (e.g. the network profile reads values that
        earlier steps validated), so their sequence must not be changed casually.
        :return: the ManagedCluster object
        """
        # initialize the ManagedCluster object
        mc = self.init_mc()
        # set up agent pool profile(s)
        mc = self.set_up_agent_pool_profiles(mc)
        # set up linux profile (for ssh access)
        mc = self.set_up_linux_profile(mc)
        # set up windows profile
        mc = self.set_up_windows_profile(mc)
        # set up service principal profile
        mc = self.set_up_service_principal_profile(mc)
        # add role assignment for vnet subnet
        self.process_add_role_assignment_for_vnet_subnet(mc)
        # attach acr (add role assignment for acr)
        self.process_attach_acr(mc)
        # set up network profile
        mc = self.set_up_network_profile(mc)
        # set up addon profiles
        mc = self.set_up_addon_profiles(mc)
        # set up aad profile
        mc = self.set_up_aad_profile(mc)
        # set up api server access profile and fqdn subdomain
        mc = self.set_up_api_server_access_profile(mc)
        # set up identity
        mc = self.set_up_identity(mc)
        # set up identity profile
        mc = self.set_up_identity_profile(mc)
        # set up auto upgrade profile
        mc = self.set_up_auto_upgrade_profile(mc)
        # set up auto scaler profile
        mc = self.set_up_auto_scaler_profile(mc)
        # set up sku
        mc = self.set_up_sku(mc)
        # set up extended location
        mc = self.set_up_extended_location(mc)
        # build custom header
        self.build_custom_headers(mc)
        return mc
    def create_mc(self, mc: ManagedCluster) -> ManagedCluster:
        """Send request to create a real managed cluster.
        The function "_put_managed_cluster_ensuring_permission" will be called to use the ContainerServiceClient to
        send a request to create a real managed cluster, and also add necessary role assignments for some optional
        components.
        :return: the ManagedCluster object
        :raises CLIInternalError: when mc is not a ManagedCluster instance
        :raises CloudError: when the request keeps failing after all retries
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        # Due to SPN replication latency, we do a few retries here
        max_retry = 30
        retry_exception = Exception(None)
        for _ in range(0, max_retry):
            try:
                created_cluster = _put_managed_cluster_ensuring_permission(
                    self.cmd,
                    self.client,
                    self.context.get_subscription_id(),
                    self.context.get_resource_group_name(),
                    self.context.get_name(),
                    mc,
                    self.context.get_intermediate("monitoring"),
                    self.context.get_intermediate("ingress_appgw_addon_enabled"),
                    self.context.get_intermediate("enable_virtual_node"),
                    self.context.get_intermediate("need_post_creation_vnet_permission_granting"),
                    self.context.get_vnet_subnet_id(),
                    self.context.get_enable_managed_identity(),
                    self.context.get_attach_acr(),
                    self.context.get_intermediate("custom_headers"),
                    self.context.get_no_wait())
                return created_cluster
            except CloudError as ex:
                retry_exception = ex
                # the SPN may not have replicated to the target AAD tenant yet; wait and retry
                if 'not found in Active Directory tenant' in ex.message:
                    time.sleep(3)
                else:
                    raise ex
        # all retries exhausted: surface the last CloudError we captured
        raise retry_exception
class AKSUpdateDecorator:
    def __init__(
        self,
        cmd: AzCliCommand,
        client: ContainerServiceClient,
        models: AKSModels,
        raw_parameters: Dict,
    ):
        """Internal controller of aks_update.
        Break down the all-in-one aks_update function into several relatively independent functions (some of them have
        a certain order dependency) that only focus on a specific profile or process a specific piece of logic.
        In addition, an overall control function is provided. By calling the aforementioned independent functions one
        by one, a complete ManagedCluster object is gradually updated and finally requests are sent to update an
        existing cluster.
        :param cmd: the AzCliCommand for the current invocation
        :param client: the ContainerServiceClient used to fetch/update the cluster
        :param models: the AKSModels bundle of SDK model classes
        :param raw_parameters: dictionary of the raw command parameters
        """
        self.cmd = cmd
        self.client = client
        self.models = models
        # store the context in the process of assembling the ManagedCluster object
        self.context = AKSContext(cmd, raw_parameters, decorator_mode=DecoratorMode.UPDATE)
def check_raw_parameters(self):
"""Helper function to check whether any parameters are set.
If the values of all the parameters are the default values, the command execution will be terminated early and
raise a RequiredArgumentMissingError. Neither the request to fetch or update the ManagedCluster object will be
sent.
:return: None
"""
# exclude some irrelevant or mandatory parameters
excluded_keys = ("cmd", "client", "resource_group_name", "name")
# check whether the remaining parameters are set
# the default value None or False (and other empty values, like empty string) will be considered as not set
is_changed = any(v for k, v in self.context.raw_param.items() if k not in excluded_keys)
# special cases
# some parameters support the use of empty string or dictionary to update/remove previously set values
is_default = (
self.context.get_cluster_autoscaler_profile() is None and
self.context.get_api_server_authorized_ip_ranges() is None
)
if not is_changed and is_default:
# Note: Uncomment the followings to automatically generate the error message.
# option_names = [
# '"{}"'.format(format_parameter_name_to_option_name(x))
# for x in self.context.raw_param.keys()
# if x not in excluded_keys
# ]
# error_msg = "Please specify one or more of {}.".format(
# " or ".join(option_names)
# )
# raise RequiredArgumentMissingError(error_msg)
raise RequiredArgumentMissingError(
'Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--load-balancer-managed-outbound-ip-count" or'
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or'
'"--load-balancer-outbound-ports" or'
'"--load-balancer-idle-timeout" or'
'"--auto-upgrade-channel" or '
'"--attach-acr" or "--detach-acr" or'
'"--uptime-sla" or'
'"--no-uptime-sla" or '
'"--api-server-authorized-ip-ranges" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--windows-admin-password" or '
'"--enable-managed-identity" or '
'"--assign-identity" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac" or '
'"--enable-public-fqdn" or '
'"--disable-public-fqdn"'
)
def fetch_mc(self) -> ManagedCluster:
"""Get the ManagedCluster object currently in use and attach it to internal context.
Internally send request using ContainerServiceClient and parameters name (cluster) and resource group name.
:return: the ManagedCluster object
"""
mc = self.client.get(self.context.get_resource_group_name(), self.context.get_name())
# attach mc to AKSContext
self.context.attach_mc(mc)
return mc
def update_auto_scaler_profile(self, mc):
"""Update autoscaler profile for the ManagedCluster object.
:return: the ManagedCluster object
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
(
update_cluster_autoscaler,
enable_cluster_autoscaler,
disable_cluster_autoscaler,
min_count,
max_count,
) = (
self.context.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
)
if update_cluster_autoscaler or enable_cluster_autoscaler:
mc.agent_pool_profiles[0].enable_auto_scaling = True
mc.agent_pool_profiles[0].min_count = int(min_count)
mc.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
mc.agent_pool_profiles[0].enable_auto_scaling = False
mc.agent_pool_profiles[0].min_count = None
mc.agent_pool_profiles[0].max_count = None
cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()
if cluster_autoscaler_profile is not None:
# update profile (may clear profile with empty dictionary)
mc.auto_scaler_profile = cluster_autoscaler_profile
return mc
def process_attach_detach_acr(self, mc: ManagedCluster) -> None:
"""Attach or detach acr for the cluster.
The function "_ensure_aks_acr" will be called to create or delete an AcrPull role assignment for the acr, which
internally used AuthorizationManagementClient to send the request.
:return: None
"""
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
subscription_id = self.context.get_subscription_id()
client_id = self.context.get_client_id_from_identity_or_sp_profile()
attach_acr = self.context.get_attach_acr()
detach_acr = self.context.get_detach_acr()
if attach_acr:
_ensure_aks_acr(self.cmd,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(self.cmd,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
    def update_default_mc_profile(self) -> ManagedCluster:
        """The overall controller used to update the default ManagedCluster profile.
        Note: To reduce the risk of regression introduced by refactoring, this function is not complete and is being
        implemented gradually.
        The completely updated ManagedCluster object will later be passed as a parameter to the underlying SDK
        (mgmt-containerservice) to send the actual request.
        :return: the ManagedCluster object
        """
        # check raw parameters: fail fast before any network round trip when nothing would change
        self.check_raw_parameters()
        # fetch the ManagedCluster object
        mc = self.fetch_mc()
        # update auto scaler profile
        mc = self.update_auto_scaler_profile(mc)
        # attach or detach acr (add or delete role assignment for acr)
        self.process_attach_detach_acr(mc)
        return mc
def update_mc(self) -> ManagedCluster:
"""Send request to update the existing managed cluster.
Note: To reduce the risk of regression introduced by refactoring, this function is not complete and is being
implemented gradually.
The function "_put_managed_cluster_ensuring_permission" will be called to use the ContainerServiceClient to
send a reqeust to update the existing managed cluster, and also add necessary role assignments for some optional
components.
:return: the ManagedCluster object
"""
| 44.806909 | 120 | 0.656979 |
import re
import sys
import time
from distutils.version import StrictVersion
from typing import Any, Dict, List, Tuple, TypeVar, Union
from azure.cli.command_modules.acs._consts import (
ADDONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_OUTBOUND_TYPE_LOAD_BALANCER,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
CONST_PRIVATE_DNS_ZONE_SYSTEM,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
DecoratorMode,
)
from azure.cli.command_modules.acs.custom import (
_add_role_assignment,
_ensure_aks_acr,
_ensure_aks_service_principal,
_ensure_cluster_identity_permission_on_kubelet_identity,
_ensure_container_insights_for_monitoring,
_ensure_default_log_analytics_workspace_for_monitoring,
_get_rg_location,
_get_user_assigned_identity,
_put_managed_cluster_ensuring_permission,
create_load_balancer_profile,
set_load_balancer_sku,
subnet_role_assignment_exists,
)
from azure.cli.core import AzCommandsLoader
from azure.cli.core._profile import Profile
from azure.cli.core.azclierror import (
ArgumentUsageError,
CLIInternalError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
NoTTYError,
RequiredArgumentMissingError,
UnknownError,
)
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import truncate_text
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt, prompt_pass, prompt_y_n
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id
logger = get_logger(__name__)
ContainerServiceClient = TypeVar("ContainerServiceClient")
Identity = TypeVar("Identity")
ManagedCluster = TypeVar("ManagedCluster")
ManagedClusterLoadBalancerProfile = TypeVar("ManagedClusterLoadBalancerProfile")
ResourceReference = TypeVar("ResourceReference")
def format_parameter_name_to_option_name(parameter_name: str) -> str:
    """Convert a snake_case parameter name to its CLI option form.

    e.g. "enable_managed_identity" -> "--enable-managed-identity"

    :return: str
    """
    return "--{}".format(parameter_name.replace("_", "-"))
def safe_list_get(li: List, idx: int, default: Any = None) -> Any:
    """Get an element of a list by index, falling back to a default value.

    Note: when `li` is not a list, None is returned (not the default).

    :return: an element of li, the default value, or None
    """
    if not isinstance(li, list):
        return None
    try:
        return li[idx]
    except IndexError:
        return default
def safe_lower(obj: Any) -> Any:
    """Return the lowercased value for str inputs; return non-str inputs unchanged.

    :return: Any
    """
    return obj.lower() if isinstance(obj, str) else obj
def check_is_msi_cluster(mc: ManagedCluster) -> bool:
    """Check whether the given cluster uses a managed service identity (MSI).

    :return: bool
    """
    if not (mc and mc.identity and mc.identity.type is not None):
        return False
    # case-insensitive match against the two managed identity kinds
    return mc.identity.type.casefold() in ("systemassigned", "userassigned")
def validate_counts_in_autoscaler(
    node_count,
    enable_cluster_autoscaler,
    min_count,
    max_count,
    decorator_mode,
) -> None:
    """Check the consistency of the autoscaler-related counts.

    When the autoscaler is enabled, min_count and max_count are mandatory, min_count must not
    exceed max_count, and (in CREATE mode) node_count must lie within [min_count, max_count].
    When it is disabled, providing min_count or max_count is an error.

    :return: None
    :raises RequiredArgumentMissingError: when a mandatory count or flag is missing
    :raises InvalidArgumentValueError: when the counts are inconsistent
    """
    if not enable_cluster_autoscaler:
        # counts without the enable/update flag are an error
        if min_count is None and max_count is None:
            return
        option_name = "--enable-cluster-autoscaler"
        if decorator_mode == DecoratorMode.UPDATE:
            option_name += " or --update-cluster-autoscaler"
        raise RequiredArgumentMissingError(
            "min-count and max-count are required for {}, please use the flag".format(
                option_name
            )
        )
    if min_count is None or max_count is None:
        raise RequiredArgumentMissingError(
            "Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
        )
    if min_count > max_count:
        raise InvalidArgumentValueError(
            "Value of min-count should be less than or equal to value of max-count"
        )
    if decorator_mode == DecoratorMode.CREATE and not min_count <= node_count <= max_count:
        raise InvalidArgumentValueError(
            "node-count is not in the range of min-count and max-count"
        )
class AKSModels:
    """Store the SDK model classes used by the aks commands.

    Every model class is resolved exactly once through the command loader (operation
    group "managed_clusters") and exposed as an attribute of the same name; the load
    balancer related models are additionally collected in the ``lb_models`` dictionary.
    """

    # names of the model classes exposed directly as attributes of this object
    _MODEL_NAMES = (
        "ManagedCluster",
        "ManagedClusterWindowsProfile",
        "ManagedClusterSKU",
        "ContainerServiceNetworkProfile",
        "ContainerServiceLinuxProfile",
        "ManagedClusterServicePrincipalProfile",
        "ContainerServiceSshConfiguration",
        "ContainerServiceSshPublicKey",
        "ManagedClusterAADProfile",
        "ManagedClusterAutoUpgradeProfile",
        "ManagedClusterAgentPoolProfile",
        "ManagedClusterIdentity",
        "UserAssignedIdentity",
        "ManagedServiceIdentityUserAssignedIdentitiesValue",
        "ManagedClusterAddonProfile",
        "ManagedClusterAPIServerAccessProfile",
        "ExtendedLocation",
        "ExtendedLocationTypes",
        "ManagedClusterPropertiesAutoScalerProfile",
    )
    # names of the load balancer related model classes collected in lb_models
    _LB_MODEL_NAMES = (
        "ManagedClusterLoadBalancerProfile",
        "ManagedClusterLoadBalancerProfileManagedOutboundIPs",
        "ManagedClusterLoadBalancerProfileOutboundIPs",
        "ManagedClusterLoadBalancerProfileOutboundIPPrefixes",
        "ResourceReference",
    )

    def __init__(
        self,
        cmd: AzCommandsLoader,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Fetch all model classes from the command loader.

        :param cmd: the command loader used to resolve the model classes
        :param resource_type: the (versioned) resource type to load models for
        """
        self.__cmd = cmd
        self.resource_type = resource_type
        # fetch each model through one shared helper instead of ~20 copies of the
        # same get_models(...) boilerplate call
        for model_name in self._MODEL_NAMES:
            setattr(self, model_name, self.__fetch_model(model_name))
        self.init_lb_models()

    def __fetch_model(self, model_name: str):
        """Resolve a single model class by name from the command loader."""
        return self.__cmd.get_models(
            model_name,
            resource_type=self.resource_type,
            operation_group="managed_clusters",
        )

    def init_lb_models(self) -> None:
        """Initialize ``lb_models``, the dictionary of load balancer related models."""
        self.lb_models = {
            model_name: self.__fetch_model(model_name)
            for model_name in self._LB_MODEL_NAMES
        }
class AKSContext:
    def __init__(self, cmd: AzCliCommand, raw_parameters: Dict, decorator_mode):
        """Shared context storing the raw parameters, intermediate values and the `mc` object.

        :param cmd: the AzCliCommand for the current invocation
        :param raw_parameters: dictionary of the raw command parameters
        :param decorator_mode: the mode (create or update) the decorator operates in
        :raises CLIInternalError: when raw_parameters is not a dict
        """
        self.cmd = cmd
        if not isinstance(raw_parameters, dict):
            raise CLIInternalError(
                "Unexpected raw_parameters object with type '{}'.".format(
                    type(raw_parameters)
                )
            )
        self.raw_param = raw_parameters
        self.decorator_mode = decorator_mode
        # scratch values shared between decorator steps (e.g. subscription_id, custom_headers)
        self.intermediates = dict()
        # the ManagedCluster object, attached later via attach_mc
        self.mc = None
def attach_mc(self, mc: ManagedCluster) -> None:
if self.mc is None:
self.mc = mc
else:
msg = "the same" if self.mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the `mc` object again, the two objects are {}.".format(
msg
)
)
def get_intermediate(self, variable_name: str, default_value: Any = None) -> Any:
if variable_name not in self.intermediates:
msg = "The intermediate '{}' does not exist, return default value '{}'.".format(
variable_name, default_value
)
logger.debug(msg)
return self.intermediates.get(variable_name, default_value)
    def set_intermediate(
        self, variable_name: str, value: Any, overwrite_exists: bool = False
    ) -> None:
        """Store an intermediate value under the given name.

        When the name already exists, the value is only replaced if overwrite_exists is
        True (logged at debug level); a differing candidate value is otherwise dropped
        with a warning, and an identical value is silently ignored.

        :return: None
        """
        if variable_name in self.intermediates:
            if overwrite_exists:
                msg = "The intermediate '{}' is overwritten. Original value: '{}', new value: '{}'.".format(
                    variable_name, self.intermediates.get(variable_name), value
                )
                logger.debug(msg)
                self.intermediates[variable_name] = value
            elif self.intermediates.get(variable_name) != value:
                # conflicting write without overwrite permission: keep the old value
                msg = "The intermediate '{}' already exists, but overwrite is not enabled. " \
                    "Original value: '{}', candidate value: '{}'.".format(
                        variable_name,
                        self.intermediates.get(variable_name),
                        value,
                    )
                logger.warning(msg)
        else:
            self.intermediates[variable_name] = value
def remove_intermediate(self, variable_name: str) -> None:
self.intermediates.pop(variable_name, None)
    def get_subscription_id(self):
        """Obtain the subscription id, caching it as an intermediate and on cli_ctx.data.

        Lookup order: intermediate -> cli_ctx.data -> Profile (which may trigger a
        credential lookup).

        :return: the subscription id
        """
        subscription_id = self.get_intermediate("subscription_id", None)
        if not subscription_id:
            subscription_id = self.cmd.cli_ctx.data.get('subscription_id')
            if not subscription_id:
                subscription_id = Profile(cli_ctx=self.cmd.cli_ctx).get_subscription_id()
                # cache on the cli context for other code in the same invocation
                self.cmd.cli_ctx.data['subscription_id'] = subscription_id
            self.set_intermediate("subscription_id", subscription_id, overwrite_exists=True)
        return subscription_id
def get_resource_group_name(self) -> str:
resource_group_name = self.raw_param.get("resource_group_name")
return resource_group_name
def get_name(self) -> str:
name = self.raw_param.get("name")
return name
def _get_location(self, read_only: bool = False, **kwargs) -> Union[str, None]:
location = self.raw_param.get("location")
read_from_mc = False
if self.mc and self.mc.location is not None:
location = self.mc.location
read_from_mc = True
if read_only:
return location
if not read_from_mc and location is None:
location = _get_rg_location(
self.cmd.cli_ctx, self.get_resource_group_name()
)
return location
def get_location(self) -> Union[str, None]:
    """Dynamically obtain the value of location.

    :return: string or None
    """
    location = self._get_location()
    return location
def get_ssh_key_value_and_no_ssh_key(self) -> Tuple[str, bool]:
    """Obtain the value of ssh_key_value and no_ssh_key.

    ssh_key_value is read from the first public key of the linux profile
    on the `mc` object when available, otherwise from the command input.
    Unless no_ssh_key is enabled, the key is validated as an ssh rsa
    public key via the external function "is_valid_ssh_rsa_public_key".

    :raise: CLIInternalError if ssh_key_value is read from `mc` while
        no_ssh_key is enabled
    :raise: InvalidArgumentValueError if the key fails validation
    :return: a tuple of (string, bool)
    """
    # read the original value passed by the command
    raw_value = self.raw_param.get("ssh_key_value")
    # try to read the property value corresponding to the parameter from the `mc` object
    value_obtained_from_mc = None
    if (
        self.mc and
        self.mc.linux_profile and
        self.mc.linux_profile.ssh and
        self.mc.linux_profile.ssh.public_keys
    ):
        public_key_obj = safe_list_get(
            self.mc.linux_profile.ssh.public_keys, 0, None
        )
        if public_key_obj:
            value_obtained_from_mc = public_key_obj.key_data
    # set default value, tracking the source of the value
    read_from_mc = False
    if value_obtained_from_mc is not None:
        ssh_key_value = value_obtained_from_mc
        read_from_mc = True
    else:
        ssh_key_value = raw_value
    # read the original value passed by the command
    no_ssh_key = self.raw_param.get("no_ssh_key")
    # consistency check: key from `mc` contradicts --no-ssh-key
    if read_from_mc and no_ssh_key:
        raise CLIInternalError(
            "Inconsistent state detected, ssh_key_value is read from the `mc` object while no_ssh_key is enabled."
        )
    # validation (skipped entirely when --no-ssh-key is enabled)
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(
                ssh_key_value
            ):
                raise ValueError()
        except (TypeError, ValueError):
            # truncate the key for readable error output
            shortened_key = truncate_text(ssh_key_value)
            raise InvalidArgumentValueError(
                "Provided ssh key ({}) is invalid or non-existent".format(
                    shortened_key
                )
            )
    return ssh_key_value, no_ssh_key
def _get_dns_name_prefix(
self, enable_validation: bool = False, read_only: bool = False, **kwargs
) -> Union[str, None]:
dns_name_prefix = self.raw_param.get("dns_name_prefix")
read_from_mc = False
if self.mc and self.mc.dns_prefix is not None:
dns_name_prefix = self.mc.dns_prefix
read_from_mc = True
if read_only:
return dns_name_prefix
dynamic_completion = False
if not dns_name_prefix and not self._get_fqdn_subdomain(enable_validation=False):
dynamic_completion = True
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
name = self.get_name()
resource_group_name = self.get_resource_group_name()
subscription_id = self.get_subscription_id()
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
dns_name_prefix = '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
if enable_validation:
if dns_name_prefix and self._get_fqdn_subdomain(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return dns_name_prefix
def get_dns_name_prefix(self) -> Union[str, None]:
    """Dynamically obtain the value of dns_name_prefix, with validation.

    :return: string or None
    """
    dns_name_prefix = self._get_dns_name_prefix(enable_validation=True)
    return dns_name_prefix
def get_kubernetes_version(self) -> str:
    """Obtain the value of kubernetes_version.

    :return: string
    """
    # The value recorded on the `mc` object takes precedence.
    if self.mc and self.mc.kubernetes_version is not None:
        return self.mc.kubernetes_version
    return self.raw_param.get("kubernetes_version")
def _get_vm_set_type(self, read_only: bool = False, **kwargs) -> Union[str, None]:
    """Internal function to dynamically obtain the value of vm_set_type.

    When no value is given, defaults to "AvailabilitySet" for kubernetes
    versions below 1.12.9 (a notice is printed to stdout) and to
    "VirtualMachineScaleSets" otherwise; the casing of user input is
    normalized to the two canonical spellings.

    :return: string or None
    """
    # read the original value passed by the command
    raw_value = self.raw_param.get("vm_set_type")
    # try to read the property value corresponding to the parameter from the `mc` object
    value_obtained_from_mc = None
    if self.mc and self.mc.agent_pool_profiles:
        agent_pool_profile = safe_list_get(
            self.mc.agent_pool_profiles, 0, None
        )
        if agent_pool_profile:
            value_obtained_from_mc = agent_pool_profile.type
    # set default value, tracking the source of the value
    read_from_mc = False
    if value_obtained_from_mc is not None:
        vm_set_type = value_obtained_from_mc
        read_from_mc = True
    else:
        vm_set_type = raw_value
    # skip dynamic completion when read_only is set
    if read_only:
        return vm_set_type
    # dynamic completion, only performed when the value was not read from `mc`
    if not read_from_mc:
        kubernetes_version = self.get_kubernetes_version()
        if not vm_set_type:
            if kubernetes_version and StrictVersion(kubernetes_version) < StrictVersion("1.12.9"):
                print(
                    "Setting vm_set_type to availabilityset as it is not specified and kubernetes version({}) "
                    "less than 1.12.9 only supports availabilityset\n".format(
                        kubernetes_version
                    )
                )
                vm_set_type = "AvailabilitySet"
        if not vm_set_type:
            vm_set_type = "VirtualMachineScaleSets"
        # normalize the casing of user input to the canonical spellings
        if vm_set_type.lower() == "AvailabilitySet".lower():
            vm_set_type = "AvailabilitySet"
        if vm_set_type.lower() == "VirtualMachineScaleSets".lower():
            vm_set_type = "VirtualMachineScaleSets"
        return vm_set_type
    # value read from `mc` is returned unmodified (no normalization)
    return vm_set_type
def get_vm_set_type(self) -> Union[str, None]:
    """Dynamically obtain the value of vm_set_type.

    :return: string or None
    """
    vm_set_type = self._get_vm_set_type()
    return vm_set_type
def get_nodepool_name(self) -> str:
    """Obtain the value of nodepool_name.

    Defaults to "nodepool1" when no name is given; a provided name is
    trimmed to at most 12 characters.

    :return: string
    """
    nodepool_name = self.raw_param.get("nodepool_name")
    # The name on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.name is not None:
        nodepool_name = profile.name
    if not nodepool_name:
        return "nodepool1"
    return nodepool_name[:12]
def get_nodepool_tags(self) -> Union[Dict[str, str], None]:
    """Obtain the value of nodepool_tags.

    :return: dictionary or None
    """
    nodepool_tags = self.raw_param.get("nodepool_tags")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.tags is not None:
        nodepool_tags = profile.tags
    return nodepool_tags
def get_nodepool_labels(self) -> Union[Dict[str, str], None]:
    """Obtain the value of nodepool_labels.

    :return: dictionary or None
    """
    nodepool_labels = self.raw_param.get("nodepool_labels")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.node_labels is not None:
        nodepool_labels = profile.node_labels
    return nodepool_labels
def get_node_vm_size(self) -> str:
    """Obtain the value of node_vm_size.

    :return: string
    """
    node_vm_size = self.raw_param.get("node_vm_size")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.vm_size is not None:
        node_vm_size = profile.vm_size
    return node_vm_size
def get_vnet_subnet_id(self) -> Union[str, None]:
    """Obtain the value of vnet_subnet_id.

    :return: string or None
    """
    vnet_subnet_id = self.raw_param.get("vnet_subnet_id")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.vnet_subnet_id is not None:
        vnet_subnet_id = profile.vnet_subnet_id
    return vnet_subnet_id
def get_ppg(self) -> Union[str, None]:
    """Obtain the value of ppg (proximity placement group id).

    :return: string or None
    """
    ppg = self.raw_param.get("ppg")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.proximity_placement_group_id is not None:
        ppg = profile.proximity_placement_group_id
    return ppg
def get_zones(self) -> Union[List[str], None]:
    """Obtain the value of zones (availability zones).

    :return: list of strings or None
    """
    zones = self.raw_param.get("zones")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.availability_zones is not None:
        zones = profile.availability_zones
    return zones
def get_enable_node_public_ip(self) -> bool:
    """Obtain the value of enable_node_public_ip.

    :return: bool
    """
    enable_node_public_ip = self.raw_param.get("enable_node_public_ip")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.enable_node_public_ip is not None:
        enable_node_public_ip = profile.enable_node_public_ip
    return enable_node_public_ip
def get_node_public_ip_prefix_id(self) -> Union[str, None]:
    """Obtain the value of node_public_ip_prefix_id.

    :return: string or None
    """
    node_public_ip_prefix_id = self.raw_param.get("node_public_ip_prefix_id")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.node_public_ip_prefix_id is not None:
        node_public_ip_prefix_id = profile.node_public_ip_prefix_id
    return node_public_ip_prefix_id
def get_enable_encryption_at_host(self) -> bool:
    """Obtain the value of enable_encryption_at_host.

    :return: bool
    """
    enable_encryption_at_host = self.raw_param.get("enable_encryption_at_host")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.enable_encryption_at_host is not None:
        enable_encryption_at_host = profile.enable_encryption_at_host
    return enable_encryption_at_host
def get_enable_ultra_ssd(self) -> bool:
    """Obtain the value of enable_ultra_ssd.

    :return: bool
    """
    enable_ultra_ssd = self.raw_param.get("enable_ultra_ssd")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.enable_ultra_ssd is not None:
        enable_ultra_ssd = profile.enable_ultra_ssd
    return enable_ultra_ssd
def get_max_pods(self) -> Union[int, None]:
    """Obtain the value of max_pods.

    Note: the value is normalized to int, and a falsy value (including 0)
    becomes None.

    :return: int or None
    """
    max_pods = self.raw_param.get("max_pods")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.max_pods is not None:
        max_pods = profile.max_pods
    # normalize
    return int(max_pods) if max_pods else None
def get_node_osdisk_size(self) -> Union[int, None]:
    """Obtain the value of node_osdisk_size.

    Note: the value is normalized to int, and a falsy value (including 0)
    becomes None.

    :return: int or None
    """
    node_osdisk_size = self.raw_param.get("node_osdisk_size")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.os_disk_size_gb is not None:
        node_osdisk_size = profile.os_disk_size_gb
    # normalize
    return int(node_osdisk_size) if node_osdisk_size else None
def get_node_osdisk_type(self) -> Union[str, None]:
    """Obtain the value of node_osdisk_type.

    :return: string or None
    """
    node_osdisk_type = self.raw_param.get("node_osdisk_type")
    # The value on the first agent pool profile of `mc` takes precedence.
    profile = None
    if self.mc and self.mc.agent_pool_profiles:
        profile = safe_list_get(self.mc.agent_pool_profiles, 0, None)
    if profile and profile.os_disk_type is not None:
        node_osdisk_type = profile.os_disk_type
    return node_osdisk_type
def get_node_count_and_enable_cluster_autoscaler_and_min_count_and_max_count(
    self,
) -> Tuple[int, bool, Union[int, None], Union[int, None]]:
    """Obtain the value of node_count, enable_cluster_autoscaler, min_count and max_count.

    Values recorded on the first agent pool profile of the `mc` object
    take precedence over the command input. The external function
    "validate_counts_in_autoscaler" is called (in CREATE mode) to verify
    the counts before returning.

    :return: a tuple of (int, bool, int or None, int or None)
    """
    # obtain the first agent pool profile from the `mc` object, if any
    agent_pool_profile = None
    if self.mc and self.mc.agent_pool_profiles:
        agent_pool_profile = safe_list_get(
            self.mc.agent_pool_profiles, 0, None
        )
    # node_count
    node_count = self.raw_param.get("node_count")
    if agent_pool_profile and agent_pool_profile.count is not None:
        node_count = agent_pool_profile.count
    # enable_cluster_autoscaler
    enable_cluster_autoscaler = self.raw_param.get("enable_cluster_autoscaler")
    if agent_pool_profile and agent_pool_profile.enable_auto_scaling is not None:
        enable_cluster_autoscaler = agent_pool_profile.enable_auto_scaling
    # min_count
    min_count = self.raw_param.get("min_count")
    if agent_pool_profile and agent_pool_profile.min_count is not None:
        min_count = agent_pool_profile.min_count
    # max_count
    max_count = self.raw_param.get("max_count")
    if agent_pool_profile and agent_pool_profile.max_count is not None:
        max_count = agent_pool_profile.max_count
    # validation
    validate_counts_in_autoscaler(
        node_count,
        enable_cluster_autoscaler,
        min_count,
        max_count,
        decorator_mode=DecoratorMode.CREATE,
    )
    return node_count, enable_cluster_autoscaler, min_count, max_count
def get_update_enable_disable_cluster_autoscaler_and_min_max_count(
    self,
) -> Tuple[bool, bool, bool, Union[int, None], Union[int, None]]:
    """Obtain update_cluster_autoscaler, enable_cluster_autoscaler,
    disable_cluster_autoscaler, min_count and max_count (update mode).

    The three autoscaler flags are mutually exclusive and are validated
    against the autoscaler state already recorded on the first agent pool
    profile of the `mc` object. Note that redundant enable/disable
    requests terminate the process via sys.exit(0) after logging a
    warning.

    :raise: ArgumentUsageError, MutuallyExclusiveArgumentError or
        InvalidArgumentValueError on invalid combinations
    :return: a tuple of (bool, bool, bool, int or None, int or None)
    """
    # obtain the first agent pool profile from the `mc` object, if any
    agent_pool_profile = None
    if self.mc and self.mc.agent_pool_profiles:
        agent_pool_profile = safe_list_get(
            self.mc.agent_pool_profiles, 0, None
        )
    # read the original values passed by the command
    update_cluster_autoscaler = self.raw_param.get("update_cluster_autoscaler")
    enable_cluster_autoscaler = self.raw_param.get("enable_cluster_autoscaler")
    disable_cluster_autoscaler = self.raw_param.get("disable_cluster_autoscaler")
    min_count = self.raw_param.get("min_count")
    max_count = self.raw_param.get("max_count")
    # NOTE(review): assumes `mc` has been fetched and carries agent pool
    # profiles in update mode — confirm with the calling decorator.
    if (enable_cluster_autoscaler or update_cluster_autoscaler) and len(self.mc.agent_pool_profiles) > 1:
        raise ArgumentUsageError(
            'There are more than one node pool in the cluster. Please use "az aks nodepool" command '
            "to update per node pool auto scaler settings"
        )
    # the three flags are mutually exclusive
    if enable_cluster_autoscaler + update_cluster_autoscaler + disable_cluster_autoscaler > 1:
        raise MutuallyExclusiveArgumentError(
            "Can only specify one of --enable-cluster-autoscaler, --update-cluster-autoscaler and "
            "--disable-cluster-autoscaler"
        )
    # verify min/max count against the requested autoscaler state
    validate_counts_in_autoscaler(
        None,
        enable_cluster_autoscaler or update_cluster_autoscaler,
        min_count,
        max_count,
        decorator_mode=DecoratorMode.UPDATE,
    )
    # enabling an already-enabled autoscaler is a no-op: warn and exit
    if enable_cluster_autoscaler and agent_pool_profile.enable_auto_scaling:
        logger.warning(
            "Cluster autoscaler is already enabled for this node pool.\n"
            'Please run "az aks --update-cluster-autoscaler" '
            "if you want to update min-count or max-count."
        )
        sys.exit(0)
    if update_cluster_autoscaler and not agent_pool_profile.enable_auto_scaling:
        raise InvalidArgumentValueError(
            "Cluster autoscaler is not enabled for this node pool.\n"
            'Run "az aks nodepool update --enable-cluster-autoscaler" '
            "to enable cluster with min-count and max-count."
        )
    # disabling an already-disabled autoscaler is a no-op: warn and exit
    if disable_cluster_autoscaler and not agent_pool_profile.enable_auto_scaling:
        logger.warning(
            "Cluster autoscaler is already disabled for this node pool."
        )
        sys.exit(0)
    return update_cluster_autoscaler, enable_cluster_autoscaler, disable_cluster_autoscaler, min_count, max_count
def get_admin_username(self) -> str:
    """Obtain the value of admin_username.

    :return: string
    """
    # The value on the linux profile of the `mc` object takes precedence.
    linux_profile = self.mc.linux_profile if self.mc else None
    if linux_profile and linux_profile.admin_username is not None:
        return linux_profile.admin_username
    return self.raw_param.get("admin_username")
def _get_windows_admin_username_and_password(
    self, read_only: bool = False, **kwargs
) -> Tuple[Union[str, None], Union[str, None]]:
    """Internal function to dynamically obtain windows_admin_username and windows_admin_password.

    When only one of the two values is provided, the missing one is
    prompted for interactively; in non-interactive mode a NoTTYError is
    raised instead. Both values must come from the same source.

    :raise: CLIInternalError if only one of the values is read from `mc`
    :raise: NoTTYError when prompting is required but no TTY is available
    :return: a tuple of (string or None, string or None)
    """
    # windows_admin_username: read the original value passed by the command
    windows_admin_username = self.raw_param.get("windows_admin_username")
    # try to read the property value corresponding to the parameter from the `mc` object
    username_read_from_mc = False
    if (
        self.mc and
        self.mc.windows_profile and
        self.mc.windows_profile.admin_username is not None
    ):
        windows_admin_username = self.mc.windows_profile.admin_username
        username_read_from_mc = True
    # windows_admin_password: read the original value passed by the command
    windows_admin_password = self.raw_param.get("windows_admin_password")
    # try to read the property value corresponding to the parameter from the `mc` object
    password_read_from_mc = False
    if (
        self.mc and
        self.mc.windows_profile and
        self.mc.windows_profile.admin_password is not None
    ):
        windows_admin_password = self.mc.windows_profile.admin_password
        password_read_from_mc = True
    # consistency check: both values must share one source
    if username_read_from_mc != password_read_from_mc:
        raise CLIInternalError(
            "Inconsistent state detected, one of windows admin name and password is read from the `mc` object."
        )
    # skip dynamic completion when read_only is set
    if read_only:
        return windows_admin_username, windows_admin_password
    # dynamic completion for windows_admin_username (password given, name missing)
    username_dynamic_completion = False
    if windows_admin_username is None and windows_admin_password:
        username_dynamic_completion = True
    username_dynamic_completion = (
        username_dynamic_completion and not username_read_from_mc
    )
    if username_dynamic_completion:
        try:
            windows_admin_username = prompt("windows_admin_username: ")
        except NoTTYException:
            raise NoTTYError(
                "Please specify username for Windows in non-interactive mode."
            )
    # dynamic completion for windows_admin_password (name given, password missing)
    password_dynamic_completion = False
    if windows_admin_password is None and windows_admin_username:
        password_dynamic_completion = True
    password_dynamic_completion = (
        password_dynamic_completion and not password_read_from_mc
    )
    if password_dynamic_completion:
        try:
            windows_admin_password = prompt_pass(
                msg="windows-admin-password: ", confirm=True
            )
        except NoTTYException:
            raise NoTTYError(
                "Please specify both username and password in non-interactive mode."
            )
    return windows_admin_username, windows_admin_password
def get_windows_admin_username_and_password(
    self,
) -> Tuple[Union[str, None], Union[str, None]]:
    """Dynamically obtain windows_admin_username and windows_admin_password.

    :return: a tuple of (string or None, string or None)
    """
    username_and_password = self._get_windows_admin_username_and_password()
    return username_and_password
def get_enable_ahub(self) -> bool:
    """Obtain the value of enable_ahub.

    Note: when a windows profile exists on the `mc` object the value is
    derived from its license type rather than the command input.

    :return: bool
    """
    if self.mc and self.mc.windows_profile:
        return self.mc.windows_profile.license_type == "Windows_Server"
    return self.raw_param.get("enable_ahub")
def _get_service_principal_and_client_secret(
    self, read_only: bool = False, **kwargs
) -> Tuple[Union[str, None], Union[str, None]]:
    """Internal function to dynamically obtain service_principal and client_secret.

    Unless managed identity is enabled with neither value given, the
    external function "_ensure_aks_service_principal" is called to
    complete the pair (which may create a new service principal); the
    resulting aad_session_key is stored as an intermediate.

    :raise: CLIInternalError if only one of the values is read from `mc`
    :return: a tuple of (string or None, string or None)
    """
    # service_principal: read the original value passed by the command
    service_principal = self.raw_param.get("service_principal")
    # try to read the property value corresponding to the parameter from the `mc` object
    sp_read_from_mc = False
    if (
        self.mc and
        self.mc.service_principal_profile and
        self.mc.service_principal_profile.client_id is not None
    ):
        service_principal = self.mc.service_principal_profile.client_id
        sp_read_from_mc = True
    # client_secret: read the original value passed by the command
    client_secret = self.raw_param.get("client_secret")
    # try to read the property value corresponding to the parameter from the `mc` object
    secret_read_from_mc = False
    if (
        self.mc and
        self.mc.service_principal_profile and
        self.mc.service_principal_profile.secret is not None
    ):
        client_secret = self.mc.service_principal_profile.secret
        secret_read_from_mc = True
    # consistency check: both values must share one source
    if sp_read_from_mc != secret_read_from_mc:
        raise CLIInternalError(
            "Inconsistent state detected, one of sp and secret is read from the `mc` object."
        )
    # skip dynamic completion when read_only is set
    if read_only:
        return service_principal, client_secret
    # dynamic completion is triggered unless managed identity is enabled
    # with neither sp nor secret provided
    dynamic_completion = False
    enable_managed_identity = self._get_enable_managed_identity(read_only=True)
    if not (
        enable_managed_identity and
        not service_principal and
        not client_secret
    ):
        dynamic_completion = True
    dynamic_completion = (
        dynamic_completion and
        not sp_read_from_mc and
        not secret_read_from_mc
    )
    if dynamic_completion:
        principal_obj = _ensure_aks_service_principal(
            cli_ctx=self.cmd.cli_ctx,
            service_principal=service_principal,
            client_secret=client_secret,
            subscription_id=self.get_subscription_id(),
            dns_name_prefix=self._get_dns_name_prefix(enable_validation=False),
            fqdn_subdomain=self._get_fqdn_subdomain(enable_validation=False),
            location=self.get_location(),
            name=self.get_name(),
        )
        service_principal = principal_obj.get("service_principal")
        client_secret = principal_obj.get("client_secret")
        # side effect: cache the aad session key for later use
        self.set_intermediate("aad_session_key", principal_obj.get("aad_session_key"), overwrite_exists=True)
    return service_principal, client_secret
def get_service_principal_and_client_secret(
    self
) -> Tuple[Union[str, None], Union[str, None]]:
    """Dynamically obtain service_principal and client_secret.

    :return: a tuple of (string or None, string or None)
    """
    sp_and_secret = self._get_service_principal_and_client_secret()
    return sp_and_secret
def _get_enable_managed_identity(
    self, enable_validation: bool = False, read_only: bool = False, **kwargs
) -> bool:
    """Internal function to dynamically obtain the value of enable_managed_identity.

    When a service principal and client secret are provided (and not read
    from `mc`), managed identity is implicitly disabled. Validation (when
    enabled) ensures --assign-identity is only used together with
    --enable-managed-identity.

    :raise: RequiredArgumentMissingError on invalid combination
    :return: bool
    """
    # read the original value passed by the command
    enable_managed_identity = self.raw_param.get("enable_managed_identity")
    # try to read the property value corresponding to the parameter from the `mc` object
    read_from_mc = False
    if self.mc and self.mc.identity:
        enable_managed_identity = check_is_msi_cluster(self.mc)
        read_from_mc = True
    # skip dynamic completion & validation when read_only is set
    if read_only:
        return enable_managed_identity
    # dynamic completion: providing sp/secret implies no managed identity
    (
        service_principal,
        client_secret,
    ) = self._get_service_principal_and_client_secret(read_only=True)
    if not read_from_mc and service_principal and client_secret:
        enable_managed_identity = False
    # validation
    if enable_validation:
        if not enable_managed_identity and self._get_assign_identity(enable_validation=False):
            raise RequiredArgumentMissingError(
                "--assign-identity can only be specified when --enable-managed-identity is specified"
            )
    return enable_managed_identity
def get_enable_managed_identity(self) -> bool:
    """Dynamically obtain the value of enable_managed_identity, with validation.

    :return: bool
    """
    enable_managed_identity = self._get_enable_managed_identity(enable_validation=True)
    return enable_managed_identity
def get_skip_subnet_role_assignment(self) -> bool:
    """Obtain the value of skip_subnet_role_assignment.

    :return: bool
    """
    # Read directly from the command input; not backed by the `mc` object.
    return self.raw_param.get("skip_subnet_role_assignment")
def _get_assign_identity(self, enable_validation: bool = False, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("assign_identity")
value_obtained_from_mc = None
if (
self.mc and
self.mc.identity and
self.mc.identity.user_assigned_identities is not None
):
value_obtained_from_mc = safe_list_get(
list(self.mc.identity.user_assigned_identities.keys()), 0, None
)
if value_obtained_from_mc is not None:
assign_identity = value_obtained_from_mc
else:
assign_identity = raw_value
if enable_validation:
if assign_identity:
if not self._get_enable_managed_identity(enable_validation=False):
raise RequiredArgumentMissingError(
"--assign-identity can only be specified when --enable-managed-identity is specified"
)
else:
if self.get_assign_kubelet_identity():
raise RequiredArgumentMissingError(
"--assign-kubelet-identity can only be specified when --assign-identity is specified"
)
return assign_identity
def get_assign_identity(self) -> Union[str, None]:
    """Obtain the value of assign_identity, with validation.

    :return: string or None
    """
    assign_identity = self._get_assign_identity(enable_validation=True)
    return assign_identity
def get_identity_by_msi_client(self, assigned_identity: str) -> Identity:
    """Obtain the identity object for an assigned identity via the msi client.

    Note: the external function "_get_user_assigned_identity" performs
    the actual lookup.

    :return: an Identity object
    """
    identity = _get_user_assigned_identity(self.cmd.cli_ctx, assigned_identity)
    return identity
def get_user_assigned_identity_client_id(self) -> str:
    """Obtain the client_id of the user assigned identity.

    :raise: RequiredArgumentMissingError when no assigned identity is given
    :return: string
    """
    assigned_identity = self.get_assign_identity()
    if assigned_identity is None or assigned_identity == "":
        raise RequiredArgumentMissingError("No assigned identity provided.")
    identity = self.get_identity_by_msi_client(assigned_identity)
    return identity.client_id
def get_user_assigned_identity_object_id(self) -> str:
    """Obtain the principal_id (object id) of the user assigned identity.

    :raise: RequiredArgumentMissingError when no assigned identity is given
    :return: string
    """
    assigned_identity = self.get_assign_identity()
    if assigned_identity is None or assigned_identity == "":
        raise RequiredArgumentMissingError("No assigned identity provided.")
    identity = self.get_identity_by_msi_client(assigned_identity)
    return identity.principal_id
def get_yes(self) -> bool:
    """Obtain the value of yes.

    :return: bool
    """
    # Read directly from the command input; not backed by the `mc` object.
    return self.raw_param.get("yes")
def get_no_wait(self) -> bool:
    """Obtain the value of no_wait.

    :return: bool
    """
    # Read directly from the command input; not backed by the `mc` object.
    return self.raw_param.get("no_wait")
def get_attach_acr(self) -> Union[str, None]:
    """Obtain the value of attach_acr.

    In create mode this getter also validates: --no-wait is not allowed
    together with managed identity, and a service principal is required
    to create the acrpull role assignment.

    :raise: MutuallyExclusiveArgumentError or RequiredArgumentMissingError
    :return: string or None
    """
    attach_acr = self.raw_param.get("attach_acr")
    # Validation only applies in create mode with a value provided.
    if not (self.decorator_mode == DecoratorMode.CREATE and attach_acr):
        return attach_acr
    if self._get_enable_managed_identity(enable_validation=False) and self.get_no_wait():
        raise MutuallyExclusiveArgumentError(
            "When --attach-acr and --enable-managed-identity are both specified, "
            "--no-wait is not allowed, please wait until the whole operation succeeds."
        )
    service_principal, _ = self._get_service_principal_and_client_secret(read_only=True)
    if not service_principal:
        raise RequiredArgumentMissingError(
            "No service principal provided to create the acrpull role assignment for acr."
        )
    return attach_acr
def get_detach_acr(self) -> Union[str, None]:
    """Obtain the value of detach_acr.

    :return: string or None
    """
    # Read directly from the command input; not backed by the `mc` object.
    return self.raw_param.get("detach_acr")
def _get_load_balancer_sku(
    self, enable_validation: bool = False, read_only: bool = False, **kwargs
) -> Union[str, None]:
    """Internal function to dynamically obtain the value of load_balancer_sku.

    When no sku is provided, the default is derived from the kubernetes
    version via the external function "set_load_balancer_sku". Validation
    (when enabled) rejects combining the basic sku with api server
    authorized ip ranges or with a private cluster.

    :raise: InvalidArgumentValueError on invalid basic-sku combinations
    :return: lower-cased string or None
    """
    # read the original value passed by the command, lower-cased
    load_balancer_sku = safe_lower(self.raw_param.get("load_balancer_sku"))
    # try to read the property value corresponding to the parameter from the `mc` object
    read_from_mc = False
    if (
        self.mc and
        self.mc.network_profile and
        self.mc.network_profile.load_balancer_sku is not None
    ):
        load_balancer_sku = safe_lower(
            self.mc.network_profile.load_balancer_sku
        )
        read_from_mc = True
    # skip dynamic completion & validation when read_only is set
    if read_only:
        return load_balancer_sku
    # dynamic completion: derive the default sku from the kubernetes version
    if not read_from_mc and load_balancer_sku is None:
        load_balancer_sku = safe_lower(
            set_load_balancer_sku(
                sku=load_balancer_sku,
                kubernetes_version=self.get_kubernetes_version(),
            )
        )
    # validation
    if enable_validation:
        if load_balancer_sku == "basic":
            if self.get_api_server_authorized_ip_ranges():
                raise InvalidArgumentValueError(
                    "--api-server-authorized-ip-ranges can only be used with standard load balancer"
                )
            if self.get_enable_private_cluster():
                raise InvalidArgumentValueError(
                    "Please use standard load balancer for private cluster"
                )
    return load_balancer_sku
def get_load_balancer_sku(self) -> Union[str, None]:
    """Dynamically obtain the value of load_balancer_sku, with validation.

    :return: lower-cased string or None
    """
    load_balancer_sku = self._get_load_balancer_sku(enable_validation=True)
    return safe_lower(load_balancer_sku)
def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:
    """Obtain the value of load_balancer_managed_outbound_ip_count.

    :return: int or None
    """
    # The count on the load balancer profile of `mc` takes precedence.
    lb_profile = None
    if self.mc and self.mc.network_profile:
        lb_profile = self.mc.network_profile.load_balancer_profile
    managed_ips = lb_profile.managed_outbound_i_ps if lb_profile else None
    if managed_ips and managed_ips.count is not None:
        return managed_ips.count
    return self.raw_param.get("load_balancer_managed_outbound_ip_count")
def get_load_balancer_outbound_ips(self) -> Union[str, List[ResourceReference], None]:
    """Obtain the value of load_balancer_outbound_ips.

    :return: string, list of ResourceReference, or None
    """
    # The public IPs on the load balancer profile of `mc` take precedence.
    lb_profile = None
    if self.mc and self.mc.network_profile:
        lb_profile = self.mc.network_profile.load_balancer_profile
    outbound_ips = lb_profile.outbound_i_ps if lb_profile else None
    if outbound_ips and outbound_ips.public_i_ps is not None:
        return outbound_ips.public_i_ps
    return self.raw_param.get("load_balancer_outbound_ips")
def get_load_balancer_outbound_ip_prefixes(self) -> Union[str, List[ResourceReference], None]:
    """Obtain the value of load_balancer_outbound_ip_prefixes.

    :return: string, list of ResourceReference, or None
    """
    # The prefixes on the load balancer profile of `mc` take precedence.
    lb_profile = None
    if self.mc and self.mc.network_profile:
        lb_profile = self.mc.network_profile.load_balancer_profile
    prefixes = lb_profile.outbound_ip_prefixes if lb_profile else None
    if prefixes and prefixes.public_ip_prefixes is not None:
        return prefixes.public_ip_prefixes
    return self.raw_param.get("load_balancer_outbound_ip_prefixes")
def get_load_balancer_outbound_ports(self) -> Union[int, None]:
    """Obtain the value of load_balancer_outbound_ports.

    :return: int or None
    """
    # The value on the load balancer profile of `mc` takes precedence.
    lb_profile = None
    if self.mc and self.mc.network_profile:
        lb_profile = self.mc.network_profile.load_balancer_profile
    if lb_profile and lb_profile.allocated_outbound_ports is not None:
        return lb_profile.allocated_outbound_ports
    return self.raw_param.get("load_balancer_outbound_ports")
def get_load_balancer_idle_timeout(self) -> Union[int, None]:
    """Obtain the value of load_balancer_idle_timeout.

    :return: int or None
    """
    # The value on the load balancer profile of `mc` takes precedence.
    lb_profile = None
    if self.mc and self.mc.network_profile:
        lb_profile = self.mc.network_profile.load_balancer_profile
    if lb_profile and lb_profile.idle_timeout_in_minutes is not None:
        return lb_profile.idle_timeout_in_minutes
    return self.raw_param.get("load_balancer_idle_timeout")
def _get_outbound_type(
    self,
    enable_validation: bool = False,
    read_only: bool = False,
    load_balancer_profile: ManagedClusterLoadBalancerProfile = None,
    **kwargs
) -> Union[str, None]:
    """Internal function to dynamically obtain the value of outbound_type.

    Any value other than userDefinedRouting is completed to the
    loadBalancer outbound type. Validation (when enabled) requires
    userDefinedRouting to be combined with a standard load balancer sku
    and a pre-configured vnet subnet, and forbids customizing the
    standard load balancer with outbound IP addresses.

    :raise: InvalidArgumentValueError, RequiredArgumentMissingError or
        MutuallyExclusiveArgumentError on invalid combinations
    :return: string or None
    """
    # read the original value passed by the command
    outbound_type = self.raw_param.get("outbound_type")
    # try to read the property value corresponding to the parameter from the `mc` object
    read_from_mc = False
    if (
        self.mc and
        self.mc.network_profile and
        self.mc.network_profile.outbound_type is not None
    ):
        outbound_type = self.mc.network_profile.outbound_type
        read_from_mc = True
    # skip dynamic completion & validation when read_only is set
    if read_only:
        return outbound_type
    # dynamic completion: default to the loadBalancer outbound type
    if not read_from_mc and outbound_type != CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING:
        outbound_type = CONST_OUTBOUND_TYPE_LOAD_BALANCER
    # validation
    if enable_validation:
        if outbound_type == CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING:
            if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
                raise InvalidArgumentValueError(
                    "userDefinedRouting doesn't support basic load balancer sku"
                )
            if self.get_vnet_subnet_id() in ["", None]:
                raise RequiredArgumentMissingError(
                    "--vnet-subnet-id must be specified for userDefinedRouting and it must "
                    "be pre-configured with a route table with egress rules"
                )
            # check against the given profile when one is passed in,
            # otherwise fall back to the individual parameters
            if load_balancer_profile:
                if (
                    load_balancer_profile.managed_outbound_i_ps or
                    load_balancer_profile.outbound_i_ps or
                    load_balancer_profile.outbound_ip_prefixes
                ):
                    raise MutuallyExclusiveArgumentError(
                        "userDefinedRouting doesn't support customizing a standard load balancer with IP addresses"
                    )
            else:
                if (
                    self.get_load_balancer_managed_outbound_ip_count() or
                    self.get_load_balancer_outbound_ips() or
                    self.get_load_balancer_outbound_ip_prefixes()
                ):
                    raise MutuallyExclusiveArgumentError(
                        "userDefinedRouting doesn't support customizing a standard load balancer with IP addresses"
                    )
    return outbound_type
def get_outbound_type(
    self,
    load_balancer_profile: ManagedClusterLoadBalancerProfile = None
) -> Union[str, None]:
    """Dynamically obtain the value of outbound_type, with validation.

    :return: string or None
    """
    outbound_type = self._get_outbound_type(
        enable_validation=True, load_balancer_profile=load_balancer_profile
    )
    return outbound_type
# pylint: disable=unused-argument
def _get_network_plugin(self, enable_validation: bool = False, **kwargs) -> Union[str, None]:
    """Internal function to obtain the value of network_plugin.

    Validation (when enabled) cross-checks the plugin type against
    pod_cidr, service_cidr, dns_service_ip, docker_bridge_address and
    network_policy: pod_cidr is not allowed with the azure plugin, and
    the other parameters require a plugin type to be specified.

    :raise: InvalidArgumentValueError or RequiredArgumentMissingError
    :return: string or None
    """
    # read the original value passed by the command
    network_plugin = self.raw_param.get("network_plugin")
    # try to read the property value corresponding to the parameter from the `mc` object
    if (
        self.mc and
        self.mc.network_profile and
        self.mc.network_profile.network_plugin is not None
    ):
        network_plugin = self.mc.network_profile.network_plugin
    # this parameter does not need dynamic completion
    # validation
    if enable_validation:
        (
            pod_cidr,
            service_cidr,
            dns_service_ip,
            docker_bridge_address,
            network_policy,
        ) = (
            self.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()
        )
        if network_plugin:
            if network_plugin == "azure" and pod_cidr:
                raise InvalidArgumentValueError(
                    "Please use kubenet as the network plugin type when pod_cidr is specified"
                )
        else:
            if (
                pod_cidr or
                service_cidr or
                dns_service_ip or
                docker_bridge_address or
                network_policy
            ):
                raise RequiredArgumentMissingError(
                    "Please explicitly specify the network plugin type"
                )
    return network_plugin
def get_network_plugin(self) -> Union[str, None]:
    """Obtain the value of network_plugin, with validation.

    :return: string or None
    """
    network_plugin = self._get_network_plugin(enable_validation=True)
    return network_plugin
def get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy(
    self,
) -> Tuple[
    Union[str, None],
    Union[str, None],
    Union[str, None],
    Union[str, None],
    Union[str, None],
]:
    """Obtain pod_cidr, service_cidr, dns_service_ip, docker_bridge_address
    and network_policy.

    Each value recorded on the network profile of the `mc` object takes
    precedence over the command input. Validation mirrors
    "_get_network_plugin": pod_cidr is not allowed with the azure plugin,
    and any of these parameters requires a plugin type to be specified.

    :raise: InvalidArgumentValueError or RequiredArgumentMissingError
    :return: a tuple of five elements, each a string or None
    """
    # get network profile from `mc`
    network_profile = None
    if self.mc:
        network_profile = self.mc.network_profile
    # pod_cidr
    # read the original value passed by the command
    pod_cidr = self.raw_param.get("pod_cidr")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.pod_cidr is not None:
        pod_cidr = network_profile.pod_cidr
    # service_cidr
    # read the original value passed by the command
    service_cidr = self.raw_param.get("service_cidr")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.service_cidr is not None:
        service_cidr = network_profile.service_cidr
    # dns_service_ip
    # read the original value passed by the command
    dns_service_ip = self.raw_param.get("dns_service_ip")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.dns_service_ip is not None:
        dns_service_ip = network_profile.dns_service_ip
    # docker_bridge_address
    # read the original value passed by the command
    docker_bridge_address = self.raw_param.get("docker_bridge_address")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.docker_bridge_cidr is not None:
        docker_bridge_address = network_profile.docker_bridge_cidr
    # network_policy
    # read the original value passed by the command
    network_policy = self.raw_param.get("network_policy")
    # try to read the property value corresponding to the parameter from the `mc` object
    if network_profile and network_profile.network_policy is not None:
        network_policy = network_profile.network_policy
    # these parameters do not need dynamic completion
    # validation
    network_plugin = self._get_network_plugin(enable_validation=False)
    if network_plugin:
        if network_plugin == "azure" and pod_cidr:
            raise InvalidArgumentValueError(
                "Please use kubenet as the network plugin type when pod_cidr is specified"
            )
    else:
        if (
            pod_cidr or
            service_cidr or
            dns_service_ip or
            docker_bridge_address or
            network_policy
        ):
            raise RequiredArgumentMissingError(
                "Please explicitly specify the network plugin type"
            )
    return pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy
# pylint: disable=unused-argument
def _get_enable_addons(self, enable_validation: bool = False, **kwargs) -> List[str]:
# read the original value passed by the command
enable_addons = self.raw_param.get("enable_addons")
# normalize
enable_addons = enable_addons.split(',') if enable_addons else []
# validation
if enable_validation:
# check duplicate addons
duplicate_addons_set = {
x for x in enable_addons if enable_addons.count(x) >= 2
}
if len(duplicate_addons_set) != 0:
raise InvalidArgumentValueError(
"Duplicate addon{} '{}' found in option --enable-addons.".format(
"s" if len(duplicate_addons_set) > 1 else "",
",".join(duplicate_addons_set),
)
)
# check unrecognized addons
enable_addons_set = set(enable_addons)
invalid_addons_set = enable_addons_set.difference(ADDONS.keys())
if len(invalid_addons_set) != 0:
raise InvalidArgumentValueError(
"'{}' {} not recognized by the --enable-addons argument.".format(
",".join(invalid_addons_set),
"are" if len(invalid_addons_set) > 1 else "is",
)
)
# check monitoring/workspace_resource_id
workspace_resource_id = self._get_workspace_resource_id(read_only=True)
if "monitoring" not in enable_addons and workspace_resource_id:
raise RequiredArgumentMissingError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
# check virtual node/aci_subnet_name/vnet_subnet_id
# Note: The external parameters involved in the validation are not verified in their own getters.
aci_subnet_name = self.get_aci_subnet_name()
vnet_subnet_id = self.get_vnet_subnet_id()
if "virtual-node" in enable_addons and not (aci_subnet_name and vnet_subnet_id):
raise RequiredArgumentMissingError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
return enable_addons
    def get_enable_addons(self) -> List[str]:
        """Obtain the value of enable_addons, with validation enabled.

        :return: empty list or list of strings
        """
        return self._get_enable_addons(enable_validation=True)
# pylint: disable=unused-argument
def _get_workspace_resource_id(
self, enable_validation: bool = False, read_only: bool = False, **kwargs
) -> Union[str, None]:
# read the original value passed by the command
workspace_resource_id = self.raw_param.get("workspace_resource_id")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if (
self.mc and
self.mc.addon_profiles and
CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_MONITORING_ADDON_NAME
).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID) is not None
):
workspace_resource_id = self.mc.addon_profiles.get(
CONST_MONITORING_ADDON_NAME
).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID)
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return workspace_resource_id
# dynamic completion
if not read_from_mc:
if workspace_resource_id is None:
# use default workspace if exists else create default workspace
workspace_resource_id = (
_ensure_default_log_analytics_workspace_for_monitoring(
self.cmd,
self.get_subscription_id(),
self.get_resource_group_name(),
)
)
# normalize
workspace_resource_id = "/" + workspace_resource_id.strip(" /")
# validation
if enable_validation:
enable_addons = self._get_enable_addons(enable_validation=False)
if workspace_resource_id and "monitoring" not in enable_addons:
raise RequiredArgumentMissingError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
# this parameter does not need validation
return workspace_resource_id
    def get_workspace_resource_id(self) -> Union[str, None]:
        """Dynamically obtain the value of workspace_resource_id, with validation enabled.

        :return: string or None
        """
        return self._get_workspace_resource_id(enable_validation=True)
    # pylint: disable=no-self-use
    def get_virtual_node_addon_os_type(self) -> str:
        """Helper function to obtain the os_type suffix of the virtual node addon key.

        :return: string
        """
        return "Linux"
def get_aci_subnet_name(self) -> Union[str, None]:
# read the original value passed by the command
aci_subnet_name = self.raw_param.get("aci_subnet_name")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME) is not None
):
aci_subnet_name = self.mc.addon_profiles.get(
CONST_VIRTUAL_NODE_ADDON_NAME +
self.get_virtual_node_addon_os_type()
).config.get(CONST_VIRTUAL_NODE_SUBNET_NAME)
# this parameter does not need dynamic completion
# this parameter does not need validation
return aci_subnet_name
def get_appgw_name(self) -> Union[str, None]:
# read the original value passed by the command
appgw_name = self.raw_param.get("appgw_name")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME) is not None
):
appgw_name = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_name
def get_appgw_subnet_cidr(self) -> Union[str, None]:
# read the original value passed by the command
appgw_subnet_cidr = self.raw_param.get("appgw_subnet_cidr")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None
):
appgw_subnet_cidr = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_subnet_cidr
def get_appgw_id(self) -> Union[str, None]:
# read the original value passed by the command
appgw_id = self.raw_param.get("appgw_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID) is not None
):
appgw_id = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_id
def get_appgw_subnet_id(self) -> Union[str, None]:
# read the original value passed by the command
appgw_subnet_id = self.raw_param.get("appgw_subnet_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_ID) is not None
):
appgw_subnet_id = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_SUBNET_ID)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_subnet_id
def get_appgw_watch_namespace(self) -> Union[str, None]:
# read the original value passed by the command
appgw_watch_namespace = self.raw_param.get("appgw_watch_namespace")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE) is not None
):
appgw_watch_namespace = self.mc.addon_profiles.get(
CONST_INGRESS_APPGW_ADDON_NAME
).config.get(CONST_INGRESS_APPGW_WATCH_NAMESPACE)
# this parameter does not need dynamic completion
# this parameter does not need validation
return appgw_watch_namespace
def get_enable_sgxquotehelper(self) -> bool:
# read the original value passed by the command
enable_sgxquotehelper = self.raw_param.get("enable_sgxquotehelper")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.addon_profiles and
CONST_CONFCOM_ADDON_NAME in self.mc.addon_profiles and
self.mc.addon_profiles.get(
CONST_CONFCOM_ADDON_NAME
).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) is not None
):
enable_sgxquotehelper = self.mc.addon_profiles.get(
CONST_CONFCOM_ADDON_NAME
).config.get(CONST_ACC_SGX_QUOTE_HELPER_ENABLED) == "true"
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_sgxquotehelper
# pylint: disable=unused-argument
def _get_enable_aad(self, enable_validation: bool = False, **kwargs) -> bool:
# read the original value passed by the command
enable_aad = self.raw_param.get("enable_aad")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.managed is not None
):
enable_aad = self.mc.aad_profile.managed
# this parameter does not need dynamic completion
# validation
if enable_validation:
(
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
) = (
self.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()
)
if enable_aad:
if any(
[
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
]
):
raise MutuallyExclusiveArgumentError(
"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or "
"--aad-server-app-secret"
)
if not enable_aad and self._get_enable_azure_rbac(enable_validation=False):
raise RequiredArgumentMissingError(
"--enable-azure-rbac can only be used together with --enable-aad"
)
return enable_aad
    def get_enable_aad(self) -> bool:
        """Obtain the value of enable_aad, with validation enabled.

        :return: bool
        """
        return self._get_enable_aad(enable_validation=True)
def get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret(
self,
) -> Tuple[Union[str, None], Union[str, None], Union[str, None]]:
# get aad profile from `mc`
aad_profile = None
if self.mc:
aad_profile = self.mc.aad_profile
# read the original value passed by the command
aad_client_app_id = self.raw_param.get("aad_client_app_id")
# try to read the property value corresponding to the parameter from the `mc` object
if aad_profile and aad_profile.client_app_id is not None:
aad_client_app_id = aad_profile.client_app_id
# read the original value passed by the command
aad_server_app_id = self.raw_param.get("aad_server_app_id")
# try to read the property value corresponding to the parameter from the `mc` object
if aad_profile and aad_profile.server_app_id is not None:
aad_server_app_id = aad_profile.server_app_id
# read the original value passed by the command
aad_server_app_secret = self.raw_param.get("aad_server_app_secret")
# try to read the property value corresponding to the parameter from the `mc` object
if aad_profile and aad_profile.server_app_secret is not None:
aad_server_app_secret = aad_profile.server_app_secret
# these parameters do not need dynamic completion
# validation
enable_aad = self._get_enable_aad(enable_validation=False)
if enable_aad:
if any(
[
aad_client_app_id,
aad_server_app_id,
aad_server_app_secret,
]
):
raise MutuallyExclusiveArgumentError(
"--enable-aad cannot be used together with --aad-client-app-id, --aad-server-app-id or "
"--aad-server-app-secret"
)
return aad_client_app_id, aad_server_app_id, aad_server_app_secret
# pylint: disable=unused-argument
def _get_aad_tenant_id(self, read_only: bool = False, **kwargs) -> Union[str, None]:
# read the original value passed by the command
aad_tenant_id = self.raw_param.get("aad_tenant_id")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.tenant_id is not None
):
aad_tenant_id = self.mc.aad_profile.tenant_id
read_from_mc = True
# skip dynamic completion & validation if option read_only is specified
if read_only:
return aad_tenant_id
# dynamic completion
if not read_from_mc and not self._get_enable_aad(
enable_validation=False
):
if aad_tenant_id is None and any(
self.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()
):
profile = Profile(cli_ctx=self.cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
# this parameter does not need validation
return aad_tenant_id
    def get_aad_tenant_id(self) -> Union[str, None]:
        """Dynamically obtain the value of aad_tenant_id.

        :return: string or None
        """
        return self._get_aad_tenant_id()
def get_aad_admin_group_object_ids(self) -> Union[List[str], None]:
# read the original value passed by the command
aad_admin_group_object_ids = self.raw_param.get("aad_admin_group_object_ids")
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.admin_group_object_i_ds is not None
):
aad_admin_group_object_ids = self.mc.aad_profile.admin_group_object_i_ds
read_from_mc = True
# keep None as None, but empty string ("") to empty list ([])
if not read_from_mc and aad_admin_group_object_ids is not None:
aad_admin_group_object_ids = aad_admin_group_object_ids.split(',') if aad_admin_group_object_ids else []
# this parameter does not need validation
return aad_admin_group_object_ids
# pylint: disable=unused-argument
def _get_disable_rbac(self, enable_validation: bool = False, **kwargs) -> Union[bool, None]:
# read the original value passed by the command
disable_rbac = self.raw_param.get("disable_rbac")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.enable_rbac is not None
):
disable_rbac = not self.mc.enable_rbac
# this parameter does not need dynamic completion
# validation
if enable_validation:
if disable_rbac and self._get_enable_azure_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--enable-azure-rbac cannot be used together with --disable-rbac"
)
if disable_rbac and self.get_enable_rbac():
raise MutuallyExclusiveArgumentError("specify either '--disable-rbac' or '--enable-rbac', not both.")
return disable_rbac
    def get_disable_rbac(self) -> Union[bool, None]:
        """Obtain the value of disable_rbac, with validation enabled.

        :return: bool or None
        """
        return self._get_disable_rbac(enable_validation=True)
def get_enable_rbac(self) -> Union[bool, None]:
# read the original value passed by the command
enable_rbac = self.raw_param.get("enable_rbac")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.enable_rbac is not None
):
enable_rbac = self.mc.enable_rbac
# this parameter does not need dynamic completion
# validation
if enable_rbac and self._get_disable_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError("specify either '--disable-rbac' or '--enable-rbac', not both.")
return enable_rbac
# pylint: disable=unused-argument
def _get_enable_azure_rbac(self, enable_validation: bool = False, **kwargs) -> bool:
# read the original value passed by the command
enable_azure_rbac = self.raw_param.get("enable_azure_rbac")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.aad_profile and
self.mc.aad_profile.enable_azure_rbac is not None
):
enable_azure_rbac = self.mc.aad_profile.enable_azure_rbac
# this parameter does not need dynamic completion
# validation
if enable_validation:
if enable_azure_rbac and self._get_disable_rbac(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--enable-azure-rbac cannot be used together with --disable-rbac"
)
if enable_azure_rbac and not self._get_enable_aad(enable_validation=False):
raise RequiredArgumentMissingError(
"--enable-azure-rbac can only be used together with --enable-aad"
)
return enable_azure_rbac
    def get_enable_azure_rbac(self) -> bool:
        """Obtain the value of enable_azure_rbac, with validation enabled.

        :return: bool
        """
        return self._get_enable_azure_rbac(enable_validation=True)
def get_api_server_authorized_ip_ranges(self) -> List[str]:
# read the original value passed by the command
api_server_authorized_ip_ranges = self.raw_param.get(
"api_server_authorized_ip_ranges"
)
if self.decorator_mode == DecoratorMode.CREATE:
# try to read the property value corresponding to the parameter from the `mc` object
read_from_mc = False
if (
self.mc and
self.mc.api_server_access_profile and
self.mc.api_server_access_profile.authorized_ip_ranges is not None
):
api_server_authorized_ip_ranges = (
self.mc.api_server_access_profile.authorized_ip_ranges
)
read_from_mc = True
# normalize
if not read_from_mc:
api_server_authorized_ip_ranges = [
x.strip()
for x in (
api_server_authorized_ip_ranges.split(",")
if api_server_authorized_ip_ranges
else []
)
]
elif self.decorator_mode == DecoratorMode.UPDATE:
# normalize
if api_server_authorized_ip_ranges is not None:
api_server_authorized_ip_ranges = [
x.strip()
for x in (
api_server_authorized_ip_ranges.split(",")
if api_server_authorized_ip_ranges
else []
)
]
# validation
if api_server_authorized_ip_ranges:
if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
raise InvalidArgumentValueError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
if self._get_enable_private_cluster(enable_validation=False):
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges is not supported for private cluster"
)
return api_server_authorized_ip_ranges
# pylint: disable=unused-argument
def _get_fqdn_subdomain(self, enable_validation: bool = False, **kwargs) -> Union[str, None]:
# read the original value passed by the command
fqdn_subdomain = self.raw_param.get("fqdn_subdomain")
# try to read the property value corresponding to the parameter from the `mc` object
# Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is
# no such attribute.
if (
self.mc and
hasattr(self.mc, "fqdn_subdomain") and
self.mc.fqdn_subdomain is not None
):
fqdn_subdomain = self.mc.fqdn_subdomain
# this parameter does not need dynamic completion
# validation
if enable_validation:
if fqdn_subdomain:
if self._get_dns_name_prefix(read_only=True):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
private_dns_zone = self.get_private_dns_zone()
if private_dns_zone:
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
if not is_valid_resource_id(private_dns_zone):
raise InvalidArgumentValueError(
private_dns_zone + " is not a valid Azure resource ID."
)
else:
raise InvalidArgumentValueError(
"--fqdn-subdomain should only be used for private cluster with custom private dns zone"
)
return fqdn_subdomain
    def get_fqdn_subdomain(self) -> Union[str, None]:
        """Obtain the value of fqdn_subdomain, with validation enabled.

        :return: string or None
        """
        return self._get_fqdn_subdomain(enable_validation=True)
# pylint: disable=unused-argument
def _get_enable_private_cluster(self, enable_validation: bool = False, **kwargs) -> bool:
# read the original value passed by the command
enable_private_cluster = self.raw_param.get("enable_private_cluster")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.api_server_access_profile and
self.mc.api_server_access_profile.enable_private_cluster is not None
):
enable_private_cluster = self.mc.api_server_access_profile.enable_private_cluster
# this parameter does not need dynamic completion
# validation
if enable_validation:
if enable_private_cluster:
if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
raise InvalidArgumentValueError(
"Please use standard load balancer for private cluster"
)
if self.get_api_server_authorized_ip_ranges():
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges is not supported for private cluster"
)
else:
if self.get_disable_public_fqdn():
raise InvalidArgumentValueError(
"--disable-public-fqdn should only be used with --enable-private-cluster"
)
if self.get_private_dns_zone():
raise InvalidArgumentValueError(
"Invalid private dns zone for public cluster. It should always be empty for public cluster"
)
return enable_private_cluster
    def get_enable_private_cluster(self) -> bool:
        """Obtain the value of enable_private_cluster, with validation enabled.

        :return: bool
        """
        return self._get_enable_private_cluster(enable_validation=True)
def get_disable_public_fqdn(self) -> bool:
# read the original value passed by the command
disable_public_fqdn = self.raw_param.get("disable_public_fqdn")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.api_server_access_profile and
self.mc.api_server_access_profile.enable_private_cluster_public_fqdn is not None
):
disable_public_fqdn = not self.mc.api_server_access_profile.enable_private_cluster_public_fqdn
# this parameter does not need dynamic completion
# validation
enable_private_cluster = self._get_enable_private_cluster(enable_validation=False)
if disable_public_fqdn and not enable_private_cluster:
raise InvalidArgumentValueError("--disable-public-fqdn should only be used with --enable-private-cluster")
return disable_public_fqdn
def get_private_dns_zone(self) -> Union[str, None]:
# read the original value passed by the command
private_dns_zone = self.raw_param.get("private_dns_zone")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.api_server_access_profile and
self.mc.api_server_access_profile.private_dns_zone is not None
):
private_dns_zone = self.mc.api_server_access_profile.private_dns_zone
# this parameter does not need dynamic completion
# validation
if private_dns_zone:
if not self._get_enable_private_cluster(enable_validation=False):
raise InvalidArgumentValueError(
"Invalid private dns zone for public cluster. It should always be empty for public cluster"
)
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
if not is_valid_resource_id(private_dns_zone):
raise InvalidArgumentValueError(
private_dns_zone + " is not a valid Azure resource ID."
)
else:
if self._get_fqdn_subdomain(enable_validation=False):
raise InvalidArgumentValueError(
"--fqdn-subdomain should only be used for private cluster with custom private dns zone"
)
return private_dns_zone
def get_assign_kubelet_identity(self) -> Union[str, None]:
# read the original value passed by the command
assign_kubelet_identity = self.raw_param.get("assign_kubelet_identity")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.identity_profile and
self.mc.identity_profile.get("kubeletidentity", None) and
getattr(self.mc.identity_profile.get("kubeletidentity"), "resource_id") is not None
):
assign_kubelet_identity = getattr(self.mc.identity_profile.get("kubeletidentity"), "resource_id")
# this parameter does not need dynamic completion
# validation
if assign_kubelet_identity and not self._get_assign_identity(enable_validation=False):
raise RequiredArgumentMissingError(
"--assign-kubelet-identity can only be specified when --assign-identity is specified"
)
return assign_kubelet_identity
def get_auto_upgrade_channel(self) -> Union[str, None]:
# read the original value passed by the command
auto_upgrade_channel = self.raw_param.get("auto_upgrade_channel")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.auto_upgrade_profile and
self.mc.auto_upgrade_profile.upgrade_channel is not None
):
auto_upgrade_channel = self.mc.auto_upgrade_profile.upgrade_channel
# this parameter does not need dynamic completion
# this parameter does not need validation
return auto_upgrade_channel
def get_node_osdisk_diskencryptionset_id(self) -> Union[str, None]:
# read the original value passed by the command
node_osdisk_diskencryptionset_id = self.raw_param.get("node_osdisk_diskencryptionset_id")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.disk_encryption_set_id is not None
):
node_osdisk_diskencryptionset_id = self.mc.disk_encryption_set_id
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_diskencryptionset_id
def get_cluster_autoscaler_profile(self) -> Union[Dict[str, str], None]:
# read the original value passed by the command
cluster_autoscaler_profile = self.raw_param.get("cluster_autoscaler_profile")
# try to read the property value corresponding to the parameter from the `mc` object
if self.decorator_mode == DecoratorMode.CREATE:
if self.mc and self.mc.auto_scaler_profile is not None:
cluster_autoscaler_profile = self.mc.auto_scaler_profile
# dynamic completion
if self.decorator_mode == DecoratorMode.UPDATE:
if cluster_autoscaler_profile and self.mc and self.mc.auto_scaler_profile:
# shallow copy should be enough for string-to-string dictionary
copy_of_raw_dict = self.mc.auto_scaler_profile.__dict__.copy()
new_options_dict = dict(
(key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items()
)
copy_of_raw_dict.update(new_options_dict)
cluster_autoscaler_profile = copy_of_raw_dict
# this parameter does not need validation
return cluster_autoscaler_profile
def get_uptime_sla(self) -> bool:
# read the original value passed by the command
uptime_sla = self.raw_param.get("uptime_sla")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.sku and
self.mc.sku.tier is not None
):
uptime_sla = self.mc.sku.tier == "Paid"
# this parameter does not need dynamic completion
# this parameter does not need validation
return uptime_sla
def get_tags(self) -> Union[Dict[str, str], None]:
# read the original value passed by the command
tags = self.raw_param.get("tags")
# try to read the property value corresponding to the parameter from the `mc` object
if (
self.mc and
self.mc.tags is not None
):
tags = self.mc.tags
# this parameter does not need dynamic completion
# this parameter does not need validation
return tags
def get_edge_zone(self) -> Union[str, None]:
# read the original value passed by the command
edge_zone = self.raw_param.get("edge_zone")
# try to read the property value corresponding to the parameter from the `mc` object
# Backward Compatibility: We also support api version v2020.11.01 in profile 2020-09-01-hybrid and there is
# no such attribute.
if (
self.mc and
hasattr(self.mc, "extended_location") and
self.mc.extended_location and
self.mc.extended_location.name is not None
):
edge_zone = self.mc.extended_location.name
# this parameter does not need dynamic completion
# this parameter does not need validation
return edge_zone
def get_disable_local_accounts(self) -> bool:
# read the original value passed by the command
disable_local_accounts = self.raw_param.get("disable_local_accounts")
# this parameter does not need dynamic completion
# this parameter does not need validation
return disable_local_accounts
def get_client_id_from_identity_or_sp_profile(self) -> str:
client_id = None
if check_is_msi_cluster(self.mc):
if self.mc.identity_profile is None or self.mc.identity_profile["kubeletidentity"] is None:
raise UnknownError(
"Unexpected error getting kubelet's identity for the cluster. "
"Please do not set --attach-acr or --detach-acr. "
"You can manually grant or revoke permission to the identity named "
"<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR."
)
client_id = self.mc.identity_profile["kubeletidentity"].client_id
elif self.mc and self.mc.service_principal_profile is not None:
client_id = self.mc.service_principal_profile.client_id
if not client_id:
raise UnknownError('Cannot get the AKS cluster\'s service principal.')
return client_id
class AKSCreateDecorator:
    def __init__(
        self,
        cmd: AzCliCommand,
        client: ContainerServiceClient,
        models: AKSModels,
        raw_parameters: Dict,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Internal controller of aks_create.

        Stores the command object, the container service client, the SDK models and an
        AKSContext (in create mode) that holds the raw parameters and the ManagedCluster
        object being assembled.
        """
        self.cmd = cmd
        self.client = client
        self.models = models
        # store the context in the process of assemble the ManagedCluster object
        self.context = AKSContext(cmd, raw_parameters, decorator_mode=DecoratorMode.CREATE)
        # `resource_type` is used to dynamically find the model (of a specific api version) provided by the
        # containerservice SDK, most models have been passed through the `models` parameter (instantiatied
        # from `AKSModels` (or `PreviewAKSModels` in aks-preview), where resource_type (i.e.,
        # api version) has been specified).
        self.resource_type = resource_type
    def init_mc(self) -> ManagedCluster:
        """Initialize a ManagedCluster object with several basic parameters and attach it to
        the internal context.

        :return: the ManagedCluster object
        """
        # Initialize a ManagedCluster object with mandatory parameter location and optional parameters tags, dns_prefix,
        # kubernetes_version, disable_rbac, node_osdisk_diskencryptionset_id, disable_local_accounts.
        mc = self.models.ManagedCluster(
            location=self.context.get_location(),
            tags=self.context.get_tags(),
            dns_prefix=self.context.get_dns_name_prefix(),
            kubernetes_version=self.context.get_kubernetes_version(),
            enable_rbac=not self.context.get_disable_rbac(),
            disk_encryption_set_id=self.context.get_node_osdisk_diskencryptionset_id(),
            disable_local_accounts=self.context.get_disable_local_accounts(),
        )
        # attach mc to AKSContext
        self.context.attach_mc(mc)
        return mc
    def set_up_agent_pool_profiles(self, mc: ManagedCluster) -> ManagedCluster:
        """Set up the system agent pool profile for the ManagedCluster object.

        Builds a single Linux system-mode agent pool from the context parameters and
        assigns it as the only entry of mc.agent_pool_profiles.

        :return: the ManagedCluster object
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        (
            node_count,
            enable_auto_scaling,
            min_count,
            max_count,
        ) = (
            self.context.get_node_count_and_enable_cluster_autoscaler_and_min_count_and_max_count()
        )
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            # Must be 12 chars or less before ACS RP adds to it
            name=self.context.get_nodepool_name(),
            tags=self.context.get_nodepool_tags(),
            node_labels=self.context.get_nodepool_labels(),
            count=node_count,
            vm_size=self.context.get_node_vm_size(),
            os_type="Linux",
            vnet_subnet_id=self.context.get_vnet_subnet_id(),
            proximity_placement_group_id=self.context.get_ppg(),
            availability_zones=self.context.get_zones(),
            enable_node_public_ip=self.context.get_enable_node_public_ip(),
            node_public_ip_prefix_id=self.context.get_node_public_ip_prefix_id(),
            enable_encryption_at_host=self.context.get_enable_encryption_at_host(),
            enable_ultra_ssd=self.context.get_enable_ultra_ssd(),
            max_pods=self.context.get_max_pods(),
            type=self.context.get_vm_set_type(),
            mode="System",
            os_disk_size_gb=self.context.get_node_osdisk_size(),
            os_disk_type=self.context.get_node_osdisk_type(),
            min_count=min_count,
            max_count=max_count,
            enable_auto_scaling=enable_auto_scaling,
        )
        mc.agent_pool_profiles = [agent_pool_profile]
        return mc
def set_up_linux_profile(self, mc: ManagedCluster) -> ManagedCluster:
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
ssh_key_value, no_ssh_key = self.context.get_ssh_key_value_and_no_ssh_key()
if not no_ssh_key:
ssh_config = self.models.ContainerServiceSshConfiguration(
public_keys=[
self.models.ContainerServiceSshPublicKey(
key_data=ssh_key_value
)
]
)
linux_profile = self.models.ContainerServiceLinuxProfile(
admin_username=self.context.get_admin_username(), ssh=ssh_config
)
mc.linux_profile = linux_profile
return mc
def set_up_windows_profile(self, mc: ManagedCluster) -> ManagedCluster:
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
(
windows_admin_username,
windows_admin_password,
) = self.context.get_windows_admin_username_and_password()
if windows_admin_username or windows_admin_password:
windows_license_type = None
if self.context.get_enable_ahub():
windows_license_type = "Windows_Server"
# this would throw an error if windows_admin_username is empty (the user enters an empty
# string after being prompted), since admin_username is a required parameter
windows_profile = self.models.ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type,
)
mc.windows_profile = windows_profile
return mc
def set_up_service_principal_profile(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the service principal profile on the mc object.

    The profile is skipped only when managed identity is enabled AND the user
    did not explicitly supply a service principal / client secret; in every
    other case a service principal profile is attached.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    # If customer explicitly provide a service principal, disable managed identity.
    (
        service_principal,
        client_secret,
    ) = self.context.get_service_principal_and_client_secret()
    enable_managed_identity = self.context.get_enable_managed_identity()
    # Skip create service principal profile for the cluster if the cluster enables managed identity
    # and customer doesn't explicitly provide a service principal.
    if not (
        enable_managed_identity and
        not service_principal and
        not client_secret
    ):
        service_principal_profile = (
            self.models.ManagedClusterServicePrincipalProfile(
                client_id=service_principal, secret=client_secret
            )
        )
        mc.service_principal_profile = service_principal_profile
    return mc
def process_add_role_assignment_for_vnet_subnet(self, mc: ManagedCluster) -> None:
    """Add a "Network Contributor" role assignment on a user-provided subnet, or defer it.

    When --vnet-subnet-id is supplied (and role assignment is neither skipped
    nor already present), the cluster identity needs Network Contributor on the
    subnet. For user-assigned identities (or a service principal) the role is
    assigned here; for a system-assigned identity it can only be granted after
    cluster creation, so a flag is stored in the intermediates for
    post-creation processing.

    Fix: the prompt previously rendered "bring your ownsubnet" because the
    concatenated string literals were missing a space.

    :return: None
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    need_post_creation_vnet_permission_granting = False
    vnet_subnet_id = self.context.get_vnet_subnet_id()
    skip_subnet_role_assignment = (
        self.context.get_skip_subnet_role_assignment()
    )
    if (
        vnet_subnet_id and
        not skip_subnet_role_assignment and
        not subnet_role_assignment_exists(self.cmd, vnet_subnet_id)
    ):
        # if service_principal_profile is None, then this is an MSI cluster: for a
        # system-assigned identity the role assignment must wait until after creation.
        service_principal_profile = mc.service_principal_profile
        assign_identity = self.context.get_assign_identity()
        if service_principal_profile is None and not assign_identity:
            msg = (
                "It is highly recommended to use USER assigned identity "
                "(option --assign-identity) when you want to bring your own "
                "subnet, which will have no latency for the role assignment to "
                "take effect. When using SYSTEM assigned identity, "
                "azure-cli will grant Network Contributor role to the "
                "system assigned identity after the cluster is created, and "
                "the role assignment will take some time to take effect, see "
                "https://docs.microsoft.com/azure/aks/use-managed-identity, "
                "proceed to create cluster with system assigned identity?"
            )
            if not self.context.get_yes() and not prompt_y_n(
                msg, default="n"
            ):
                return None
            need_post_creation_vnet_permission_granting = True
        else:
            scope = vnet_subnet_id
            identity_client_id = ""
            if assign_identity:
                identity_client_id = (
                    self.context.get_user_assigned_identity_client_id()
                )
            else:
                identity_client_id = service_principal_profile.client_id
            if not _add_role_assignment(
                self.cmd,
                "Network Contributor",
                identity_client_id,
                scope=scope,
            ):
                logger.warning(
                    "Could not create a role assignment for subnet. Are you an Owner on this subscription?"
                )
    # record the deferred-grant decision for the post-creation step
    self.context.set_intermediate(
        "need_post_creation_vnet_permission_granting",
        need_post_creation_vnet_permission_granting,
        overwrite_exists=True,
    )
def process_attach_acr(self, mc: ManagedCluster) -> None:
    """Grant the cluster's service principal pull access to the ACR given by --attach-acr.

    For managed-identity clusters the grant is deferred (handled after
    creation), so nothing is done here.

    :return: None
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    attach_acr = self.context.get_attach_acr()
    if not attach_acr:
        return
    if self.context.get_enable_managed_identity():
        # role assignment for MSI clusters happens post-creation
        return

    service_principal_profile = mc.service_principal_profile
    _ensure_aks_acr(
        self.cmd,
        client_id=service_principal_profile.client_id,
        acr_name_or_id=attach_acr,
        subscription_id=self.context.get_subscription_id(),
    )
def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the network profile on the mc object.

    Builds the load balancer profile from the outbound ip/port/timeout options,
    then constructs a ContainerServiceNetworkProfile when any advanced
    networking option was given; otherwise falls back to a default "kubenet"
    profile (standard LB / LB profile present) or a minimal profile (basic LB).

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    # build the load balancer profile, which is part of the network profile
    load_balancer_profile = create_load_balancer_profile(
        self.context.get_load_balancer_managed_outbound_ip_count(),
        self.context.get_load_balancer_outbound_ips(),
        self.context.get_load_balancer_outbound_ip_prefixes(),
        self.context.get_load_balancer_outbound_ports(),
        self.context.get_load_balancer_idle_timeout(),
        models=self.models.lb_models,
    )

    # outbound type validation depends on the load balancer profile
    outbound_type = self.context.get_outbound_type(
        load_balancer_profile=load_balancer_profile
    )

    # normalized ("standard"/"basic") load balancer sku
    load_balancer_sku = safe_lower(self.context.get_load_balancer_sku())

    network_plugin = self.context.get_network_plugin()
    # these five options are validated together by the context getter
    (
        pod_cidr,
        service_cidr,
        dns_service_ip,
        docker_bridge_address,
        network_policy,
    ) = (
        self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy()
    )

    network_profile = None
    if any(
        [
            network_plugin,
            pod_cidr,
            service_cidr,
            dns_service_ip,
            docker_bridge_address,
            network_policy,
        ]
    ):
        # at least one advanced networking option was given: attach them all
        network_profile = self.models.ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku,
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type,
        )
    else:
        if load_balancer_sku == "standard" or load_balancer_profile:
            # defaults to kubenet when only LB-level options were provided
            network_profile = self.models.ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku,
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku == "basic":
            # basic LB: only the sku is recorded on the profile
            network_profile = self.models.ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku,
            )
    mc.network_profile = network_profile
    return mc
def set_up_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the addon profiles selected via --enable-addons on the mc object.

    Each recognized addon name is removed from the ``addons`` list as it is
    handled; some addons also record intermediates ("monitoring",
    "enable_virtual_node", "ingress_appgw_addon_enabled") for post-creation
    processing.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    ManagedClusterAddonProfile = self.models.ManagedClusterAddonProfile
    addon_profiles = {}
    # error out if '--enable-addons=virtual-node' is set but aci_subnet_name and vnet_subnet_id are not
    addons = self.context.get_enable_addons()
    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        workspace_resource_id = self.context.get_workspace_resource_id()
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        # post-process, create a deployment
        _ensure_container_insights_for_monitoring(self.cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
        # set intermediate
        self.context.set_intermediate("monitoring", True, overwrite_exists=True)
        addons.remove('monitoring')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        aci_subnet_name = self.context.get_aci_subnet_name()
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = self.context.get_virtual_node_addon_os_type()
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        # set intermediate
        self.context.set_intermediate("enable_virtual_node", True, overwrite_exists=True)
        addons.remove('virtual-node')
    if 'ingress-appgw' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        appgw_name = self.context.get_appgw_name()
        appgw_subnet_cidr = self.context.get_appgw_subnet_cidr()
        appgw_id = self.context.get_appgw_id()
        appgw_subnet_id = self.context.get_appgw_subnet_id()
        appgw_watch_namespace = self.context.get_appgw_watch_namespace()
        # only the config keys that were explicitly provided are attached
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        # set intermediate
        self.context.set_intermediate("ingress_appgw_addon_enabled", True, overwrite_exists=True)
        addons.remove('ingress-appgw')
    if 'confcom' in addons:
        # SGX quote helper defaults to disabled unless --enable-sgxquotehelper was given
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if self.context.get_enable_sgxquotehelper():
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    mc.addon_profiles = addon_profiles
    return mc
def set_up_aad_profile(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the AAD (Azure Active Directory) profile on the mc object.

    With --enable-aad a managed AAD profile is used; otherwise the legacy
    client/server application based profile is built when any of its options
    was supplied.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    aad_profile = None
    enable_aad = self.context.get_enable_aad()
    if enable_aad:
        aad_profile = self.models.ManagedClusterAADProfile(
            managed=True,
            enable_azure_rbac=self.context.get_enable_azure_rbac(),
            # ids -> i_ds due to track 2 naming issue
            admin_group_object_i_ds=self.context.get_aad_admin_group_object_ids(),
            tenant_id=self.context.get_aad_tenant_id()
        )
    else:
        # legacy (non-managed) AAD integration options
        (
            aad_client_app_id,
            aad_server_app_id,
            aad_server_app_secret,
        ) = (
            self.context.get_aad_client_app_id_and_aad_server_app_id_and_aad_server_app_secret()
        )
        aad_tenant_id = self.context.get_aad_tenant_id()
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
            aad_profile = self.models.ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )
    mc.aad_profile = aad_profile
    return mc
def set_up_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the API server access profile and the fqdn subdomain on the mc object.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    authorized_ip_ranges = self.context.get_api_server_authorized_ip_ranges()
    private_cluster = self.context.get_enable_private_cluster()
    no_public_fqdn = self.context.get_disable_public_fqdn()
    dns_zone = self.context.get_private_dns_zone()

    # the profile is only attached when at least one relevant option was given
    profile = None
    if authorized_ip_ranges or private_cluster:
        profile = self.models.ManagedClusterAPIServerAccessProfile(
            authorized_ip_ranges=authorized_ip_ranges,
            enable_private_cluster=True if private_cluster else None,
            enable_private_cluster_public_fqdn=False if no_public_fqdn else None,
            private_dns_zone=dns_zone,
        )
    mc.api_server_access_profile = profile

    mc.fqdn_subdomain = self.context.get_fqdn_subdomain()
    return mc
def set_up_identity(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the cluster identity (system- or user-assigned MSI) on the mc object.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    enable_msi = self.context.get_enable_managed_identity()
    assigned_resource_id = self.context.get_assign_identity()

    identity = None
    if enable_msi:
        if assigned_resource_id:
            # user-assigned: key the identity dict by the identity resource id
            identity = self.models.ManagedClusterIdentity(
                type="UserAssigned",
                user_assigned_identities={
                    assigned_resource_id: self.models.ManagedServiceIdentityUserAssignedIdentitiesValue()
                },
            )
        else:
            identity = self.models.ManagedClusterIdentity(type="SystemAssigned")
    mc.identity = identity
    return mc
def set_up_identity_profile(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the identity profile (custom kubelet identity) on the mc object.

    When --assign-kubelet-identity is given, resolves the identity through the
    MSI client and grants the cluster (control plane) identity permission over
    the kubelet identity.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    identity_profile = None
    assign_kubelet_identity = self.context.get_assign_kubelet_identity()
    if assign_kubelet_identity:
        # resolve client_id / principal_id of the kubelet identity via MSI
        kubelet_identity = self.context.get_identity_by_msi_client(assign_kubelet_identity)
        identity_profile = {
            'kubeletidentity': self.models.UserAssignedIdentity(
                resource_id=assign_kubelet_identity,
                client_id=kubelet_identity.client_id,
                object_id=kubelet_identity.principal_id
            )
        }
        cluster_identity_object_id = self.context.get_user_assigned_identity_object_id()
        # ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
        _ensure_cluster_identity_permission_on_kubelet_identity(
            self.cmd,
            cluster_identity_object_id,
            assign_kubelet_identity)
    mc.identity_profile = identity_profile
    return mc
def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the auto upgrade profile (--auto-upgrade-channel) on the mc object.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    channel = self.context.get_auto_upgrade_channel()
    mc.auto_upgrade_profile = (
        self.models.ManagedClusterAutoUpgradeProfile(upgrade_channel=channel)
        if channel
        else None
    )
    return mc
def set_up_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the cluster autoscaler profile on the mc object.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    mc.auto_scaler_profile = self.context.get_cluster_autoscaler_profile()
    return mc
def set_up_sku(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the SKU (uptime SLA) on the mc object.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    # --uptime-sla maps to the paid tier of the managed cluster SKU
    if self.context.get_uptime_sla():
        mc.sku = self.models.ManagedClusterSKU(name="Basic", tier="Paid")
    return mc
def set_up_extended_location(self, mc: ManagedCluster) -> ManagedCluster:
    """Set up the extended location (edge zone) on the mc object.

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    edge_zone = self.context.get_edge_zone()
    if not edge_zone:
        return mc

    mc.extended_location = self.models.ExtendedLocation(
        name=edge_zone,
        type=self.models.ExtendedLocationTypes.EDGE_ZONE,
    )
    return mc
def build_custom_headers(self, mc: ManagedCluster) -> None:
    """Store the custom request headers (AAD session key) in the intermediates.

    :return: None
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    # Add AAD session key to header.
    # If principal_obj is None, we will not add this header, this can happen when the cluster enables managed
    # identity. In this case, the header is useless and that's OK to not add this header.
    custom_headers = None
    if mc.service_principal_profile:
        custom_headers = {'Ocp-Aad-Session-Key': self.context.get_intermediate("aad_session_key")}
    self.context.set_intermediate("custom_headers", custom_headers, overwrite_exists=True)
def construct_default_mc_profile(self) -> ManagedCluster:
    """Construct the complete ManagedCluster profile for a default cluster creation.

    Chains the individual set-up steps; the order matters, since later steps
    read fields written by earlier ones (role assignment and ACR processing
    read mc.service_principal_profile, and build_custom_headers reads it last).

    :return: the fully assembled ManagedCluster object
    """
    # initialize an empty ManagedCluster, then fill it in step by step
    mc = self.init_mc()
    mc = self.set_up_agent_pool_profiles(mc)
    mc = self.set_up_linux_profile(mc)
    mc = self.set_up_windows_profile(mc)
    mc = self.set_up_service_principal_profile(mc)
    # side-effect-only steps (role assignment / ACR); they do not return mc
    self.process_add_role_assignment_for_vnet_subnet(mc)
    self.process_attach_acr(mc)
    mc = self.set_up_network_profile(mc)
    mc = self.set_up_addon_profiles(mc)
    mc = self.set_up_aad_profile(mc)
    mc = self.set_up_api_server_access_profile(mc)
    mc = self.set_up_identity(mc)
    mc = self.set_up_identity_profile(mc)
    mc = self.set_up_auto_upgrade_profile(mc)
    mc = self.set_up_auto_scaler_profile(mc)
    mc = self.set_up_sku(mc)
    mc = self.set_up_extended_location(mc)
    # records custom request headers in the intermediates for create_mc
    self.build_custom_headers(mc)
    return mc
def create_mc(self, mc: ManagedCluster) -> ManagedCluster:
    """Send the request to create a real managed cluster.

    Retries up to ``max_retry`` times to tolerate AAD propagation delay
    (errors containing "not found in Active Directory tenant" trigger a short
    sleep and retry); any other CloudError is re-raised immediately. If all
    retries are exhausted, the last error is raised.

    :return: the ManagedCluster object returned by the service
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    max_retry = 30
    # holds the last propagation-related error so it can be raised after the loop
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            created_cluster = _put_managed_cluster_ensuring_permission(
                self.cmd,
                self.client,
                self.context.get_subscription_id(),
                self.context.get_resource_group_name(),
                self.context.get_name(),
                mc,
                self.context.get_intermediate("monitoring"),
                self.context.get_intermediate("ingress_appgw_addon_enabled"),
                self.context.get_intermediate("enable_virtual_node"),
                self.context.get_intermediate("need_post_creation_vnet_permission_granting"),
                self.context.get_vnet_subnet_id(),
                self.context.get_enable_managed_identity(),
                self.context.get_attach_acr(),
                self.context.get_intermediate("custom_headers"),
                self.context.get_no_wait())
            return created_cluster
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                # AAD propagation delay: wait and retry
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
class AKSUpdateDecorator:
def __init__(
    self,
    cmd: AzCliCommand,
    client: ContainerServiceClient,
    models: AKSModels,
    raw_parameters: Dict,
):
    """Internal controller of `az aks update`.

    Stores the command object, the container service client and the model set,
    and builds an AKSContext in UPDATE mode from the raw command parameters.
    """
    self.cmd = cmd
    self.client = client
    self.models = models
    # context used throughout the update flow to read/validate parameters
    self.context = AKSContext(cmd, raw_parameters, decorator_mode=DecoratorMode.UPDATE)
def check_raw_parameters(self):
    """Ensure at least one cluster-mutating option was provided to `az aks update`.

    Without this check an update with no options would be a no-op PUT. Raises
    a RequiredArgumentMissingError listing the supported options when nothing
    but identification arguments was supplied.

    Fix: several option fragments in the error message were missing the
    trailing space, rendering e.g. 'or"--load-balancer-outbound-ips"'.

    :raises RequiredArgumentMissingError: if no updating option was given
    """
    # exclude parameters that identify the cluster rather than change it
    excluded_keys = ("cmd", "client", "resource_group_name", "name")
    is_changed = any(v for k, v in self.context.raw_param.items() if k not in excluded_keys)

    # these options accept an explicit "empty" value that the check above
    # cannot distinguish, so probe them separately
    is_default = (
        self.context.get_cluster_autoscaler_profile() is None and
        self.context.get_api_server_authorized_ip_ranges() is None
    )

    if not is_changed and is_default:
        raise RequiredArgumentMissingError(
            'Please specify one or more of "--enable-cluster-autoscaler" or '
            '"--disable-cluster-autoscaler" or '
            '"--update-cluster-autoscaler" or '
            '"--cluster-autoscaler-profile" or '
            '"--load-balancer-managed-outbound-ip-count" or '
            '"--load-balancer-outbound-ips" or '
            '"--load-balancer-outbound-ip-prefixes" or '
            '"--load-balancer-outbound-ports" or '
            '"--load-balancer-idle-timeout" or '
            '"--auto-upgrade-channel" or '
            '"--attach-acr" or "--detach-acr" or '
            '"--uptime-sla" or '
            '"--no-uptime-sla" or '
            '"--api-server-authorized-ip-ranges" or '
            '"--enable-aad" or '
            '"--aad-tenant-id" or '
            '"--aad-admin-group-object-ids" or '
            '"--enable-ahub" or '
            '"--disable-ahub" or '
            '"--windows-admin-password" or '
            '"--enable-managed-identity" or '
            '"--assign-identity" or '
            '"--enable-azure-rbac" or '
            '"--disable-azure-rbac" or '
            '"--enable-public-fqdn" or '
            '"--disable-public-fqdn"'
        )
def fetch_mc(self) -> ManagedCluster:
    """Fetch the existing ManagedCluster from the service and attach it to the context.

    :return: the ManagedCluster object
    """
    resource_group_name = self.context.get_resource_group_name()
    cluster_name = self.context.get_name()
    mc = self.client.get(resource_group_name, cluster_name)
    # make the fetched cluster available to the context getters
    self.context.attach_mc(mc)
    return mc
def update_auto_scaler_profile(self, mc):
    """Update the autoscaler settings of the first agent pool and the cluster autoscaler profile.

    --enable/--update-cluster-autoscaler turn auto scaling on with the given
    min/max counts; --disable-cluster-autoscaler turns it off and clears the
    counts. A provided --cluster-autoscaler-profile replaces the stored profile
    (an empty dict clears it).

    :return: the ManagedCluster object
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    (
        update_cluster_autoscaler,
        enable_cluster_autoscaler,
        disable_cluster_autoscaler,
        min_count,
        max_count,
    ) = (
        self.context.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
    )

    # NOTE(review): only the first agent pool profile is updated here — assumes
    # a single relevant pool for `az aks update`; confirm for multi-pool clusters.
    if update_cluster_autoscaler or enable_cluster_autoscaler:
        mc.agent_pool_profiles[0].enable_auto_scaling = True
        mc.agent_pool_profiles[0].min_count = int(min_count)
        mc.agent_pool_profiles[0].max_count = int(max_count)

    if disable_cluster_autoscaler:
        mc.agent_pool_profiles[0].enable_auto_scaling = False
        mc.agent_pool_profiles[0].min_count = None
        mc.agent_pool_profiles[0].max_count = None

    cluster_autoscaler_profile = self.context.get_cluster_autoscaler_profile()
    if cluster_autoscaler_profile is not None:
        # replace the profile; an empty dictionary clears it
        mc.auto_scaler_profile = cluster_autoscaler_profile
    return mc
def process_attach_detach_acr(self, mc: ManagedCluster) -> None:
    """Attach or detach an ACR from the cluster (add/delete the role assignment).

    :return: None
    """
    if not isinstance(mc, self.models.ManagedCluster):
        raise CLIInternalError(
            "Unexpected mc object with type '{}'.".format(type(mc))
        )

    subscription_id = self.context.get_subscription_id()
    client_id = self.context.get_client_id_from_identity_or_sp_profile()

    # (acr option value, extra kwargs for _ensure_aks_acr) for attach then detach
    acr_operations = [
        (self.context.get_attach_acr(), {}),
        (self.context.get_detach_acr(), {"detach": True}),
    ]
    for acr_name_or_id, extra_kwargs in acr_operations:
        if acr_name_or_id:
            _ensure_aks_acr(
                self.cmd,
                client_id=client_id,
                acr_name_or_id=acr_name_or_id,
                subscription_id=subscription_id,
                **extra_kwargs,
            )
def update_default_mc_profile(self) -> ManagedCluster:
    """Fetch and update the default ManagedCluster profile.

    Validates the raw parameters first, fetches the existing cluster, then
    applies each update step in order.

    :return: the updated ManagedCluster object
    """
    # bail out early when no mutating option was supplied
    self.check_raw_parameters()
    mc = self.fetch_mc()
    mc = self.update_auto_scaler_profile(mc)
    # side-effect-only step (role assignment for ACR); does not return mc
    self.process_attach_detach_acr(mc)
    return mc
def update_mc(self) -> ManagedCluster:
| true | true |
1c36112f0d18742a298bc33e81409e565e8c442a | 1,349 | py | Python | day03/day03.py | ceedee666/aoc_2020 | 75d9527b325760b612d477e5a28452f7714d2377 | [
"CC0-1.0"
] | 1 | 2020-12-04T18:47:25.000Z | 2020-12-04T18:47:25.000Z | day03/day03.py | ceedee666/aoc_2020 | 75d9527b325760b612d477e5a28452f7714d2377 | [
"CC0-1.0"
] | null | null | null | day03/day03.py | ceedee666/aoc_2020 | 75d9527b325760b612d477e5a28452f7714d2377 | [
"CC0-1.0"
] | null | null | null | from pathlib import Path
from functools import reduce
import typer
TREE = '#'
app = typer.Typer()
def read_input_file(input_file_path):
    """Read the puzzle input file and return its lines, stripped of whitespace."""
    with Path(input_file_path).open() as input_file:
        return [line.strip() for line in input_file]
def calculate_coordinates(x_steps, y_steps, toboggan_map):
    """Return the (row, column) cells visited on the map for the given slope.

    The map repeats horizontally, so the column index wraps modulo the width.
    """
    width = len(toboggan_map[0])
    visited_rows = range(0, len(toboggan_map), x_steps)
    return [(row, (y_steps * step) % width) for step, row in enumerate(visited_rows)]
def count_trees(x_steps, y_steps, toboggan_map):
    """Count the trees hit when following the (x_steps down, y_steps right) slope.

    A cell counts as a tree when it holds the ``TREE`` character; the map
    repeats horizontally, which ``calculate_coordinates`` handles by wrapping
    the column index.
    """
    # sum over a generator is the idiomatic replacement for the previous
    # reduce(lambda ...) counting construct.
    return sum(
        1
        for row, column in calculate_coordinates(x_steps, y_steps, toboggan_map)
        if toboggan_map[row][column] == TREE
    )
@app.command()
def part1(input_file: str):
    """Solve part 1: count trees hit on the (right 3, down 1) slope."""
    trees_hit = count_trees(1, 3, read_input_file(input_file))
    print(f"The number of trees on the trajectory is {trees_hit}")
@app.command()
def part2(input_file: str):
    """Solve part 2: multiply the tree counts of the five prescribed slopes."""
    toboggan_map = read_input_file(input_file)
    product = 1
    for x_steps, y_steps in [(1, 1), (1, 3), (1, 5), (1, 7), (2, 1)]:
        product *= count_trees(x_steps, y_steps, toboggan_map)
    print(f"The product of the number of trees on the trajectories is {product}")
# Entry point: dispatch to the `part1` / `part2` typer commands.
if __name__ == "__main__":
    app()
| 24.527273 | 86 | 0.635285 | from pathlib import Path
from functools import reduce
import typer
TREE = '#'
app = typer.Typer()
def read_input_file(input_file_path):
p = Path(input_file_path)
with p.open() as f:
lines = f.readlines()
lines = list(map(lambda s: s.strip(), lines))
return lines
def calculate_coordinates(x_steps, y_steps, toboggan_map):
return list(
map(
lambda e: (e[1], (y_steps * e[0]) % len(toboggan_map[0])),
enumerate(range(0, len(toboggan_map), x_steps))))
def count_trees(x_steps, y_steps, toboggan_map):
count = reduce(
lambda s, c: s + 1 if toboggan_map[c[0]][c[1]] == TREE else s,
calculate_coordinates(x_steps, y_steps, toboggan_map),
0)
return count
@app.command()
def part1(input_file: str):
toboggan_map = read_input_file(input_file)
number_of_trees = count_trees(1, 3, toboggan_map)
print(f"The number of trees on the trajectory is {number_of_trees}")
@app.command()
def part2(input_file: str):
toboggan_map = read_input_file(input_file)
slopes = [(1, 1), (1, 3), (1, 5), (1, 7), (2, 1)]
result = reduce(lambda r, s: r * count_trees(s[0], s[1], toboggan_map), slopes, 1)
print(f"The product of the number of trees on the trajectories is {result}")
if __name__ == "__main__":
app()
| true | true |
1c361171936f9f279e9a12a188bd835a535cd35c | 1,269 | py | Python | goldeneye/public/forms.py | smartpigling/goldeneye | 704ee16ca89cb0b8a9b7c23216689e04d9f07a92 | [
"BSD-3-Clause"
] | null | null | null | goldeneye/public/forms.py | smartpigling/goldeneye | 704ee16ca89cb0b8a9b7c23216689e04d9f07a92 | [
"BSD-3-Clause"
] | null | null | null | goldeneye/public/forms.py | smartpigling/goldeneye | 704ee16ca89cb0b8a9b7c23216689e04d9f07a92 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Public forms."""
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, BooleanField
from wtforms.validators import DataRequired
from goldeneye.user.models import User
class LoginForm(FlaskForm):
    """Login form.

    On successful validation the matching ``User`` instance is stored on
    ``self.user`` so the view can log it in without a second query.
    """

    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField("Remember Me")

    def __init__(self, *args, **kwargs):
        """Create instance."""
        super(LoginForm, self).__init__(*args, **kwargs)
        # Set by validate() when the credentials check out.
        self.user = None

    def validate(self, extra_validators=None):
        """Validate the form.

        Accepts and forwards ``extra_validators`` so this override stays
        compatible with WTForms' ``Form.validate(extra_validators=None)``
        signature, which Flask-WTF invokes via ``validate_on_submit``.

        :return: True if the fields validate and the credentials match an
            active user, False otherwise (field errors are populated).
        """
        initial_validation = super(LoginForm, self).validate(extra_validators)
        if not initial_validation:
            return False

        self.user = User.query.filter_by(username=self.username.data).first()
        if not self.user:
            self.username.errors.append('Unknown username')
            return False

        if not self.user.check_password(self.password.data):
            self.password.errors.append('Invalid password')
            return False

        if not self.user.active:
            self.username.errors.append('User not activated')
            return False
        return True
| 30.95122 | 77 | 0.647754 |
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, BooleanField
from wtforms.validators import DataRequired
from goldeneye.user.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField("Remember Me")
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
initial_validation = super(LoginForm, self).validate()
if not initial_validation:
return False
self.user = User.query.filter_by(username=self.username.data).first()
if not self.user:
self.username.errors.append('Unknown username')
return False
if not self.user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
if not self.user.active:
self.username.errors.append('User not activated')
return False
return True
| true | true |
1c3612e6140a224079bc37f41e88762ab86e7638 | 444 | py | Python | reference_fetcher/tools.py | Phyks/arxiv_metadata | 2338906fec5bfdca6e4b6303b05a79856c2e8d37 | [
"MIT"
] | 2 | 2015-12-26T17:08:18.000Z | 2016-10-04T21:35:50.000Z | reference_fetcher/tools.py | Phyks/arxiv_metadata | 2338906fec5bfdca6e4b6303b05a79856c2e8d37 | [
"MIT"
] | null | null | null | reference_fetcher/tools.py | Phyks/arxiv_metadata | 2338906fec5bfdca6e4b6303b05a79856c2e8d37 | [
"MIT"
] | null | null | null | """
This file contains various utility functions.
"""
def replaceAll(text, replace_dict):
    """Return ``text`` with every key of ``replace_dict`` replaced by its value.

    Replacements are applied one after another in the dict's iteration order.
    """
    for old_fragment, new_fragment in replace_dict.items():
        text = text.replace(old_fragment, new_fragment)
    return text
def clean_whitespaces(text):
    """Collapse whitespace runs to single spaces and drop trailing "." and ",".
    """
    trimmed = text.strip().rstrip(".,")
    return ' '.join(trimmed.split())
| 22.2 | 79 | 0.617117 |
def replaceAll(text, replace_dict):
for i, j in replace_dict.items():
text = text.replace(i, j)
return text
def clean_whitespaces(text):
return ' '.join(text.strip().rstrip(".,").split())
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.